diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index b507d1c9e9..b501fa8655 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -9,7 +9,10 @@ "customizations": { "vscode": { // Add the IDs of extensions you want installed when the container is created. - "extensions": ["matklad.rust-analyzer", "microsoft.Docker"], + "extensions": [ + "rust-lang.rust-analyzer", + "microsoft.Docker" + ], // Set *default* container specific settings.json values on container create. "settings": { "rust-analyzer.cargo.noDefaultFeatures": true @@ -20,7 +23,7 @@ // "forwardPorts": [], // Uncomment the next line to run commands after the container is created - for example installing curl. // Install development components that shouldn't be in the main Dockerfile - "postCreateCommand": "rustup component add --toolchain nightly rustfmt clippy llvm-tools-preview && cargo install --locked cargo-make", + "postCreateCommand": "rustup component add --toolchain nightly rustfmt clippy llvm-tools-preview && cargo binstall --locked cargo-make", // Uncomment when using a ptrace-based debugger like C++, Go, and Rust "runArgs": [ "--cap-add=SYS_PTRACE", @@ -31,4 +34,4 @@ // "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ], // Uncomment to connect as a non-root user if you've added one. See https://aka.ms/vscode-remote/containers/non-root. // "remoteUser": "vscode" -} +} \ No newline at end of file diff --git a/.dockerignore b/.dockerignore index a00386f1f7..4d7b3cf3cd 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,5 @@ -target +**/target +**/.git Cargo.lock *.o diff --git a/.github/.linkspector.yml b/.github/.linkspector.yml new file mode 100644 index 0000000000..e563b9145d --- /dev/null +++ b/.github/.linkspector.yml @@ -0,0 +1,14 @@ +dirs: + - . 
+ +useGitIgnore: true + +ignorePatterns: + - pattern: "^https://crates.io" + - pattern: "^https://github.com/AFLplusplus/linux-qemu-image-builder" + - pattern: "https://www.romu-random.org/" + +aliveStatusCodes: + - 0 + - 200 + - 403 \ No newline at end of file diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 03bf8a3ae0..d5f385b01f 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,4 +3,6 @@ updates: - package-ecosystem: "cargo" directory: "/" schedule: - interval: "daily" \ No newline at end of file + interval: "daily" + ignore: + - dependency-name: "pyo3" diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index 8cf9b3f9d8..5fa33cf098 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -5,11 +5,14 @@ on: branches: [ main, "pr/**" ] pull_request: branches: [ main ] + types: ["labeled", "opened", "synchronize", "reopened"] workflow_dispatch: merge_group: env: CARGO_TERM_COLOR: always CARGO_NET_GIT_FETCH_WITH_CLI: true + MAIN_LLVM_VERSION: 18 + concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true @@ -19,29 +22,29 @@ jobs: strategy: fail-fast: false matrix: - os: [ ubuntu-latest, windows-latest, macOS-latest ] + os: [ ubuntu-24.04, windows-latest, macOS-latest ] runs-on: ${{ matrix.os }} steps: - name: Install mimetype if: runner.os == 'Linux' - run: sudo apt-get install libfile-mimeinfo-perl - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly - override: true - - name: Install mimetype - if: runner.os == 'Linux' - run: sudo apt-get install libfile-mimeinfo-perl + run: sudo apt-get update && sudo apt-get install -y libfile-mimeinfo-perl - name: install mdbook - uses: baptiste0928/cargo-install@v1.3.0 + uses: baptiste0928/cargo-install@v3 with: crate: mdbook - name: install linkcheck - uses: baptiste0928/cargo-install@v1.3.0 + uses: baptiste0928/cargo-install@v3 with: crate: mdbook-linkcheck - - uses: actions/checkout@v3 + # NOTE: The current crates.io release of mdbook-linkcheck (v0.7.7) is broken + # => https://github.com/Michael-F-Bryan/mdbook-linkcheck/pull/82#issuecomment-2241058491 + git: https://github.com/Michael-F-Bryan/mdbook-linkcheck.git + rev: 8c783c5d754d83bcd50c28fb4174854b04ece990 + - name: default nightly + run: rustup default nightly + - uses: actions/checkout@v4 + - if: runner.os == 'Linux' + uses: ./.github/workflows/ubuntu-prepare - uses: Swatinem/rust-cache@v2 with: { shared-key: "ubuntu" } if: runner.os == 'Linux' @@ -50,14 +53,15 @@ jobs: - name: Check for binary blobs if: runner.os == 'Linux' run: ./scripts/check_for_blobs.sh - - name: default nightly - run: rustup default nightly - name: Build libafl debug run: cargo build -p libafl - - name: Test the book + - name: Test the book (Linux) # TODO: fix books test fail with updated windows-rs - if: runner.os != 'Windows' + if: runner.os == 'Linux' run: cd docs && mdbook test -L ../target/debug/deps + - name: Test the book (MacOS) + if: runner.os == 'MacOS' + run: cd docs && mdbook test -L ../target/debug/deps $(python3-config --ldflags | cut -d ' ' -f1) - name: Run tests run: cargo test - name: Test libafl no_std @@ -67,33 +71,10 @@ jobs: - name: Test libafl_targets no_std run: cd libafl_targets && cargo test --no-default-features - llvm-tester: - runs-on: ubuntu-22.04 - continue-on-error: true - strategy: - matrix: - llvm-version: [ "16", "17" ] # Add 18 when KyleMayes/install-llvm-action enables it - steps: - - name: Remove Dotnet & Haskell 
- run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - uses: actions/checkout@v3 - - uses: Swatinem/rust-cache@v2 - with: { shared-key: "llvm-tester" } - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2 - with: - version: "${{matrix.llvm-version}}" - - name: Build and test with llvm-${{ matrix.llvm-version }} - run: pwd && ls & cd libafl_cc && cargo build --release - ubuntu-doc-build: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/workflows/ubuntu-prepare - uses: Swatinem/rust-cache@v2 # ---- doc check ---- @@ -101,9 +82,9 @@ jobs: run: RUSTFLAGS="--cfg docsrs" cargo +nightly doc --all-features --no-deps ubuntu-doc-test: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/workflows/ubuntu-prepare - uses: Swatinem/rust-cache@v2 # ---- doc check ---- @@ -111,41 +92,27 @@ jobs: run: RUSTFLAGS="--cfg docsrs" cargo +nightly test --doc --all-features ubuntu-miri: - runs-on: ubuntu-22.04 - needs: ubuntu + runs-on: ubuntu-24.04 + if: contains( github.event.pull_request.labels.*.name, 'pre-release') steps: - - uses: actions/checkout@v3 - - uses: ./.github/workflows/ubuntu-prepare - - uses: Swatinem/rust-cache@v2 - name: Add nightly clippy run: rustup toolchain install nightly --component miri --allow-downgrade + - uses: actions/checkout@v4 + - uses: ./.github/workflows/ubuntu-prepare + - uses: Swatinem/rust-cache@v2 # --- miri undefined behavior test -- - name: Run miri tests run: RUST_BACKTRACE=1 MIRIFLAGS="-Zmiri-disable-isolation" cargo +nightly miri test ubuntu: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Remove Dotnet & Haskell run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - name: Remove existing clang and LLVM - run: sudo apt purge llvm* clang* lld* lldb* opt* - - name: Install and cache deps - run: sudo apt update && sudo apt install ninja-build shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev libz3-dev - - name: Add nightly clippy - run: rustup toolchain install nightly --component clippy --component miri --allow-downgrade - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + - uses: ./.github/workflows/ubuntu-prepare - uses: Swatinem/rust-cache@v2 with: { shared-key: "ubuntu" } - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2 - with: - directory: ${{ runner.temp }}/llvm - version: 17 # pcguard edges and pcguard hitcounts are not compatible and we need to build them seperately - name: Check pcguard edges run: cargo check --features=sancov_pcguard_edges @@ -158,42 +125,31 @@ jobs: run: cargo build --examples --verbose ubuntu-clippy: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Remove Dotnet & Haskell run: rm -rf /usr/share/dotnet && rm -rf /opt/ghc - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - - name: Install and cache deps - run: sudo apt update && sudo apt install ninja-build shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev libz3-dev - name: Add nightly clippy run: rustup toolchain install nightly --component clippy --allow-downgrade && rustup default nightly - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + - uses: ./.github/workflows/ubuntu-prepare - uses: 
Swatinem/rust-cache@v2 with: { shared-key: "ubuntu" } - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2 - with: - directory: ${{ runner.temp }}/llvm - version: 17 - name: Run clippy - run: ./scripts/clippy.sh + run: LLVM_CONFIG=llvm-config-${{env.MAIN_LLVM_VERSION}} ./scripts/clippy.sh # --- test embedding the libafl_libfuzzer_runtime library # Fix me plz # - name: Test Build libafl_libfuzzer with embed # run: cargo +nightly test --features=embed-runtime --manifest-path libafl_libfuzzer/Cargo.toml ubuntu-check: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 needs: ubuntu strategy: matrix: instance_idx: [ "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17" ] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/workflows/ubuntu-prepare - uses: Swatinem/rust-cache@v2 with: { shared-key: "ubuntu" } @@ -205,14 +161,13 @@ jobs: run: python3 ./scripts/parallellize_cargo_check.py ${{ matrix.instance_idx }} ubuntu-concolic: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: ubuntu steps: - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - uses: actions/checkout@v3 + - name: Install curl + run: sudo apt-get update && sudo apt-get install clang + - uses: dtolnay/rust-toolchain@stable + - uses: actions/checkout@v4 - uses: Swatinem/rust-cache@v2 with: { shared-key: "ubuntu" } - name: Install smoke test deps @@ -221,208 +176,254 @@ jobs: run: ./libafl_concolic/test/smoke_test.sh python-bindings: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - name: Remove existing clang and LLVM - run: sudo apt purge llvm* clang* - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2 - with: - directory: ${{ runner.temp }}/llvm - version: 17 - name: Install deps - run: sudo apt-get install -y ninja-build python3-dev python3-pip python3-venv libz3-dev + run: sudo apt-get update && sudo apt-get install -y lsb-release wget software-properties-common gnupg ninja-build python3-dev python3-pip python3-venv libz3-dev - name: Install maturin - run: python3 -m pip install maturin - - uses: actions/checkout@v3 + run: cargo install --locked maturin + - uses: actions/checkout@v4 + - uses: ./.github/workflows/ubuntu-prepare - uses: Swatinem/rust-cache@v2 + with: { shared-key: "ubuntu" } - name: Run a maturin build - run: export LLVM_CONFIG=llvm-config-16 && cd ./bindings/pylibafl && python3 -m venv .env && . .env/bin/activate && pip install --upgrade --force-reinstall . && ./test.sh + run: export LLVM_CONFIG=llvm-config-${{env.MAIN_LLVM_VERSION}} && cd ./bindings/pylibafl && python3 -m venv .env && . .env/bin/activate && pip install --upgrade --force-reinstall . && ./test.sh - name: Run python test - run: . ./bindings/pylibafl/.env/bin/activate # && cd ./fuzzers/python_qemu/ && python3 fuzzer.py 2>&1 | grep "Bye" + run: . 
./bindings/pylibafl/.env/bin/activate # && cd ./fuzzers/binary_only/python_qemu/ && python3 fuzzer.py 2>&1 | grep "Bye" cargo-fmt: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 + env: + MAIN_LLVM_VERSION: 19 steps: - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly - override: true - components: rustfmt - - uses: actions/checkout@v3 - - name: Remove existing clang and LLVM - run: sudo apt purge llvm* clang* - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2 - with: - directory: ${{ runner.temp }}/llvm - version: 17 + - uses: actions/checkout@v4 + - uses: ./.github/workflows/ubuntu-prepare + - name: Add rustfmt nightly + shell: bash + run: rustup component add --toolchain nightly-x86_64-unknown-linux-gnu rustfmt + - uses: Swatinem/rust-cache@v2 + with: { shared-key: "ubuntu" } + - name: Installing black + run: python3 -m pip install black - name: Format Check run: ./scripts/fmt_all.sh check - fuzzers-preflight: + check-md-links: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 + - name: Install linkspector + shell: bash + run: sudo apt-get update && sudo apt-get install -y npm && npm install -g @umbrelladocs/linkspector + - name: Run linkspector + shell: bash + run: ./scripts/check_md_links.sh + # TODO: Use github action once it's fixed (https://github.com/UmbrellaDocs/action-linkspector/issues/20) + # - name: Run linkspector + # uses: umbrelladocs/action-linkspector@v1 + # with: + # fail_on_error: 'true' + # config_file: '.github/.linkspector.yml' + + msrv: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: taiki-e/install-action@cargo-hack + # Note: We currently only specify minimum rust versions for the default workspace members + - run: cargo hack check --rust-version -p libafl -p libafl_bolts -p libafl_derive -p libafl_cc -p libafl_targets + + fuzzers-preflight: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v4 - name: Fuzzer in CI Check run: ./scripts/check_tested_fuzzers.sh fuzzers: needs: - - ubuntu - fuzzers-preflight strategy: + fail-fast: true matrix: - os: [ ubuntu-latest ] + os: [ ubuntu-24.04 ] fuzzer: - - ./fuzzers/cargo_fuzz - - ./fuzzers/fuzzbench_fork_qemu - - ./fuzzers/libfuzzer_stb_image_sugar - - ./fuzzers/nyx_libxml2_standalone - - ./fuzzers/baby_fuzzer_gramatron - - ./fuzzers/tinyinst_simple - - ./fuzzers/baby_fuzzer_with_forkexecutor - - ./fuzzers/baby_no_std - - ./fuzzers/baby_fuzzer_swap_differential - - ./fuzzers/baby_fuzzer_grimoire - - ./fuzzers/baby_fuzzer - - ./fuzzers/libfuzzer_libpng_launcher - - ./fuzzers/libfuzzer_libpng_accounting - - ./fuzzers/forkserver_libafl_cc - - ./fuzzers/libfuzzer_libpng_tcp_manager - - ./fuzzers/backtrace_baby_fuzzers - - ./fuzzers/fuzzbench_qemu - - ./fuzzers/nyx_libxml2_parallel - - ./fuzzers/frida_gdiplus - - ./fuzzers/libfuzzer_stb_image_concolic - - ./fuzzers/nautilus_sync - - ./fuzzers/push_harness - - ./fuzzers/libfuzzer_libpng_centralized - - ./fuzzers/baby_fuzzer_nautilus - - ./fuzzers/fuzzbench_text - - ./fuzzers/libfuzzer_libpng_cmin - - ./fuzzers/forkserver_simple - - ./fuzzers/baby_fuzzer_unicode - - ./fuzzers/libfuzzer_libpng_norestart - - ./fuzzers/baby_fuzzer_multi - - ./fuzzers/libafl_atheris - - ./fuzzers/frida_libpng - - ./fuzzers/fuzzbench_ctx - - ./fuzzers/fuzzbench_forkserver_cmplog - - ./fuzzers/push_stage_harness - - ./fuzzers/libfuzzer_libmozjpeg - - ./fuzzers/libfuzzer_libpng_aflpp_ui - - ./fuzzers/libfuzzer_libpng - - ./fuzzers/baby_fuzzer_wasm - - ./fuzzers/fuzzbench - - 
./fuzzers/libfuzzer_stb_image - - ./fuzzers/fuzzbench_forkserver - # - ./fuzzers/libfuzzer_windows_asan - # - ./fuzzers/dynamic_analysis - - ./fuzzers/baby_fuzzer_minimizing - - ./fuzzers/frida_executable_libpng - - ./fuzzers/tutorial - - ./fuzzers/baby_fuzzer_tokens - - ./fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor - - ./fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor - - ./fuzzers/backtrace_baby_fuzzers/command_executor - - ./fuzzers/backtrace_baby_fuzzers/forkserver_executor - - ./fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor - - ./fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor + # Baby + - ./fuzzers/baby/baby_fuzzer_swap_differential + - ./fuzzers/baby/tutorial + - ./fuzzers/baby/baby_fuzzer + # - ./fuzzers/baby/backtrace_baby_fuzzers + - ./fuzzers/baby/baby_fuzzer_unicode + - ./fuzzers/baby/baby_fuzzer_minimizing + - ./fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor + - ./fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor + - ./fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor + - ./fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor + - ./fuzzers/baby/backtrace_baby_fuzzers/command_executor + - ./fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor + - ./fuzzers/baby/baby_fuzzer_custom_executor + + # Binary-only + - ./fuzzers/binary_only/fuzzbench_fork_qemu + - ./fuzzers/binary_only/frida_executable_libpng + - ./fuzzers/binary_only/frida_windows_gdiplus + - ./fuzzers/binary_only/frida_libpng + - ./fuzzers/binary_only/fuzzbench_qemu + - ./fuzzers/binary_only/intel_pt_baby_fuzzer + - ./fuzzers/binary_only/intel_pt_command_executor + - ./fuzzers/binary_only/tinyinst_simple + + # Forkserver + - ./fuzzers/forkserver/forkserver_simple + - ./fuzzers/forkserver/forkserver_libafl_cc + - ./fuzzers/forkserver/fuzzbench_forkserver + - ./fuzzers/forkserver/fuzzbench_forkserver_cmplog + - ./fuzzers/forkserver/libafl-fuzz + - ./fuzzers/forkserver/baby_fuzzer_with_forkexecutor + + # Full-system + - ./fuzzers/full_system/nyx_libxml2_standalone + - ./fuzzers/full_system/nyx_libxml2_parallel + + # Structure-aware + - ./fuzzers/structure_aware/nautilus_sync + - ./fuzzers/structure_aware/baby_fuzzer_grimoire + - ./fuzzers/structure_aware/baby_fuzzer_gramatron + - ./fuzzers/structure_aware/baby_fuzzer_tokens + - ./fuzzers/structure_aware/baby_fuzzer_multi + - ./fuzzers/structure_aware/baby_fuzzer_custom_input + - ./fuzzers/structure_aware/baby_fuzzer_nautilus + - ./fuzzers/structure_aware/forkserver_simple_nautilus + + # In-process + - ./fuzzers/fuzz_anything/cargo_fuzz + # - ./fuzzers/inprocess/dynamic_analysis + - ./fuzzers/inprocess/fuzzbench + - ./fuzzers/inprocess/fuzzbench_text + - ./fuzzers/inprocess/fuzzbench_ctx + - ./fuzzers/inprocess/libfuzzer_libmozjpeg + - ./fuzzers/inprocess/libfuzzer_libpng + - ./fuzzers/inprocess/libfuzzer_libpng_launcher + - ./fuzzers/inprocess/libfuzzer_libpng_accounting + - ./fuzzers/inprocess/libfuzzer_libpng_centralized + - ./fuzzers/inprocess/libfuzzer_libpng_cmin + - ./fuzzers/inprocess/libfuzzer_libpng_norestart + # - ./fuzzers/inprocess/libfuzzer_libpng_tcp_manager + - ./fuzzers/inprocess/libfuzzer_stb_image_sugar + - ./fuzzers/inprocess/libfuzzer_stb_image + # - ./fuzzers/structure_aware/libfuzzer_stb_image_concolic + # - ./fuzzers/inprocess/libfuzzer_windows_asan + # - ./fuzzers/inprocess/sqlite_centralized_multi_machine + + # Fuzz Anything + - ./fuzzers/fuzz_anything/push_harness + - ./fuzzers/fuzz_anything/push_stage_harness + - 
./fuzzers/fuzz_anything/libafl_atheris + - ./fuzzers/fuzz_anything/baby_no_std + - ./fuzzers/fuzz_anything/baby_fuzzer_wasm + runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/workflows/fuzzer-tester-prepare - name: Build and run example fuzzers (Linux) if: runner.os == 'Linux' shell: bash - run: RUN_ON_CI=1 LLVM_CONFIG=llvm-config ./scripts/test_fuzzer.sh ${{ matrix.fuzzer }} + run: RUN_ON_CI=1 LLVM_CONFIG=llvm-config-${{env.MAIN_LLVM_VERSION}} ./scripts/test_fuzzer.sh ${{ matrix.fuzzer }} changes: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 permissions: pull-requests: read outputs: qemu: ${{ steps.filter.outputs.qemu }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: dorny/paths-filter@v3 id: filter with: filters: | qemu: + - '.github/**' + - 'libafl/**' + - 'libafl_bolts/**' + - 'libafl_targets/**' - 'libafl_qemu/**' - 'fuzzers/*qemu*/**' fuzzers-qemu: - needs: changes + needs: + - changes if: ${{ needs.changes.outputs.qemu == 'true' }} strategy: matrix: - os: [ubuntu-latest] + os: [ubuntu-24.04] fuzzer: - - ./fuzzers/qemu_cmin - - ./fuzzers/qemu_systemmode - - ./fuzzers/qemu_coverage - - ./fuzzers/qemu_launcher + # Binary only + - ./fuzzers/binary_only/qemu_cmin + - ./fuzzers/binary_only/qemu_coverage + - ./fuzzers/binary_only/qemu_launcher + + # Full-system + - ./fuzzers/full_system/qemu_baremetal + # - ./fuzzers/full_system/qemu_linux_kernel + #- ./fuzzers/full_system/qemu_linux_process runs-on: [ self-hosted, qemu ] container: registry.gitlab.com/qemu-project/qemu/qemu/ubuntu2204:latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/workflows/qemu-fuzzer-tester-prepare - name: Build and run example QEMU fuzzers (Linux) if: runner.os == 'Linux' shell: bash - run: RUN_ON_CI=1 LLVM_CONFIG=llvm-config ./scripts/test_fuzzer.sh ${{ matrix.fuzzer }} + run: RUN_ON_CI=1 LLVM_CONFIG=llvm-config-${{env.MAIN_LLVM_VERSION}} ./scripts/test_fuzzer.sh ${{ matrix.fuzzer }} nostd-build: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@nightly with: - profile: minimal - toolchain: nightly - override: true components: rust-src - - uses: actions/checkout@v3 - - uses: Swatinem/rust-cache@v2 - name: Add targets run: rustup target add arm-linux-androideabi && rustup target add thumbv6m-none-eabi + - uses: actions/checkout@v4 + - uses: Swatinem/rust-cache@v2 - name: Build aarch64-unknown-none - run: cd ./fuzzers/baby_no_std && cargo +nightly build -Zbuild-std=core,alloc --target aarch64-unknown-none -v --release && cd ../.. + run: cd ./fuzzers/fuzz_anything/baby_no_std && cargo +nightly build -Zbuild-std=core,alloc --target aarch64-unknown-none -v --release && cd ../.. - name: run x86_64 until panic! - run: cd ./fuzzers/baby_no_std && cargo +nightly run || test $? -ne 0 || exit 1 + run: cd ./fuzzers/fuzz_anything/baby_no_std && cargo +nightly run || test $? 
-ne 0 || exit 1 - name: no_std tests run: cd ./libafl && cargo test --no-default-features nostd-clippy: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@nightly with: - profile: minimal - toolchain: nightly - override: true components: clippy, rust-src - - uses: actions/checkout@v3 - - uses: Swatinem/rust-cache@v2 - name: Add targets run: rustup target add arm-linux-androideabi && rustup target add thumbv6m-none-eabi + - uses: actions/checkout@v4 + - uses: Swatinem/rust-cache@v2 - name: libafl armv6m-none-eabi (32 bit no_std) clippy run: cd ./libafl && cargo clippy --target thumbv6m-none-eabi --no-default-features - name: Build no_std no_alloc bolts run: cd ./libafl_bolts && cargo +nightly build -Zbuild-std=core --target aarch64-unknown-none --no-default-features -v --release && cd ../ - build-docker: - runs-on: ubuntu-latest + format-toml: + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v3 + - name: Install taplo + run: curl -fsSL https://github.com/tamasfe/taplo/releases/latest/download/taplo-full-linux-x86_64.gz | gzip -d - | install -m 755 /dev/stdin /usr/local/bin/taplo + - uses: actions/checkout@v4 + - name: Run taplo + run: taplo format --check + + build-docker: + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v4 - name: Build docker run: docker build -t libafl . @@ -431,54 +432,51 @@ jobs: needs: - common steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/workflows/windows-tester-prepare - - name: Build fuzzers/frida_libpng - run: cd fuzzers/frida_libpng/ && cargo make test + - name: Build fuzzers/binary_only/frida_libpng + run: cd fuzzers/binary_only/frida_libpng/ && cargo make test windows-frida-libfuzzer-stb-image: runs-on: windows-latest needs: - common steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/workflows/windows-tester-prepare - - name: Build fuzzers/libfuzzer_stb_image - run: cd fuzzers/libfuzzer_stb_image && cargo build --release + - name: Build fuzzers/inprocess/libfuzzer_stb_image + run: cd fuzzers/inprocess/libfuzzer_stb_image && cargo build --release windows-frida-gdiplus: runs-on: windows-latest needs: - common steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: ./.github/workflows/windows-tester-prepare - - name: Build fuzzers/frida_gdiplus - run: cd fuzzers/frida_gdiplus/ && cargo make test && cargo make test_cmplog + - name: Build fuzzers/binary_only/frida_windows_gdiplus + run: cd fuzzers/binary_only/frida_windows_gdiplus/ && cargo make test && cargo make test_cmplog windows-tinyinst-simple: runs-on: windows-latest needs: - common steps: - - uses: actions/checkout@v3 - - uses: ./.github/workflows/windows-tester-prepare - name: install cxx bridge run: cargo install cxxbridge-cmd - - name: Build fuzzers/tinyinst_simple - run: cd fuzzers/tinyinst_simple/ && cargo make test + - uses: actions/checkout@v4 + - uses: ./.github/workflows/windows-tester-prepare + - name: Build fuzzers/binary_only/tinyinst_simple + run: cd fuzzers/binary_only/tinyinst_simple/ && cargo make test windows-clippy: runs-on: windows-latest needs: - common steps: - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - uses: actions/checkout@v3 - - uses: ./.github/workflows/windows-tester-prepare + - uses: dtolnay/rust-toolchain@stable + - uses: actions/checkout@v4 + - uses: ./.github/workflows/windows-tester-prepare - uses: Swatinem/rust-cache@v2 - name: Run real clippy, not the 
fake one shell: pwsh @@ -487,17 +485,14 @@ jobs: macos: runs-on: macOS-latest steps: - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable + - uses: dtolnay/rust-toolchain@stable - name: Add nightly clippy run: rustup toolchain install nightly --component clippy --allow-downgrade && rustup default nightly - name: Install deps - run: brew install z3 gtk+3 + run: brew install z3 gtk+3 python - name: Install cxxbridge run: cargo install cxxbridge-cmd - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: Swatinem/rust-cache@v2 - name: MacOS Build run: cargo build --verbose @@ -511,24 +506,20 @@ jobs: ios: runs-on: macOS-latest steps: - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable + - uses: dtolnay/rust-toolchain@stable - name: install ios run: rustup target add aarch64-apple-ios - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: Swatinem/rust-cache@v2 - name: Build iOS - run: cargo build --target aarch64-apple-ios && cd libafl_frida && cargo build --target aarch64-apple-ios && cd .. + run: PYO3_CROSS_PYTHON_VERSION=$(python3 -c "print('{}.{}'.format(__import__('sys').version_info.major, __import__('sys').version_info.minor))") cargo build --target aarch64-apple-ios && cd libafl_frida && cargo build --target aarch64-apple-ios && cd .. android: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable + - name: Install curl + run: sudo apt-get update && sudo apt-get install clang + - uses: dtolnay/rust-toolchain@stable - uses: nttld/setup-ndk@v1 with: ndk-version: r25b @@ -536,10 +527,10 @@ jobs: run: rustup target add aarch64-linux-android - name: install cargo ndk run: cargo install cargo-ndk - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: Swatinem/rust-cache@v2 - name: Build Android - run: cd libafl && cargo ndk -t arm64-v8a build --release + run: cd libafl && PYO3_CROSS_PYTHON_VERSION=$(python3 -c "print('{}.{}'.format(__import__('sys').version_info.major, __import__('sys').version_info.minor))") cargo ndk -t arm64-v8a build --release #run: cargo build --target aarch64-linux-android # TODO: Figure out how to properly build stuff with clang @@ -550,36 +541,3 @@ jobs: # run: clang -v #- name: Windows Test # run: C:\Rust\.cargo\bin\cargo.exe test --verbose - - freebsd: - runs-on: ubuntu-22.04 - name: Simple build in FreeBSD - steps: - - uses: actions/checkout@v3 - - name: Test in FreeBSD - id: test - uses: vmactions/freebsd-vm@v1 - with: - usesh: true - sync: rsync - copyback: false - mem: 2048 - release: 13.2 - prepare: | - pkg install -y curl bash sudo llvm16 - curl https://sh.rustup.rs -sSf | sh -s -- -y - - run: | - freebsd-version - . 
"$HOME/.cargo/env" - rustup toolchain install nightly - export LLVM_CONFIG=/usr/local/bin/llvm-config16 - pwd - ls -lah - echo "local/bin" - ls -lah /usr/local/bin/ - which llvm-config - chmod +x ./scripts/clippy.sh - bash ./scripts/shmem_limits_fbsd.sh - bash ./scripts/clippy.sh - cargo test diff --git a/.github/workflows/fuzzer-tester-prepare/action.yml b/.github/workflows/fuzzer-tester-prepare/action.yml index f5acdf7cdd..fc598ee75f 100644 --- a/.github/workflows/fuzzer-tester-prepare/action.yml +++ b/.github/workflows/fuzzer-tester-prepare/action.yml @@ -3,63 +3,38 @@ description: Sets up the Rust environment for the CI workflow runs: using: composite steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: submodules: true fetch-depth: 0 + - uses: ./.github/workflows/ubuntu-prepare - uses: Swatinem/rust-cache@v2 with: { shared-key: "${{ runner.os }}-shared-fuzzer-cache" } - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - name: Add stable clippy + - name: Install fuzzers deps shell: bash - run: rustup toolchain install stable --component clippy --allow-downgrade - - name: Add nightly clippy + run: sudo apt-get update && sudo apt-get install -y nasm nlohmann-json3-dev gcc-aarch64-linux-gnu g++-aarch64-linux-gnu gcc-mipsel-linux-gnu g++-mipsel-linux-gnu gcc-powerpc-linux-gnu g++-powerpc-linux-gnu libc6-dev-i386-cross libc6-dev libc6-dev-i386 lib32gcc-11-dev lib32stdc++-11-dev libgtk-3-dev pax-utils python3-msgpack python3-jinja2 + - name: enable mult-thread for `make` shell: bash - run: rustup toolchain install nightly --component clippy --allow-downgrade + run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)" - name: Add no_std toolchain shell: bash run: rustup toolchain install nightly-x86_64-unknown-linux-gnu ; rustup component add rust-src --toolchain nightly-x86_64-unknown-linux-gnu - name: Add wasm target shell: bash run: rustup target add wasm32-unknown-unknown - - name: Remove obsolete llvm (Linux) - if: runner.os == 'Linux' - shell: bash - run: sudo apt purge -y llvm* clang* - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2 - with: - directory: ${{ runner.temp }}/llvm - version: 17 - - name: Install deps - shell: bash - run: sudo apt update && sudo apt install -y nasm nlohmann-json3-dev ninja-build gcc-arm-linux-gnueabi g++-arm-linux-gnueabi gcc-aarch64-linux-gnu g++-aarch64-linux-gnu gcc-mipsel-linux-gnu g++-mipsel-linux-gnu gcc-powerpc-linux-gnu g++-powerpc-linux-gnu libc6-dev-i386-cross libc6-dev libc6-dev-i386 lib32gcc-11-dev lib32stdc++-11-dev libgtk-3-dev pax-utils libz3-dev - - name: pip install - shell: bash - run: python3 -m pip install msgpack jinja2 find_libpython - - name: enable mult-thread for `make` - shell: bash - run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)" - name: install cargo-make - uses: baptiste0928/cargo-install@v1.3.0 + uses: baptiste0928/cargo-install@v3 with: crate: cargo-make - name: install wasm-pack - uses: baptiste0928/cargo-install@v1.3.0 + uses: baptiste0928/cargo-install@v3 with: crate: wasm-pack - name: install cxxbridge-cmd - uses: baptiste0928/cargo-install@v1.3.0 + uses: baptiste0928/cargo-install@v3 with: crate: cxxbridge-cmd - name: install chrome uses: browser-actions/setup-chrome@v1 with: chrome-version: stable - - name: Symlink Headers - if: runner.os == 'Linux' - shell: bash - run: sudo ln -s /usr/include/asm-generic /usr/include/asm diff --git a/.github/workflows/qemu-fuzzer-tester-prepare/action.yml b/.github/workflows/qemu-fuzzer-tester-prepare/action.yml index 
23a1af3a2a..adbf97ad50 100644 --- a/.github/workflows/qemu-fuzzer-tester-prepare/action.yml +++ b/.github/workflows/qemu-fuzzer-tester-prepare/action.yml @@ -3,45 +3,21 @@ description: Sets up the QEMU fuzzers environment runs: using: composite steps: - - uses: actions/checkout@v3 - with: - submodules: true - fetch-depth: 0 - - name: Install deps + - name: Install QEMU deps shell: bash - run: apt update && apt install -y nasm ninja-build libc6-dev libgtk-3-dev pax-utils libz3-dev wget qemu-utils libsqlite3-dev gcc-arm-none-eabi sudo gcc g++ build-essential gcc-arm-linux-gnueabi g++-arm-linux-gnueabi - - uses: Swatinem/rust-cache@v2 - with: { shared-key: "${{ runner.os }}-shared-fuzzer-cache" } - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - - name: Add stable clippy - shell: bash - run: rustup toolchain install stable --component clippy --allow-downgrade - - name: Add nightly clippy - shell: bash - run: rustup toolchain install nightly --component clippy --allow-downgrade - - name: Remove obsolete llvm (Linux) - if: runner.os == 'Linux' - shell: bash - run: sudo apt purge -y llvm* clang* - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2 - with: - directory: ${{ runner.temp }}/llvm - version: 17 - - name: pip install - shell: bash - run: python3 -m pip install msgpack jinja2 find_libpython + run: apt-get update && apt-get install -y qemu-utils sudo python3-msgpack python3-jinja2 curl python3-dev + - uses: dtolnay/rust-toolchain@stable - name: enable mult-thread for `make` shell: bash run: export MAKEFLAGS="-j$(expr $(nproc) \+ 1)" - name: install cargo-make - uses: baptiste0928/cargo-install@v1.3.0 + uses: baptiste0928/cargo-install@v3 with: crate: cargo-make - - name: Symlink Headers - if: runner.os == 'Linux' - shell: bash - run: sudo ln -s /usr/include/asm-generic /usr/include/asm + - uses: actions/checkout@v4 + with: + submodules: true + fetch-depth: 0 + - uses: ./.github/workflows/ubuntu-prepare + - uses: Swatinem/rust-cache@v2 + with: { shared-key: "${{ runner.os }}-shared-fuzzer-cache" } diff --git a/.github/workflows/ubuntu-prepare/action.yml b/.github/workflows/ubuntu-prepare/action.yml index 9883b892f1..a1f5e439a7 100644 --- a/.github/workflows/ubuntu-prepare/action.yml +++ b/.github/workflows/ubuntu-prepare/action.yml @@ -3,25 +3,34 @@ description: Sets up the Rust environment for the CI workflow runs: using: composite steps: - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - components: llvm-tools - - name: Remove existing clang and LLVM - shell: bash - run: sudo apt purge llvm* clang* - name: Install and cache deps shell: bash - run: sudo apt update && sudo apt install ninja-build clang-format shellcheck libgtk-3-dev gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev libz3-dev + run: sudo apt-get update && sudo apt-get install -y curl lsb-release wget software-properties-common gnupg ninja-build shellcheck pax-utils nasm libsqlite3-dev libc6-dev libgtk-3-dev gcc g++ gcc-arm-none-eabi gcc-arm-linux-gnueabi g++-arm-linux-gnueabi libslirp-dev libz3-dev build-essential cmake + - uses: dtolnay/rust-toolchain@stable + - name: Add stable clippy + shell: bash + run: rustup toolchain install stable --component clippy --allow-downgrade + - name: Add nightly clippy + shell: bash + run: rustup toolchain install nightly --component clippy --allow-downgrade + - name: Remove existing clang and LLVM + shell: bash + run: sudo apt-get purge -y *llvm* *clang* lld* lldb* opt* - name: Install cargo-hack 
shell: bash run: curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-x86_64-unknown-linux-gnu.tar.gz | tar xzf - -C ~/.cargo/bin - name: Add nightly shell: bash run: rustup toolchain install nightly --allow-downgrade - - name: Install LLVM and Clang - uses: KyleMayes/install-llvm-action@v2 - with: - directory: ${{ runner.temp }}/llvm - version: 17 \ No newline at end of file + - name: Default to nightly + shell: bash + run: rustup default nightly + - name: Add LLVM in sources list + shell: bash + run: | + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh ${{env.MAIN_LLVM_VERSION}} all + - name: Symlink Headers + shell: bash + run: sudo ln -s /usr/include/asm-generic /usr/include/asm diff --git a/.github/workflows/windows-tester-prepare/action.yml b/.github/workflows/windows-tester-prepare/action.yml index 79e2f3c02c..a76f1a89e6 100644 --- a/.github/workflows/windows-tester-prepare/action.yml +++ b/.github/workflows/windows-tester-prepare/action.yml @@ -3,11 +3,10 @@ description: Sets up the Rust environment for the CI workflow runs: using: composite steps: - - uses: actions-rs/toolchain@v1 + - uses: dtolnay/rust-toolchain@nightly with: - profile: minimal - toolchain: stable - - uses: actions/checkout@v3 + components: llvm-tools, clippy, rustfmt + - uses: actions/checkout@v4 - uses: Swatinem/rust-cache@v2 - name: Build docs shell: pwsh @@ -18,4 +17,4 @@ runs: run: echo "LIBCLANG_PATH=$((gcm clang).source -replace "clang.exe")" >> $env:GITHUB_ENV - name: install cargo-make shell: pwsh - run: cargo install --force cargo-make \ No newline at end of file + run: cargo install --force cargo-make diff --git a/.gitignore b/.gitignore index c1b1522943..ea92a630b0 100644 --- a/.gitignore +++ b/.gitignore @@ -6,7 +6,9 @@ vendor .DS_Store .env +.vscode +*.test *.tmp *.swp *.o @@ -17,6 +19,7 @@ vendor *.bin *.dll *.exe +*.dylib *.dSYM *.obj @@ -31,17 +34,15 @@ callgrind.out.* perf.data perf.data.old -.vscode +.vscode/settings.json test.dict .idea/ # Ignore all built fuzzers -fuzzer_* AFLplusplus test_* *_fuzzer -*_harness # Ignore common dummy and logfiles *.log @@ -72,6 +73,5 @@ libafl_nyx/packer # common harness names harness program -fuzzer fuzzer_libpng* forkserver_simple diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 135475d8d4..7e65ffc9f8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,4 +6,8 @@ repos: - id: fmt name: fmt entry: scripts/fmt_all.sh check - language: script \ No newline at end of file + language: script +- repo: https://github.com/ComPWA/taplo-pre-commit + rev: v0.9.3 + hooks: + - id: taplo-format diff --git a/.vscode/settings.json.default b/.vscode/settings.json.default new file mode 100644 index 0000000000..59ac7c1a66 --- /dev/null +++ b/.vscode/settings.json.default @@ -0,0 +1,12 @@ +{ + "rust-analyzer.cargo.buildScripts.overrideCommand": [ + "cargo", + "check", + "--message-format=json", + ], + "rust-analyzer.check.overrideCommand": [ + "cargo", + "check", + "--message-format=json", + ] +} \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9dd4eb1800..81f90cdc48 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,15 +4,17 @@ For bugs, feel free to open issues or contact us directly. 
Thank you for your su ## Pull Request guideline -Even though we will gladly assist you in finishing up your PR, try to +Even though we will gladly assist you in finishing up your PR, try to: + - keep all the crates compiling with *stable* rust (hide the eventual non-stable code under [`cfg`s](https://github.com/AFLplusplus/LibAFL/blob/main/libafl/build.rs#L26)) - run `cargo +nightly fmt` on your code before pushing -- check the output of `cargo clippy --all` or `./clippy.sh` +- check the output of `cargo clippy --all` or `./scripts/clippy.sh` (On windows use `.\scripts\clippy.ps1`) - run `cargo build --no-default-features` to check for `no_std` compatibility (and possibly add `#[cfg(feature = "std")]`) to hide parts of your code. +- Please add and describe your changes to MIGRATION.md if you change the APIs. Some of the parts in this list may be hard, don't be afraid to open a PR if you cannot fix them by yourself, so we can help. ### Pre-commit hooks Some of these checks can be performed automatically during commit using [pre-commit](https://pre-commit.com/). -Once the package is installed, simply run `pre-commit install` to enable the hooks, the checks will run automatically before the commit becomes effective. \ No newline at end of file +Once the package is installed, simply run `pre-commit install` to enable the hooks, the checks will run automatically before the commit becomes effective. diff --git a/Cargo.toml b/Cargo.toml index 88fe52fe5c..0014cb4303 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,46 +1,164 @@ [workspace] resolver = "2" members = [ - "libafl", - "libafl_bolts", - "libafl_cc", - "libafl_concolic/symcc_runtime", - "libafl_concolic/symcc_libafl", - "libafl_concolic/test/dump_constraints", - "libafl_concolic/test/runtime_test", - "libafl_derive", - "libafl_frida", - "libafl_libfuzzer", - "libafl_nyx", - "libafl_qemu", - "libafl_sugar", - "libafl_targets", - "libafl_tinyinst", - "utils/build_and_test_fuzzers", - "utils/deexit", - "utils/libafl_benches", - "utils/gramatron/construct_automata", + "libafl", + "libafl_bolts", + "libafl_cc", + "libafl_concolic/symcc_runtime", + "libafl_concolic/symcc_libafl", + "libafl_derive", + "libafl_frida", + "libafl_intelpt", + "libafl_libfuzzer", + "libafl_nyx", + "libafl_targets", + "libafl_tinyinst", + "libafl_qemu", + "libafl_qemu/libafl_qemu_build", + "libafl_qemu/libafl_qemu_sys", + "libafl_sugar", + "libafl_concolic/test/dump_constraints", + "libafl_concolic/test/runtime_test", + "utils/build_and_test_fuzzers", + "utils/deexit", + "utils/drcov_utils", + "utils/gramatron/construct_automata", + "utils/libafl_benches", + "utils/libafl_jumper", + "bindings/pylibafl", ] default-members = [ - "libafl", - "libafl_bolts", - "libafl_cc", - "libafl_derive", - "libafl_targets", + "libafl", + "libafl_bolts", + "libafl_cc", + "libafl_derive", + "libafl_targets", ] + exclude = [ - "bindings", - "fuzzers", - "libafl_qemu/libafl_qemu_build", - "libafl_qemu/libafl_qemu_sys", - "utils/noaslr", - "utils/gdb_qemu", - "utils/libafl_fmt", - "scripts", + "fuzzers", + "libafl_libfuzzer_runtime", + "utils/noaslr", + "utils/gdb_qemu", + "utils/libafl_fmt", + "utils/desyscall", + "utils/multi_machine_generator", + "scripts", + # additional crates + "libafl_concolic/test/symcc/util/symcc_fuzzing_helper", ] [workspace.package] -version = "0.13.0" +version = "0.14.1" +license = "MIT OR Apache-2.0" + +[workspace.dependencies] +# Internal deps +libafl = { path = "./libafl", version = "0.14.1", default-features = false } +libafl_bolts = { path = 
"./libafl_bolts", version = "0.14.1", default-features = false } +libafl_cc = { path = "./libafl_cc", version = "0.14.1", default-features = false } +symcc_runtime = { path = "./libafl_concolic/symcc_runtime", version = "0.14.1", default-features = false } +symcc_libafl = { path = "./libafl_concolic/symcc_libafl", version = "0.14.1", default-features = false } +libafl_derive = { path = "./libafl_derive", version = "0.14.1", default-features = false } +libafl_frida = { path = "./libafl_frida", version = "0.14.1", default-features = false } +libafl_intelpt = { path = "./libafl_intelpt", version = "0.14.1", default-features = false } +libafl_libfuzzer = { path = "./libafl_libfuzzer", version = "0.14.1", default-features = false } +libafl_nyx = { path = "./libafl_nyx", version = "0.14.1", default-features = false } +libafl_targets = { path = "./libafl_targets", version = "0.14.1", default-features = false } +libafl_tinyinst = { path = "./libafl_tinyinst", version = "0.14.1", default-features = false } +libafl_qemu = { path = "./libafl_qemu", version = "0.14.1", default-features = false } +libafl_qemu_build = { path = "./libafl_qemu/libafl_qemu_build", version = "0.14.1", default-features = false } +libafl_qemu_sys = { path = "./libafl_qemu/libafl_qemu_sys", version = "0.14.1", default-features = false } +libafl_sugar = { path = "./libafl_sugar", version = "0.14.1", default-features = false } +dump_constraints = { path = "./libafl_concolic/test/dump_constraints", version = "0.14.1", default-features = false } +runtime_test = { path = "./libafl_concolic/test/runtime_test", version = "0.14.1", default-features = false } +build_and_test_fuzzers = { path = "./utils/build_and_test_fuzzers", version = "0.14.1", default-features = false } +deexit = { path = "./utils/deexit", version = "0.14.1", default-features = false } +drcov_utils = { path = "./utils/drcov_utils", version = "0.14.1", default-features = false } +construct_automata = { path = "./utils/gramatron/construct_automata", version = "0.14.1", default-features = false } +libafl_benches = { path = "./utils/libafl_benches", version = "0.14.1", default-features = false } +libafl_jumper = { path = "./utils/libafl_jumper", version = "0.14.1", default-features = false } + +# External deps +ahash = { version = "0.8.11", default-features = false } # The hash function already used in hashbrown +arbitrary-int = "1.2.7" # arbitrary sized integers, useful in combination with bitfields (bitbybit crate) +backtrace = { version = "0.3.74", default-features = false } # Used to get the stacktrace in StacktraceObserver +bindgen = "0.70.1" +bitbybit = "1.3.2" # bitfields, use this for bit fields and bit enums +clap = "4.5.18" +cc = "1.1.21" +cmake = "0.1.51" +document-features = "0.2.10" +hashbrown = { version = "0.14.5", default-features = false } # A faster hashmap, nostd compatible +libc = "0.2.159" # For (*nix) libc +libipt = "0.2.0" +log = "0.4.22" +meminterval = "0.4.1" +mimalloc = { version = "0.1.43", default-features = false } +nix = { version = "0.29.0", default-features = false } +num_enum = { version = "0.7.3", default-features = false } +num-traits = { version = "0.2.19", default-features = false } +paste = "1.0.15" +postcard = { version = "1.0.10", features = [ + "alloc", +], default-features = false } # no_std compatible serde serialization format +pyo3 = "0.23.2" +pyo3-build-config = "0.23.2" +rangemap = "1.5.1" +regex = "1.10.6" +rustversion = "1.0.17" +serde = { version = "1.0.210", default-features = false } # serialization lib +serial_test = 
{ version = "3.1.1", default-features = false } +serde_json = { version = "1.0.128", default-features = false } +serde_yaml = { version = "0.9.34" } # For parsing the injections yaml file +static_assertions = "1.1.0" +strum = "0.26.3" +strum_macros = "0.26.4" +toml = "0.8.19" # For parsing the injections toml file +typed-builder = "0.20.0" # Implement the builder pattern at compiletime +uuid = { version = "1.10.0", features = ["serde", "v4"] } +which = "6.0.3" +windows = "0.58.0" +z3 = "0.12.1" + + +[workspace.lints.rust] +# Forbid +unexpected_cfgs = "forbid" + +# Allow +incomplete_features = "allow" +ambiguous_glob_reexports = "allow" + + +[workspace.lints.clippy] +# Deny +all = { level = "deny", priority = -1 } +pedantic = { level = "deny", priority = -1 } +cargo_common_metadata = "deny" + +# Warn +cargo = { level = "warn", priority = -1 } +negative_feature_names = "warn" + +# Allow +unreadable_literal = "allow" +type_repetition_in_bounds = "allow" +missing_errors_doc = "allow" +cast_possible_truncation = "allow" +used_underscore_binding = "allow" +ptr_as_ptr = "allow" +missing_panics_doc = "allow" +module_name_repetitions = "allow" +unsafe_derive_deserialize = "allow" +similar_names = "allow" +too_many_lines = "allow" + + +[workspace.lints.rustdoc] +# Deny +broken_intra_doc_links = "deny" + [profile.release] lto = true diff --git a/Dockerfile b/Dockerfile index 128b722afa..1807afcc78 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,8 +3,10 @@ FROM rust:1.76.0 AS libafl LABEL "maintainer"="afl++ team " LABEL "about"="LibAFL Docker image" +# Install cargo-binstall to download the sccache build +RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash # install sccache to cache subsequent builds of dependencies -RUN cargo install --locked sccache +RUN cargo binstall --no-confirm sccache ENV HOME=/root ENV SCCACHE_CACHE_SIZE="1G" @@ -22,12 +24,11 @@ RUN rustup component add rustfmt clippy # Install clang 18, common build tools ENV LLVM_VERSION=18 RUN apt update && apt install -y build-essential gdb git wget python3-venv ninja-build lsb-release software-properties-common gnupg cmake -# Workaround until https://github.com/llvm/llvm-project/issues/62475 is resolved RUN set -ex &&\ - echo "deb http://apt.llvm.org/bookworm/ llvm-toolchain-bookworm-${LLVM_VERSION} main" > /etc/apt/sources.list.d/apt.llvm.org.list &&\ - wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc &&\ - apt update &&\ - apt-get install -y clang-${LLVM_VERSION} lldb-${LLVM_VERSION} lld-${LLVM_VERSION} clangd-${LLVM_VERSION} clang-tidy-${LLVM_VERSION} clang-format-${LLVM_VERSION} clang-tools-${LLVM_VERSION} llvm-${LLVM_VERSION}-dev lld-${LLVM_VERSION} lldb-${LLVM_VERSION} llvm-${LLVM_VERSION}-tools libomp-${LLVM_VERSION}-dev libc++-${LLVM_VERSION}-dev libc++abi-${LLVM_VERSION}-dev libclang-common-${LLVM_VERSION}-dev libclang-${LLVM_VERSION}-dev libclang-cpp${LLVM_VERSION}-dev libunwind-${LLVM_VERSION}-dev libclang-rt-${LLVM_VERSION}-dev libpolly-${LLVM_VERSION}-dev + wget https://apt.llvm.org/llvm.sh &&\ + chmod +x llvm.sh &&\ + ./llvm.sh ${LLVM_VERSION} + # Copy a dummy.rs and Cargo.toml first, so that dependencies are cached WORKDIR /libafl @@ -39,6 +40,10 @@ COPY scripts/dummy.rs libafl_derive/src/lib.rs COPY libafl/Cargo.toml libafl/build.rs libafl/README.md libafl/ COPY scripts/dummy.rs libafl/src/lib.rs +# Set up LLVM aliases +COPY scripts/createAliases.sh libafl/ +RUN bash 
libafl/createAliases.sh ${LLVM_VERSION} + COPY libafl_bolts/Cargo.toml libafl_bolts/build.rs libafl_bolts/README.md libafl_bolts/ COPY libafl_bolts/examples libafl_bolts/examples COPY scripts/dummy.rs libafl_bolts/src/lib.rs @@ -47,6 +52,9 @@ COPY libafl_frida/Cargo.toml libafl_frida/build.rs libafl_frida/ COPY scripts/dummy.rs libafl_frida/src/lib.rs COPY libafl_frida/src/gettls.c libafl_frida/src/gettls.c +COPY libafl_intelpt/Cargo.toml libafl_intelpt/README.md libafl_intelpt/ +COPY scripts/dummy.rs libafl_intelpt/src/lib.rs + COPY libafl_qemu/Cargo.toml libafl_qemu/build.rs libafl_qemu/build_linux.rs libafl_qemu/ COPY scripts/dummy.rs libafl_qemu/src/lib.rs @@ -59,6 +67,9 @@ COPY scripts/dummy.rs libafl_qemu/libafl_qemu_sys/src/lib.rs COPY libafl_sugar/Cargo.toml libafl_sugar/ COPY scripts/dummy.rs libafl_sugar/src/lib.rs +COPY bindings/pylibafl/Cargo.toml bindings/pylibafl/Cargo.toml +COPY bindings/pylibafl/src bindings/pylibafl/src + COPY libafl_cc/Cargo.toml libafl_cc/Cargo.toml COPY libafl_cc/build.rs libafl_cc/build.rs COPY libafl_cc/src libafl_cc/src @@ -131,10 +142,13 @@ COPY libafl_concolic/symcc_runtime libafl_concolic/symcc_runtime COPY libafl_concolic/test libafl_concolic/test COPY libafl_nyx/src libafl_nyx/src RUN touch libafl_nyx/src/lib.rs +COPY libafl_libfuzzer_runtime libafl_libfuzzer_runtime COPY libafl_libfuzzer/src libafl_libfuzzer/src -COPY libafl_libfuzzer/libafl_libfuzzer_runtime libafl_libfuzzer/libafl_libfuzzer_runtime +COPY libafl_libfuzzer/runtime libafl_libfuzzer/runtime COPY libafl_libfuzzer/build.rs libafl_libfuzzer/build.rs RUN touch libafl_libfuzzer/src/lib.rs +COPY libafl_intelpt/src libafl_intelpt/src +RUN touch libafl_intelpt/src/lib.rs RUN cargo build && cargo build --release # Copy fuzzers over diff --git a/MIGRATION.md b/MIGRATION.md new file mode 100644 index 0000000000..274c48371e --- /dev/null +++ b/MIGRATION.md @@ -0,0 +1,6 @@ +# Pre 0.9 -> 0.9 +- [Migrating from LibAFL <0.9 to 0.9](https://aflplus.plus/libafl-book/design/migration-0.9.html) + +# 0.14.0 -> 0.14.1 +- Removed `with_observers` from `Executor` trait. +- `MmapShMemProvider::new_shmem_persistent` has been removed in favour of `MmapShMem::persist`. You probably want to do something like this: `let shmem = MmapShMemProvider::new()?.new_shmem(size)?.persist()?;` diff --git a/README.md b/README.md index bdc1d9e9ae..f9053be7f1 100644 --- a/README.md +++ b/README.md @@ -4,17 +4,7 @@ Advanced Fuzzing Library - Slot your own fuzzers together and extend their features using Rust. -LibAFL is written and maintained by - - * [Andrea Fioraldi](https://twitter.com/andreafioraldi) - * [Dominik Maier](https://twitter.com/domenuk) - * [s1341](https://twitter.com/srubenst1341) - * [Dongjia Zhang](https://github.com/tokatoka) - * [Addison Crump](https://github.com/addisoncrump) - -## Why LibAFL? - -LibAFL gives you many of the benefits of an off-the-shelf fuzzer, while being completely customizable. +LibAFL is a collection of reusable pieces of fuzzers, written in Rust, it gives you many of the benefits of an off-the-shelf fuzzer, while being completely customizable. Some highlight features currently include: - `fast`: We do everything we can at compile time, keeping runtime overhead minimal. Users reach 120k execs/sec in frida-mode on a phone (using all cores). - `scalable`: `Low Level Message Passing`, `LLMP` for short, allows LibAFL to scale almost linearly over cores, and via TCP to multiple machines. @@ -23,103 +13,74 @@ feel free to add an AST-based input for structured fuzzing, and more. 
- `multi platform`: LibAFL was confirmed to work on *Windows*, *MacOS*, *Linux*, and *Android* on *x86_64* and *aarch64*. `LibAFL` can be built in `no_std` mode to inject LibAFL into obscure targets like embedded devices and hypervisors. - `bring your own target`: We support binary-only modes, like Frida-Mode, as well as multiple compilation passes for sourced-based instrumentation. Of course it's easy to add custom instrumentation backends. -## Overview +## Core concepts -LibAFL is a collection of reusable pieces of fuzzers, written in Rust. -It is fast, multi-platform, no_std compatible, and scales over cores and machines. +LibAFL is fast, multi-platform, no_std compatible, and scales over cores and machines. It offers a main crate that provide building blocks for custom fuzzers, [libafl](./libafl), a library containing common code that can be used for targets instrumentation, [libafl_targets](./libafl_targets), and a library providing facilities to wrap compilers, [libafl_cc](./libafl_cc). It offers integrations with popular instrumentation frameworks. At the moment, the supported backends are: ++ `SanitizerCoverage`, in [libafl_targets](./libafl_targets) ++ `Frida`, in [libafl_frida](./libafl_frida) ++ `QEMU` user-mode and system mode, including hooks for emulation, in [libafl_qemu](./libafl_qemu) ++ `TinyInst`, in [libafl_tinyinst](./libafl_tinyinst) by [elbiazo](https://github.com/elbiazo) -It offers a main crate that provide building blocks for custom fuzzers, [libafl](./libafl), a library containing common code that can be used for targets instrumentation, [libafl_targets](./libafl_targets), and a library providing facilities to wrap compilers, [libafl_cc](./libafl_cc). - -LibAFL offers integrations with popular instrumentation frameworks. At the moment, the supported backends are: - -+ SanitizerCoverage, in [libafl_targets](./libafl_targets) -+ Frida, in [libafl_frida](./libafl_frida) -+ QEMU user-mode and system mode, including hooks for emulation, in [libafl_qemu](./libafl_qemu) -+ TinyInst, in [libafl_tinyinst](./libafl_tinyinst) by [elbiazo](https://github.com/elbiazo) - -## Getting started - -1. Install the Dependecies -- The Rust development language. -We highly recommend *not* to use e.g. your Linux distribition package as this is likely outdated. So rather install -Rust directly, instructions can be found [here](https://www.rust-lang.org/tools/install). - -- LLVM tools -The LLVM tools (including clang, clang++) are needed (newer than LLVM 15.0.0 up to LLVM 18.1.3) -If you are using Debian/Ubuntu, again, we highly recommmend that you install the package from [here](https://apt.llvm.org/) - -(In `libafl_concolic`, we only support LLVM version newer than 18) - -- Cargo-make -We use cargo-make to build the fuzzers in `fuzzers/` directory. You can install it with - -```sh -cargo install cargo-make -``` - -2. Clone the LibAFL repository with +## Building and installing +#### Install the Dependencies +- **The Rust development language** + - We highly recommend *not* to use e.g. your Linux distribution package as this is likely outdated. So rather install Rust directly, instructions can be found [here](https://www.rust-lang.org/tools/install). 
+- **LLVM tools** + - The LLVM tools (including clang, clang++) are needed (newer than LLVM 15.0.0 up to LLVM 18.1.3) If you are using Debian/Ubuntu, again, we highly recommmend that you install the package from [here](https://apt.llvm.org/) + - (In `libafl_concolic`, we only support LLVM version newer than 18) +- Cargo-make: + - We use cargo-make to build the fuzzers in `fuzzers/` directory. You can install it with `cargo install cargo-make` + +#### Clone the LibAFL repository with ```sh git clone https://github.com/AFLplusplus/LibAFL ``` - -3. Build the library using - +#### Build the library using ```sh cargo build --release ``` - -4. Build the API documentation with - +#### Build the API documentation with ```sh cargo doc ``` - -5. Browse the LibAFL book (WIP!) with (requires [mdbook](https://rust-lang.github.io/mdBook/index.html)) - +#### Browse the LibAFL book (WIP!) with (requires [mdbook](https://rust-lang.github.io/mdBook/index.html)) ```sh cd docs && mdbook serve ``` - +## Getting started We collect all example fuzzers in [`./fuzzers`](./fuzzers/). Be sure to read their documentation (and source), this is *the natural way to get started!* - -You can run each example fuzzer with - ```sh cargo make run ``` +You can run each example fuzzer with this following command, as long as the fuzzer directory has `Makefile.toml` file. The best-tested fuzzer is [`./fuzzers/inprocess/libfuzzer_libpng`](./fuzzers/inprocess/libfuzzer_libpng), a multicore libfuzzer-like fuzzer using LibAFL for a libpng harness. -as long as the fuzzer directory has `Makefile.toml` file. +### Resources +- [Installation guide](./docs/src/getting_started/setup.md) +- [Online API documentation](https://docs.rs/libafl/) +- The LibAFL book (WIP) [online](https://aflplus.plus/libafl-book) or in the [repo](./docs/src/) +- Our research [paper](https://www.s3.eurecom.fr/docs/ccs22_fioraldi.pdf) +- Our RC3 [talk](http://www.youtube.com/watch?v=3RWkT1Q5IV0 "Fuzzers Like LEGO") explaining the core concepts +- Our Fuzzcon Europe [talk](https://www.youtube.com/watch?v=PWB8GIhFAaI "LibAFL: The Advanced Fuzzing Library") with a (a bit but not so much outdated) step-by-step discussion on how to build some example fuzzers +- The Fuzzing101 [solutions](https://github.com/epi052/fuzzing-101-solutions) & series of [blog posts](https://epi052.gitlab.io/notes-to-self/blog/2021-11-01-fuzzing-101-with-libafl/) by [epi](https://github.com/epi052) +- Blogpost on binary-only fuzzing lib libaf_qemu, [Hacking TMNF - Fuzzing the game server](https://blog.bricked.tech/posts/tmnf/part1/), by [RickdeJager](https://github.com/RickdeJager). +- [A LibAFL Introductory Workshop](https://www.atredis.com/blog/2023/12/4/a-libafl-introductory-workshop), by [Jordan Whitehead](https://github.com/jordan9001) -The best-tested fuzzer is [`./fuzzers/libfuzzer_libpng`](./fuzzers/libfuzzer_libpng), a multicore libfuzzer-like fuzzer using LibAFL for a libpng harness. 
+## Contributors -## Resources +LibAFL is written and maintained by -+ [Installation guide](./docs/src/getting_started/setup.md) - -+ [Online API documentation](https://docs.rs/libafl/) - -+ The LibAFL book (WIP) [online](https://aflplus.plus/libafl-book) or in the [repo](./docs/src/) - -+ Our research [paper](https://www.s3.eurecom.fr/docs/ccs22_fioraldi.pdf) - -+ Our RC3 [talk](http://www.youtube.com/watch?v=3RWkT1Q5IV0 "Fuzzers Like LEGO") explaining the core concepts - -+ Our Fuzzcon Europe [talk](https://www.youtube.com/watch?v=PWB8GIhFAaI "LibAFL: The Advanced Fuzzing Library") with a (a bit but not so much outdated) step-by-step discussion on how to build some example fuzzers - -+ The Fuzzing101 [solutions](https://github.com/epi052/fuzzing-101-solutions) & series of [blog posts](https://epi052.gitlab.io/notes-to-self/blog/2021-11-01-fuzzing-101-with-libafl/) by [epi](https://github.com/epi052) - -+ Blogpost on binary-only fuzzing lib libaf_qemu, [Hacking TMNF - Fuzzing the game server](https://blog.bricked.tech/posts/tmnf/part1/), by [RickdeJager](https://github.com/RickdeJager). - -+ [A LibAFL Introductory Workshop](https://www.atredis.com/blog/2023/12/4/a-libafl-introductory-workshop), by [Jordan Whitehead](https://github.com/jordan9001) - -## Contributing + * [Andrea Fioraldi](https://twitter.com/andreafioraldi) + * [Dominik Maier](https://twitter.com/domenuk) + * [s1341](https://twitter.com/srubenst1341) + * [Dongjia Zhang](https://github.com/tokatoka) + * [Addison Crump](https://github.com/addisoncrump) + * [Romain Malmain](https://github.com/rmalmain) Please check out [CONTRIBUTING.md](CONTRIBUTING.md) for the contributing guideline. ## Cite - If you use LibAFL for your academic work, please cite the following paper: ```bibtex diff --git a/bindings/pylibafl/Cargo.toml b/bindings/pylibafl/Cargo.toml index 9efebdf6d4..6f3c1db320 100644 --- a/bindings/pylibafl/Cargo.toml +++ b/bindings/pylibafl/Cargo.toml @@ -1,19 +1,30 @@ [package] name = "pylibafl" -version = "0.13.0" +description = "Python bindings for LibAFL" +version = "0.14.1" +license = "MIT OR Apache-2.0" +repository = "https://github.com/AFLplusplus/LibAFL/" +keywords = ["fuzzing", "testing", "security", "python"] edition = "2021" +categories = ["development-tools::testing", "emulators", "embedded", "os"] [dependencies] -pyo3 = { version = "0.18.3", features = ["extension-module"] } -pyo3-log = "0.8.1" -libafl_sugar = { path = "../../libafl_sugar", version = "0.13.0", features = ["python"] } -libafl_bolts = { path = "../../libafl_bolts", version = "0.13.0", features = ["python"] } +pyo3 = { workspace = true, features = ["extension-module"] } +pyo3-log = { version = "0.12.0" } +libafl_sugar = { path = "../../libafl_sugar", version = "0.14.1", features = [ + "python", +] } +libafl_bolts = { path = "../../libafl_bolts", version = "0.14.1", features = [ + "python", +] } [target.'cfg(target_os = "linux")'.dependencies] -libafl_qemu = { path = "../../libafl_qemu", version = "0.13.0", features = ["python"] } +libafl_qemu = { path = "../../libafl_qemu", version = "0.14.1", features = [ + "python", +] } [build-dependencies] -pyo3-build-config = { version = "0.17" } +pyo3-build-config = { workspace = true } [lib] name = "pylibafl" diff --git a/bindings/pylibafl/pyproject.toml b/bindings/pylibafl/pyproject.toml index 7e62ce01e8..4fa0ba21e3 100644 --- a/bindings/pylibafl/pyproject.toml +++ b/bindings/pylibafl/pyproject.toml @@ -4,16 +4,16 @@ build-backend = "maturin" [project] name = "PyLibAFL" -version = "0.10.1" +version = 
"0.14.1" description = "Advanced Fuzzing Library for Python" readme = "README.md" requires-python = ">=3.8" -license = {text = "Apache-2.0"} +license = { text = "Apache-2.0" } classifiers = [ - "License :: OSI Approved :: Apache Software License", - "License :: OSI Approved :: MIT License", - "Programming Language :: Rust", - "Topic :: Security", + "License :: OSI Approved :: Apache Software License", + "License :: OSI Approved :: MIT License", + "Programming Language :: Rust", + "Topic :: Security", ] [project.urls] diff --git a/bindings/pylibafl/src/lib.rs b/bindings/pylibafl/src/lib.rs index 017dae5211..537c820dbd 100644 --- a/bindings/pylibafl/src/lib.rs +++ b/bindings/pylibafl/src/lib.rs @@ -6,27 +6,27 @@ use pyo3::prelude::*; /// Returns error if python libafl setup failed. #[pymodule] #[pyo3(name = "pylibafl")] -pub fn python_module(py: Python, m: &PyModule) -> PyResult<()> { +pub fn python_module(m: &Bound<'_, PyModule>) -> PyResult<()> { pyo3_log::init(); - let modules = py.import("sys")?.getattr("modules")?; + let modules = m.py().import("sys")?.getattr("modules")?; - let sugar_module = PyModule::new(py, "sugar")?; - libafl_sugar::python_module(py, sugar_module)?; - m.add_submodule(sugar_module)?; + let sugar_module = PyModule::new(m.py(), "sugar")?; + libafl_sugar::python_module(&sugar_module)?; + m.add_submodule(&sugar_module)?; modules.set_item("pylibafl.sugar", sugar_module)?; #[cfg(target_os = "linux")] { - let qemu_module = PyModule::new(py, "qemu")?; - libafl_qemu::python_module(py, qemu_module)?; - m.add_submodule(qemu_module)?; + let qemu_module = PyModule::new(m.py(), "qemu")?; + libafl_qemu::python_module(&qemu_module)?; + m.add_submodule(&qemu_module)?; modules.set_item("pylibafl.qemu", qemu_module)?; } - let bolts_module = PyModule::new(py, "libafl_bolts")?; - libafl_bolts::pybind::python_module(py, bolts_module)?; - m.add_submodule(bolts_module)?; + let bolts_module = PyModule::new(m.py(), "libafl_bolts")?; + libafl_bolts::pybind::python_module(&bolts_module)?; + m.add_submodule(&bolts_module)?; modules.set_item("pylibafl.libafl_bolts", bolts_module)?; Ok(()) diff --git a/bindings/pylibafl/test.py b/bindings/pylibafl/test.py index 41d90c9e3e..1ad3ef8c1e 100644 --- a/bindings/pylibafl/test.py +++ b/bindings/pylibafl/test.py @@ -3,5 +3,7 @@ import ctypes import platform print("Starting to fuzz from python!") -fuzzer = sugar.InMemoryBytesCoverageSugar(input_dirs=["./in"], output_dir="out", broker_port=1337, cores=[0,1]) -fuzzer.run(lambda b: print("foo")) \ No newline at end of file +fuzzer = sugar.InMemoryBytesCoverageSugar( + input_dirs=["./in"], output_dir="out", broker_port=1337, cores=[0, 1] +) +fuzzer.run(lambda b: print("foo")) diff --git a/docs/listings/baby_fuzzer/listing-01/Cargo.toml b/docs/listings/baby_fuzzer/listing-01/Cargo.toml index b50bb52532..345a38ada2 100644 --- a/docs/listings/baby_fuzzer/listing-01/Cargo.toml +++ b/docs/listings/baby_fuzzer/listing-01/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "baby_fuzzer_listing_01" -version = "0.1.0" +version = "0.14.1" authors = ["Your Name "] edition = "2018" diff --git a/docs/listings/baby_fuzzer/listing-02/Cargo.toml b/docs/listings/baby_fuzzer/listing-02/Cargo.toml index cd43b8af18..586a3e9860 100644 --- a/docs/listings/baby_fuzzer/listing-02/Cargo.toml +++ b/docs/listings/baby_fuzzer/listing-02/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "baby_fuzzer_listing_02" -version = "0.1.0" +version = "0.14.1" authors = ["Your Name "] edition = "2018" diff --git a/docs/listings/baby_fuzzer/listing-03/Cargo.toml 
b/docs/listings/baby_fuzzer/listing-03/Cargo.toml index cb12a810b0..abdf6ae8ad 100644 --- a/docs/listings/baby_fuzzer/listing-03/Cargo.toml +++ b/docs/listings/baby_fuzzer/listing-03/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "baby_fuzzer_listing_03" -version = "0.1.0" +version = "0.14.1" authors = ["Your Name "] edition = "2018" diff --git a/docs/listings/baby_fuzzer/listing-04/Cargo.toml b/docs/listings/baby_fuzzer/listing-04/Cargo.toml index a82804742a..70942eb1b8 100644 --- a/docs/listings/baby_fuzzer/listing-04/Cargo.toml +++ b/docs/listings/baby_fuzzer/listing-04/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "baby_fuzzer_listing_04" -version = "0.1.0" +version = "0.14.1" authors = ["Your Name "] edition = "2018" diff --git a/docs/listings/baby_fuzzer/listing-04/src/main.rs b/docs/listings/baby_fuzzer/listing-04/src/main.rs index dd64597244..84020b5e8c 100644 --- a/docs/listings/baby_fuzzer/listing-04/src/main.rs +++ b/docs/listings/baby_fuzzer/listing-04/src/main.rs @@ -3,7 +3,6 @@ extern crate libafl; extern crate libafl_bolts; use std::path::PathBuf; - use libafl::{ corpus::{InMemoryCorpus, OnDiskCorpus}, events::SimpleEventManager, @@ -15,7 +14,7 @@ use libafl::{ schedulers::QueueScheduler, state::StdState, }; -use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice}; +use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice, nonzero}; /* ANCHOR_END: use */ fn main() { @@ -77,7 +76,7 @@ fn main() { /* ANCHOR: generator */ // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/docs/listings/baby_fuzzer/listing-05/Cargo.toml b/docs/listings/baby_fuzzer/listing-05/Cargo.toml index b725a6e863..a91cd70a6d 100644 --- a/docs/listings/baby_fuzzer/listing-05/Cargo.toml +++ b/docs/listings/baby_fuzzer/listing-05/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "baby_fuzzer_listing_05" -version = "0.1.0" +version = "0.14.1" authors = ["Your Name "] edition = "2018" diff --git a/docs/listings/baby_fuzzer/listing-05/src/main.rs b/docs/listings/baby_fuzzer/listing-05/src/main.rs index 1377243fb7..d94710986a 100644 --- a/docs/listings/baby_fuzzer/listing-05/src/main.rs +++ b/docs/listings/baby_fuzzer/listing-05/src/main.rs @@ -15,7 +15,7 @@ use libafl::{ schedulers::QueueScheduler, state::StdState, }; -use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice}; +use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice, nonzero}; use std::path::PathBuf; /* ANCHOR_END: use */ @@ -105,7 +105,7 @@ fn main() { /* ANCHOR_END: executor_with_observer */ // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/docs/listings/baby_fuzzer/listing-06/Cargo.toml b/docs/listings/baby_fuzzer/listing-06/Cargo.toml index a5de46326b..88842caea1 100644 --- a/docs/listings/baby_fuzzer/listing-06/Cargo.toml +++ b/docs/listings/baby_fuzzer/listing-06/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "baby_fuzzer_listing_06" -version = "0.1.0" +version = "0.14.1" authors = ["Your Name "] edition = "2018" diff --git a/docs/listings/baby_fuzzer/listing-06/src/main.rs b/docs/listings/baby_fuzzer/listing-06/src/main.rs index 6faff74382..6d45c8ea97 100644 --- a/docs/listings/baby_fuzzer/listing-06/src/main.rs +++ b/docs/listings/baby_fuzzer/listing-06/src/main.rs @@ -1,6 +1,7 @@ /* 
ANCHOR: use */ extern crate libafl; extern crate libafl_bolts; +use std::num::NonZeroUsize; use libafl::{ corpus::{InMemoryCorpus, OnDiskCorpus}, @@ -17,7 +18,7 @@ use libafl::{ stages::mutational::StdMutationalStage, state::StdState, }; -use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice}; +use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice, nonzero}; use std::path::PathBuf; /* ANCHOR_END: use */ @@ -97,7 +98,7 @@ fn main() { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/docs/src/DEBUGGING.md b/docs/src/DEBUGGING.md index d6063f968b..1485e7f7b4 100644 --- a/docs/src/DEBUGGING.md +++ b/docs/src/DEBUGGING.md @@ -20,7 +20,7 @@ Try running the fuzzer with the `introspection` feature of the `libafl`. This wi ``` let map = StdMapObserver::from_mut_ptr("edges", EDGES_MAP.as_mut_ptr(), EDGES_MAP.len()); ``` -You should *never* use the `EDGES_MAP`'s size as this is just the size of the allocated size of the coverage map. Consider using something smaller or our default value `libafl_targets::LIBAFL_EDGES_MAP_SIZE_IN_USE`. +You should *never* use the `EDGES_MAP`'s size as this is just the size of the allocated size of the coverage map. Consider using something smaller or our default value `libafl_targets::LIBAFL_EDGES_MAP_DEFAULT_SIZE`. ## Q. I still have problems with my fuzzer. Finally, if you really have no idea what is going on, run your fuzzer with logging enabled. (You can use `env_logger`, `SimpleStdoutLogger`, `SimpleStderrLogger` from `libafl_bolts`. `fuzzbench_text` has an example to show how to use it.) (Don't forget to enable stdout and stderr), and you can open an issue or ask us in Discord. diff --git a/docs/src/advanced_features/concolic.md b/docs/src/advanced_features/concolic.md index becdf2ead8..153607f84c 100644 --- a/docs/src/advanced_features/concolic.md +++ b/docs/src/advanced_features/concolic.md @@ -115,7 +115,7 @@ The `symcc_runtime` crate supports this use case and runtimes built with `symcc_ ## Hybrid Fuzzing in LibAFL -The LibAFL repository contains an [example hybrid fuzzer](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/libfuzzer_stb_image_concolic). +The LibAFL repository contains an [example hybrid fuzzer](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/structure_aware/libfuzzer_stb_image_concolic). There are three main steps involved with building a hybrid fuzzer using LibAFL: @@ -130,7 +130,7 @@ For example, we need to have a runtime ready before we can do instrumentation wi Building a custom runtime can be done easily using the `symcc_runtime` crate. Note, that a custom runtime is a separate shared object file, which means that we need a separate crate for our runtime. -Check out the [example hybrid fuzzer's runtime](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/libfuzzer_stb_image_concolic/runtime) and the [`symcc_runtime` docs](https://docs.rs/symcc_runtime/0.1/symcc_runtime) for inspiration. +Check out the [example hybrid fuzzer's runtime](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/structure_aware/libfuzzer_stb_image_concolic/runtime) and the [`symcc_runtime` docs](https://docs.rs/symcc_runtime/0.1/symcc_runtime) for inspiration. ### Instrumentation @@ -151,7 +151,7 @@ How exactly this is done does not matter. 
However, the SymCC compiler needs to be made aware of the location of the runtime that it should instrument against. This is done by setting the `SYMCC_RUNTIME_DIR` environment variable to the directory which contains the runtime (typically the `target/(debug|release)` folder of your runtime crate). -The example hybrid fuzzer instruments the target in its [`build.rs` build script](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/libfuzzer_stb_image_concolic/fuzzer/build.rs#L50). +The example hybrid fuzzer instruments the target in its [`build.rs` build script](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/build.rs#L50). It does this by cloning and building a copy of SymCC and then using this version to instrument the target. The [`symcc_libafl` crate](https://docs.rs/symcc_libafl) contains helper functions for cloning and building SymCC. @@ -169,7 +169,7 @@ No matter the instrumentation method, the interface between the fuzzer and the i The only difference between using SymCC and SymQEMU should be the binary that represents the target: In the case of SymCC it will be the binary that was build with instrumentation and with SymQEMU it will be the emulator binary (eg. `x86_64-linux-user/symqemu-x86_64`), followed by your uninstrumented target binary and its arguments. -You can use the [`CommandExecutor`](https://docs.rs/libafl/latest/libafl/executors/command/struct.CommandExecutor.html) to execute your target ([example](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs#L244)). +You can use the [`CommandExecutor`](https://docs.rs/libafl/latest/libafl/executors/command/struct.CommandExecutor.html) to execute your target ([example](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/src/main.rs#L244)). When configuring the command, make sure you pass the `SYMCC_INPUT_FILE` environment variable (set to the input file path), if your target reads input from a file (instead of standard input). #### Serialization and Solving @@ -184,4 +184,4 @@ It will attempt to solve all branches, like the original simple backend from Sym ### Example -The example fuzzer shows how to use the [`ConcolicTracingStage` together with the `SimpleConcolicMutationalStage`](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs#L222) to build a basic hybrid fuzzer. +The example fuzzer shows how to use the [`ConcolicTracingStage` together with the `SimpleConcolicMutationalStage`](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/src/main.rs#L222) to build a basic hybrid fuzzer. diff --git a/docs/src/advanced_features/frida.md b/docs/src/advanced_features/frida.md index 5d1532de9f..13e51030cd 100644 --- a/docs/src/advanced_features/frida.md +++ b/docs/src/advanced_features/frida.md @@ -4,7 +4,7 @@ LibAFL supports different instrumentation engines for binary-only fuzzing. A potent cross-platform (Windows, MacOS, Android, Linux, iOS) option for binary-only fuzzing is Frida; the dynamic instrumentation tool. In this section, we will talk about the components in fuzzing with `libafl_frida`. 
-You can take a look at a working example in our [`fuzzers/frida_libpng`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/frida_libpng) folder for Linux, and [`fuzzers/frida_gdiplus`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/frida_gdiplus) for Windows. +You can take a look at a working example in our [`fuzzers/binary_only/frida_libpng`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/binary_only/frida_libpng) folder for Linux, and [`fuzzers/binary_only/frida_windows_gdiplus`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/binary_only/frida_windows_gdiplus) for Windows. ## Dependencies @@ -84,4 +84,4 @@ You can then link this observer to `FridaInProcessExecutor` as follows: ``` And finally you can run the fuzzer. -See the `frida_` examples in [`./fuzzers`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/) for more information and, for linux or full-system, play around with `libafl_qemu`, another binary-only tracer. +See the `frida_` examples in [`./fuzzers/binary_only`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/binary_only/) for more information and, for Linux or full-system, play around with `libafl_qemu`, another binary-only tracer. diff --git a/docs/src/advanced_features/no_std.md b/docs/src/advanced_features/no_std.md index ae3985be41..529e4148fd 100644 --- a/docs/src/advanced_features/no_std.md +++ b/docs/src/advanced_features/no_std.md @@ -37,4 +37,4 @@ pub extern "C" fn external_current_millis() -> u64 { } ``` -See [./fuzzers/baby_no_std](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/baby_no_std) for an example. +See [./fuzzers/fuzz_anything/baby_no_std](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/fuzz_anything/baby_no_std) for an example. diff --git a/docs/src/advanced_features/nyx.md b/docs/src/advanced_features/nyx.md index 01d3e7c76c..e78094afc7 100644 --- a/docs/src/advanced_features/nyx.md +++ b/docs/src/advanced_features/nyx.md @@ -24,7 +24,7 @@ For binary-only fuzzing, Nyx uses intel-PT(Intel® Processor Trace). You can fin ## Preparing the Nyx working directory -This step is used to pack the target into Nyx's kernel. Don't worry, we have a template shell script in our [example](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_parallel/setup_libxml2.sh): +This step is used to pack the target into Nyx's kernel. Don't worry, we have a template shell script in our [example](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/full_system/nyx_libxml2_parallel/setup_libxml2.sh): the parameter's meaning is listed below: @@ -49,7 +49,7 @@ python3 ./packer/packer/nyx_config_gen.py /tmp/nyx_libxml2/ Kernel || exit ## Standalone fuzzing -In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_standalone/src/main.rs) you first need to run `./setup_libxml2.sh`. It will prepare your target and create your nyx work directory in `/tmp/libxml2`. After that, you can start to write your code. +In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/full_system/nyx_libxml2_standalone/src/main.rs) you first need to run `./setup_libxml2.sh`. It will prepare your target and create your nyx work directory in `/tmp/libxml2`. After that, you can start to write your code.
First, to create `Nyxhelper`: @@ -71,7 +71,7 @@ Finally, use them normally and pass them into `fuzzer.fuzz_loop(&mut stages, &mu ## Parallel fuzzing -In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/nyx_libxml2_parallel/src/main.rs) you first need to run `./setup_libxml2.sh` as described before. +In the [example fuzzer](https://github.com/AFLplusplus/LibAFL/blob/main/fuzzers/full_system/nyx_libxml2_parallel/src/main.rs) you first need to run `./setup_libxml2.sh` as described before. Parallel fuzzing relies on [`Launcher`](../message_passing/spawn_instances.md), so spawn logic should be written in the scoop of anonymous function `run_client`: diff --git a/docs/src/baby_fuzzer/baby_fuzzer.md b/docs/src/baby_fuzzer/baby_fuzzer.md index b34645c5c7..86a4df5967 100644 --- a/docs/src/baby_fuzzer/baby_fuzzer.md +++ b/docs/src/baby_fuzzer/baby_fuzzer.md @@ -6,7 +6,7 @@ While the following chapters discuss the components of LibAFL in detail, here we We are going to fuzz a simple Rust function that panics under a condition. The fuzzer will be single-threaded and will stop after the crash, just like libFuzzer normally does. -You can find a complete version of this tutorial as an example fuzzer in [`fuzzers/baby_fuzzer`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/baby_fuzzer). +You can find a complete version of this tutorial as an example fuzzer in [`fuzzers/baby/baby_fuzzer`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/baby/baby_fuzzer). > ### Warning > @@ -222,4 +222,4 @@ Bye! As you can see, after the panic message, the `objectives` count of the log increased by one and you will find the crashing input in `crashes/`. -The complete code can be found in [`./fuzzers/baby_fuzzer`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/baby_fuzzer) alongside other `baby_` fuzzers. +The complete code can be found in [`./fuzzers/baby/baby_fuzzer`](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/baby/baby_fuzzer) alongside other `baby_` fuzzers. diff --git a/docs/src/baby_fuzzer/more_examples.md b/docs/src/baby_fuzzer/more_examples.md index aeac04163a..3b7fe8c116 100644 --- a/docs/src/baby_fuzzer/more_examples.md +++ b/docs/src/baby_fuzzer/more_examples.md @@ -1,6 +1,6 @@ # More Examples -Examples can be found under `./fuzzer`. +Examples can be found under `./fuzzers/baby`. |fuzzer name|usage| | ---- | ---- | diff --git a/docs/src/core_concepts/executor.md b/docs/src/core_concepts/executor.md index 1a677b6ef9..85ee0fff46 100644 --- a/docs/src/core_concepts/executor.md +++ b/docs/src/core_concepts/executor.md @@ -57,7 +57,7 @@ On your fuzzer side, you can allocate a shared memory region and make the `EDGES ```rust,ignore let mut shmem; unsafe{ - shmem = StdShMemProvider::new().unwrap().new_shmem(EDGES_MAP_SIZE_IN_USE).unwrap(); + shmem = StdShMemProvider::new().unwrap().new_shmem(EDGES_MAP_DEFAULT_SIZE).unwrap(); } let shmem_buf = shmem.as_slice_mut(); unsafe{ diff --git a/docs/src/core_concepts/mutator.md b/docs/src/core_concepts/mutator.md index b9ef13c4c7..1956befd7c 100644 --- a/docs/src/core_concepts/mutator.md +++ b/docs/src/core_concepts/mutator.md @@ -6,4 +6,6 @@ Mutators can be composed, and they are generally linked to a specific Input type There can be, for instance, a Mutator that applies more than a single type of mutation to the input. 
Consider a generic Mutator for a byte stream, bit flip is just one of the possible mutations but not the only one, there is also, for instance, the random replacement of a byte of the copy of a chunk. +There are also mutators that always produce valid inputs, say a mutator that generates valid JSON or code, but these grammar based mutators need a grammar to work. + In LibAFL, [`Mutator`](https://docs.rs/libafl/latest/libafl/mutators/trait.Mutator.html) is a trait. diff --git a/docs/src/design/architecture.md b/docs/src/design/architecture.md index 3cf6e76811..bdaa8ba54f 100644 --- a/docs/src/design/architecture.md +++ b/docs/src/design/architecture.md @@ -12,4 +12,4 @@ Beside the entities previously described, we introduce the [`Testcase`](https:// The State, in the implementation, contains only owned objects that are serializable, and it is serializable itself. Some fuzzers may want to serialize their state when pausing or just, when doing in-process fuzzing, serialize on crash and deserialize in the new process to continue to fuzz with all the metadata preserved. -Additionally, we group the entities that are "actions", like the `CorpusScheduler` and the `Feedbacks`, in a common place, the [`Fuzzer'](https://docs.rs/libafl/latest/libafl/fuzzer/struct.StdFuzzer.html). +Additionally, we group the entities that are "actions", like the `CorpusScheduler` and the `Feedbacks`, in a common place, the [`Fuzzer`](https://docs.rs/libafl/latest/libafl/fuzzer/struct.StdFuzzer.html). diff --git a/docs/src/design/metadata.md b/docs/src/design/metadata.md index d3a989d4c9..f340d9ca33 100644 --- a/docs/src/design/metadata.md +++ b/docs/src/design/metadata.md @@ -27,7 +27,7 @@ Metadata objects are primarly intended to be used inside [`SerdeAnyMap`](https:/ With these maps, the user can retrieve instances by type (and name). Internally, the instances are stored as SerdeAny trait objects. -Structs that want to have a set of metadata must implement the [`HasMetadata`](https://docs.rs/libafl/latest/libafl/state/trait.HasMetadata.html) trait. +Structs that want to have a set of metadata must implement the [`HasMetadata`](https://docs.rs/libafl/latest/libafl/common/trait.HasMetadata.html) trait. By default, Testcase and State implement it and hold a SerdeAnyMap testcase. diff --git a/docs/src/design/migration-0.11.md b/docs/src/design/migration-0.11.md index 8a2bdab0cf..c236bdbafe 100644 --- a/docs/src/design/migration-0.11.md +++ b/docs/src/design/migration-0.11.md @@ -12,7 +12,7 @@ Some cross-platform things in bolts include * ShMem: A cross-platform (Windows, Linux, Android, MacOS) shared memory implementation * LLMP: A fast, lock-free IPC mechanism via SharedMap * Core_affinity: A maintained version of `core_affinity` that can be used to get core information and bind processes to cores -* Rands: Fast random number generators for fuzzing (like [RomuRand](http://www.romu-random.org/)) +* Rands: Fast random number generators for fuzzing (like [RomuRand](https://www.romu-random.org/)) * MiniBSOD: get and print information about the current process state including important registers. * Tuples: Haskel-like compile-time tuple lists * Os: OS specific stuff like signal handling, windows exception handling, pipes, and helpers for `fork` diff --git a/docs/src/design/migration-0.9.md b/docs/src/design/migration-0.9.md index b48c3e7711..1c8292f9a6 100644 --- a/docs/src/design/migration-0.9.md +++ b/docs/src/design/migration-0.9.md @@ -169,7 +169,7 @@ from libafl_target's `EDGES_MAP`. 
In the future, instead of using: ```rust,ignore -let edges = unsafe { &mut EDGES_MAP[0..EDGES_MAP_SIZE_IN_USE] }; +let edges = unsafe { &mut EDGES_MAP[0..EDGES_MAP_DEFAULT_SIZE] }; let edges_observer = StdMapObserver::new("edges", edges); ``` diff --git a/docs/src/getting_started/crates.md b/docs/src/getting_started/crates.md index cbb61d2694..e90ad3c269 100644 --- a/docs/src/getting_started/crates.md +++ b/docs/src/getting_started/crates.md @@ -51,7 +51,7 @@ In it, you'll find highlights like: The sugar crate abstracts away most of the complexity of LibAFL's API. Instead of high flexibility, it aims to be high-level and easy-to-use. It is not as flexible as stitching your fuzzer together from each individual component, but allows you to build a fuzzer with minimal lines of code. -To see it in action, take a look at the [`libfuzzer_stb_image_sugar` example fuzzer](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/libfuzzer_stb_image_sugar). +To see it in action, take a look at the [`libfuzzer_stb_image_sugar` example fuzzer](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/inprocess/libfuzzer_stb_image_sugar). ### [`libafl_derive`](https://github.com/AFLplusplus/LibAFL/tree/main/libafl_derive) diff --git a/docs/src/getting_started/setup.md b/docs/src/getting_started/setup.md index 98076c44fe..f20edfdd5f 100644 --- a/docs/src/getting_started/setup.md +++ b/docs/src/getting_started/setup.md @@ -54,6 +54,6 @@ explained [here](https://clang.llvm.org/get_started.html). If you do not have Rust installed, you can easily follow the steps described [here](https://www.rust-lang.org/tools/install) to install it on any supported system. -Be aware that Rust versions shipped with Linux distributions may be outdated, LibAFL always targets the latest `stable` version available via `rustup upgrade`. +Be aware that Rust versions shipped with Linux distributions may be outdated, LibAFL always targets the latest `stable` version available via `rustup update`. We suggest installing Clang and LLVM first. diff --git a/docs/src/message_passing/message_passing.md b/docs/src/message_passing/message_passing.md index 0d54418c1e..1bbf0ca9c1 100644 --- a/docs/src/message_passing/message_passing.md +++ b/docs/src/message_passing/message_passing.md @@ -75,7 +75,7 @@ So the outgoing messages flow is like this over the outgoing broadcast `Shmem`: To use `LLMP` in LibAFL, you usually want to use an `LlmpEventManager` or its restarting variant. They are the default if using LibAFL's `Launcher`. -If you should want to use `LLMP` in its raw form, without any `LibAFL` abstractions, take a look at the `llmp_test` example in [./libafl/examples](https://github.com/AFLplusplus/LibAFL/blob/main/libafl/examples/llmp_test/main.rs). +If you should want to use `LLMP` in its raw form, without any `LibAFL` abstractions, take a look at the `llmp_test` example in [./libafl/examples](https://github.com/AFLplusplus/LibAFL/blob/main/libafl_bolts/examples/llmp_test/main.rs). You can run the example using `cargo run --example llmp_test` with the appropriate modes, as indicated by its help output. First, you will have to create a broker using `LlmpBroker::new()`. Then, create some `LlmpClient`s in other threads and register them with the main thread using `LlmpBroker::register_client`. 
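To illustrate the restarting variant mentioned above (and described further in the spawn_instances chapter changed just below), a client usually obtains its event manager through `setup_restarting_mgr_std`. The import path, argument order, and `EventConfig` value in this fragment are assumptions based on how the launcher-style example fuzzers are typically wired up; check the linked API docs for the exact signature:

```rust,ignore
use libafl::{
    events::{llmp::restarting::setup_restarting_mgr_std, EventConfig},
    monitors::MultiMonitor,
};

// Hypothetical setup: every spawned instance connects to the broker on port 1337;
// if no broker is listening yet, the first instance becomes the broker itself.
let monitor = MultiMonitor::new(|s| println!("{s}"));
let (state, mut restarting_mgr) =
    setup_restarting_mgr_std(monitor, 1337, EventConfig::AlwaysUnique)
        .expect("Failed to set up the restarting event manager");

// `state` is `None` on a fresh start and `Some(..)` after a restart, so a crashed
// client can resume fuzzing with its corpus and metadata preserved.
```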
diff --git a/docs/src/message_passing/spawn_instances.md b/docs/src/message_passing/spawn_instances.md index 2f8301c308..0c532848dd 100644 --- a/docs/src/message_passing/spawn_instances.md +++ b/docs/src/message_passing/spawn_instances.md @@ -4,7 +4,7 @@ Multiple fuzzer instances can be spawned using different ways. ## Manually, via a TCP port -The straightforward way to do Multi-Threading is to use the [`LlmpRestartingEventManager`](https://docs.rs/libafl/latest/libafl/events/llmp/struct.LlmpRestartingEventManager.html), specifically to use [`setup_restarting_mgr_std`](https://docs.rs/libafl/latest/libafl/events/llmp/fn.setup_restarting_mgr_std.html). +The straightforward way to do Multi-Threading is to use the [`LlmpRestartingEventManager`](https://docs.rs/libafl/latest/libafl/events/llmp/restarting/struct.LlmpRestartingEventManager.html), specifically to use [`setup_restarting_mgr_std`](https://docs.rs/libafl/latest/libafl/events/llmp/restarting/fn.setup_restarting_mgr_std.html). It abstracts away all the pesky details about restarts on crash handling (for in-memory fuzzers) and multi-threading. With it, every instance you launch manually tries to connect to a TCP port on the local machine. diff --git a/docs/src/tutorial/intro.md b/docs/src/tutorial/intro.md index 8eea4288db..7b6cd323cb 100644 --- a/docs/src/tutorial/intro.md +++ b/docs/src/tutorial/intro.md @@ -5,4 +5,4 @@ > This section is under construction. > Please check back later (or open a PR) > -> In the meantime, find the final Lain-based fuzzer in [the fuzzers folder](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/tutorial) +> In the meantime, find the final Lain-based fuzzer in [the fuzzers folder](https://github.com/AFLplusplus/LibAFL/tree/main/fuzzers/baby/tutorial) diff --git a/fuzzers/README.md b/fuzzers/README.md new file mode 100644 index 0000000000..22aaffcafa --- /dev/null +++ b/fuzzers/README.md @@ -0,0 +1,34 @@ +# LibAFL Fuzzers + +## Example fuzzers + +You can find a large number of example fuzzers built on top of LibAFL. +They are sorted by focus: + +- [`baby`](./baby/): Minimal fuzzers and fuzzers demonstrating specific features that don't fit anywhere else. +- [`binary_only`](./binary_only/): Fuzzers for binary-only targets. +- [`forkserver`](./forkserver/): Fuzzers that use a forkserver-style executor. +- [`full_system`](./full_system/): Fuzzers for full-system targets (kernels, firmware, etc.). +- [`fuzz_anything`](./fuzz_anything/): Fuzzers for advanced targets like WASM or Python, and other fuzzers that can be used for anything. +- [`inprocess`](./inprocess/): Common in-process fuzzers. Most of the time, this is what you want. +- [`structure_aware`](./structure_aware/): Grammar fuzzers, fuzzers for certain languages, fuzzers with custom inputs, and more. + +(Some fuzzers may fit into multiple categories, in which case we sort them as it makes sense, for example `structure_aware > full_system > binary_only > the rest`) + +## Fully-featured Fuzzers + +Some rather complete fuzzers worth looking at are: + +- [`Libfuzzer_Libpng_Launcher`](./inprocess/libfuzzer_libpng_launcher): That's what most people want to use: our InProcess fuzzer with a lot of features like ASan on some cores, multi-threading (a better libfuzzer). +- [`LibAFL-fuzz`](./forkserver/libafl-fuzz/): A reimplementation of afl-fuzz, the traditional forkserver fuzzer that tries to emulate the command line and behavior.
+- [`LibAFL-QEMU-Launcher`](./binary_only/qemu_launcher/): A full-featured QEMU-mode fuzzer that runs on multiple cores + +They may not be the best starting point for your own custom fuzzer, but they might be easy enough to just use. + +## Paper Artifacts + +Multiple papers based on LibAFL have been published and include artifacts. +Here is a list of LibAFL artifacts: + +- Fuzzbench implementation: https://github.com/AFLplusplus/libafl_fuzzbench +- LibAFL QEMU experiments: https://github.com/AFLplusplus/libafl_qemu_artifacts diff --git a/fuzzers/baby_fuzzer/.gitignore b/fuzzers/baby/baby_fuzzer/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer/.gitignore rename to fuzzers/baby/baby_fuzzer/.gitignore diff --git a/fuzzers/baby/baby_fuzzer/Cargo.toml b/fuzzers/baby/baby_fuzzer/Cargo.toml new file mode 100644 index 0000000000..9be1d86f24 --- /dev/null +++ b/fuzzers/baby/baby_fuzzer/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "baby_fuzzer" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +tui = [] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl", features = ["tui_monitor"] } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/baby_fuzzer/README.md b/fuzzers/baby/baby_fuzzer/README.md similarity index 100% rename from fuzzers/baby_fuzzer/README.md rename to fuzzers/baby/baby_fuzzer/README.md diff --git a/fuzzers/baby_fuzzer/src/main.rs b/fuzzers/baby/baby_fuzzer/src/main.rs similarity index 90% rename from fuzzers/baby_fuzzer/src/main.rs rename to fuzzers/baby/baby_fuzzer/src/main.rs index 90975578f1..c98b028026 100644 --- a/fuzzers/baby_fuzzer/src/main.rs +++ b/fuzzers/baby/baby_fuzzer/src/main.rs @@ -3,24 +3,24 @@ use std::ptr::write_volatile; use std::{path::PathBuf, ptr::write}; #[cfg(feature = "tui")] -use libafl::monitors::tui::{ui::TuiUI, TuiMonitor}; +use libafl::monitors::tui::TuiMonitor; #[cfg(not(feature = "tui"))] use libafl::monitors::SimpleMonitor; use libafl::{ corpus::{InMemoryCorpus, OnDiskCorpus}, events::SimpleEventManager, - executors::{inprocess::InProcessExecutor, ExitKind}, + executors::{ExitKind, InProcessExecutor}, feedbacks::{CrashFeedback, MaxMapFeedback}, fuzzer::{Fuzzer, StdFuzzer}, generators::RandPrintablesGenerator, inputs::{BytesInput, HasTargetBytes}, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::StdMapObserver, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, state::StdState, }; -use libafl_bolts::{current_nanos, rands::StdRand, tuples::tuple_list, AsSlice}; +use libafl_bolts::{current_nanos, nonzero, rands::StdRand, tuples::tuple_list, AsSlice}; /// Coverage map with explicit assignments due to the lack of instrumentation static mut SIGNALS: [u8; 16] = [0; 16]; @@ -90,9 +90,10 @@ pub fn main() { #[cfg(not(feature = "tui"))] let mon = SimpleMonitor::new(|s| println!("{s}")); #[cfg(feature = "tui")] - let ui = TuiUI::with_version(String::from("Baby Fuzzer"), String::from("0.0.1"), false); - #[cfg(feature = "tui")] - let mon = TuiMonitor::new(ui); + let mon = TuiMonitor::builder() + .title("Baby Fuzzer") + .enhanced_graphics(false) + .build(); // The event manager handle the various events generated during the 
fuzzing loop // such as the notification of the addition of a new item to the corpus @@ -115,7 +116,7 @@ pub fn main() { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/baby/baby_fuzzer_custom_executor/.gitignore b/fuzzers/baby/baby_fuzzer_custom_executor/.gitignore new file mode 100644 index 0000000000..e0921f291e --- /dev/null +++ b/fuzzers/baby/baby_fuzzer_custom_executor/.gitignore @@ -0,0 +1,2 @@ +libpng-* +corpus \ No newline at end of file diff --git a/fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/Cargo.toml b/fuzzers/baby/baby_fuzzer_custom_executor/Cargo.toml similarity index 61% rename from fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/Cargo.toml rename to fuzzers/baby/baby_fuzzer_custom_executor/Cargo.toml index 5d09172a78..f7905ec4c2 100644 --- a/fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/Cargo.toml +++ b/fuzzers/baby/baby_fuzzer_custom_executor/Cargo.toml @@ -1,10 +1,15 @@ [package] -name = "rust_code_with_inprocess_executor" -version = "0.0.1" +name = "fuzzer_custom_executor" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] edition = "2021" [features] default = ["std"] +tui = ["libafl/tui_monitor"] std = [] [profile.dev] diff --git a/fuzzers/baby/baby_fuzzer_custom_executor/Makefile.toml b/fuzzers/baby/baby_fuzzer_custom_executor/Makefile.toml new file mode 100644 index 0000000000..5b4eed6324 --- /dev/null +++ b/fuzzers/baby/baby_fuzzer_custom_executor/Makefile.toml @@ -0,0 +1,50 @@ +# Variables +[env] +FUZZER_NAME = 'fuzzer_custom_executor' +PROJECT_DIR = { script = ["pwd"] } +CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release" } +PROFILE_DIR = { value = "release" } +FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' + +[tasks.build] +alias = "fuzzer" + +[tasks.fuzzer] +description = "Build the fuzzer" +script = "cargo build --profile=${PROFILE}" + +[tasks.run] +description = "Run the fuzzer" +command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}" +dependencies = ["fuzzer"] + +[tasks.test] +description = "Run a short test" +linux_alias = "test_unix" +mac_alias = "test_unix" +windows_alias = "unsupported" + +[tasks.test_unix] +script_runner = "@shell" +script = ''' +timeout 30s ${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME} | tee fuzz_stdout.log || true +if grep -qa "objectives: 1" fuzz_stdout.log; then + echo "Fuzzer is working" +else + echo "Fuzzer does not generate any testcases or any crashes" + exit 1 +fi +''' +dependencies = ["fuzzer"] + +# Clean up +[tasks.clean] +# Disable default `clean` definition +clear = true +script_runner = "@shell" +script = ''' +cargo clean +''' diff --git a/fuzzers/baby/baby_fuzzer_custom_executor/README.md b/fuzzers/baby/baby_fuzzer_custom_executor/README.md new file mode 100644 index 0000000000..0a3d16475b --- /dev/null +++ b/fuzzers/baby/baby_fuzzer_custom_executor/README.md @@ -0,0 +1,12 @@ +# Baby fuzzer with Custom Executor + +This is a minimalistic example about how to create a LibAFL-based fuzzer. + +In contrast to the normal baby fuzzer, this uses a (very simple) custom executor. + +The custom executor won't catch any timeouts or actual errors (i.e., memory corruptions, etc.) in the target. 
+ +The tested program is a simple Rust function without any instrumentation. +For real fuzzing, you will want to add some sort to add coverage or other feedback. + +You can run this example using `cargo run`, and you can enable the TUI feature by running `cargo run --features tui`. diff --git a/fuzzers/baby/baby_fuzzer_custom_executor/src/main.rs b/fuzzers/baby/baby_fuzzer_custom_executor/src/main.rs new file mode 100644 index 0000000000..003839f84d --- /dev/null +++ b/fuzzers/baby/baby_fuzzer_custom_executor/src/main.rs @@ -0,0 +1,159 @@ +#[cfg(windows)] +use std::ptr::write_volatile; +use std::{marker::PhantomData, path::PathBuf, ptr::write}; + +#[cfg(feature = "tui")] +use libafl::monitors::tui::TuiMonitor; +#[cfg(not(feature = "tui"))] +use libafl::monitors::SimpleMonitor; +use libafl::{ + corpus::{InMemoryCorpus, OnDiskCorpus}, + events::SimpleEventManager, + executors::{Executor, ExitKind, WithObservers}, + feedback_and_fast, + feedbacks::{CrashFeedback, MaxMapFeedback}, + fuzzer::{Fuzzer, StdFuzzer}, + generators::RandPrintablesGenerator, + inputs::HasTargetBytes, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, + observers::StdMapObserver, + schedulers::QueueScheduler, + stages::mutational::StdMutationalStage, + state::{HasExecutions, State, StdState, UsesState}, +}; +use libafl_bolts::{current_nanos, nonzero, rands::StdRand, tuples::tuple_list, AsSlice}; + +/// Coverage map with explicit assignments due to the lack of instrumentation +static mut SIGNALS: [u8; 16] = [0; 16]; +static mut SIGNALS_PTR: *mut u8 = &raw mut SIGNALS as _; +static SIGNALS_LEN: usize = unsafe { (*&raw const (SIGNALS)).len() }; + +/// Assign a signal to the signals map +fn signals_set(idx: usize) { + unsafe { write(SIGNALS_PTR.add(idx), 1) }; +} + +struct CustomExecutor { + phantom: PhantomData, +} + +impl CustomExecutor { + pub fn new(_state: &S) -> Self { + Self { + phantom: PhantomData, + } + } +} + +impl UsesState for CustomExecutor { + type State = S; +} + +impl Executor for CustomExecutor +where + EM: UsesState, + S: State + HasExecutions, + Z: UsesState, + Self::Input: HasTargetBytes, +{ + fn run_target( + &mut self, + _fuzzer: &mut Z, + state: &mut Self::State, + _mgr: &mut EM, + input: &Self::Input, + ) -> Result { + // We need to keep track of the exec count. + *state.executions_mut() += 1; + + let target = input.target_bytes(); + let buf = target.as_slice(); + signals_set(0); + if !buf.is_empty() && buf[0] == b'a' { + signals_set(1); + if buf.len() > 1 && buf[1] == b'b' { + signals_set(2); + if buf.len() > 2 && buf[2] == b'c' { + return Ok(ExitKind::Crash); + } + } + } + Ok(ExitKind::Ok) + } +} + +#[allow(clippy::similar_names, clippy::manual_assert)] +pub fn main() { + // Create an observation channel using the signals map + let observer = unsafe { StdMapObserver::from_mut_ptr("signals", SIGNALS_PTR, SIGNALS_LEN) }; + + // Feedback to rate the interestingness of an input + let mut feedback = MaxMapFeedback::new(&observer); + + // A feedback to choose if an input is a solution or not + let mut objective = feedback_and_fast!( + // Look for crashes. + CrashFeedback::new(), + // We `and` the MaxMapFeedback to only end up with crashes that trigger new coverage. + // We use the _fast variant to make sure it's not evaluated every time, even if the crash didn't trigger.. + // We have to give this one a name since it differs from the first map. 
+ MaxMapFeedback::with_name("on_crash", &observer) + ); + + // create a State from scratch + let mut state = StdState::new( + // RNG + StdRand::with_seed(current_nanos()), + // Corpus that will be evolved, we keep it in memory for performance + InMemoryCorpus::new(), + // Corpus in which we store solutions (crashes in this example), + // on disk so the user can get them after stopping the fuzzer + OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(), + // States of the feedbacks. + // The feedbacks can report the data that should persist in the State. + &mut feedback, + // Same for objective feedbacks + &mut objective, + ) + .unwrap(); + + // The Monitor trait define how the fuzzer stats are displayed to the user + #[cfg(not(feature = "tui"))] + let mon = SimpleMonitor::new(|s| println!("{s}")); + #[cfg(feature = "tui")] + let mon = TuiMonitor::builder() + .title("Baby Fuzzer") + .enhanced_graphics(false) + .build(); + + // The event manager handle the various events generated during the fuzzing loop + // such as the notification of the addition of a new item to the corpus + let mut mgr = SimpleEventManager::new(mon); + + // A queue policy to get testcasess from the corpus + let scheduler = QueueScheduler::new(); + + // A fuzzer with feedbacks and a corpus scheduler + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + // Create the executor for an in-process function with just one observer + let executor = CustomExecutor::new(&state); + + let mut executor = WithObservers::new(executor, tuple_list!(observer)); + + // Generator of printable bytearrays of max size 32 + let mut generator = RandPrintablesGenerator::with_min_size(nonzero!(1), nonzero!(32)); + + // Generate 8 initial inputs + state + .generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8) + .expect("Failed to generate the initial corpus"); + + // Setup a mutational stage with a basic bytes mutator + let mutator = StdScheduledMutator::new(havoc_mutations()); + let mut stages = tuple_list!(StdMutationalStage::new(mutator)); + + fuzzer + .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr) + .expect("Error in the fuzzing loop"); +} diff --git a/fuzzers/baby_fuzzer_minimizing/.gitignore b/fuzzers/baby/baby_fuzzer_minimizing/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_minimizing/.gitignore rename to fuzzers/baby/baby_fuzzer_minimizing/.gitignore diff --git a/fuzzers/baby/baby_fuzzer_minimizing/Cargo.toml b/fuzzers/baby/baby_fuzzer_minimizing/Cargo.toml new file mode 100644 index 0000000000..1623c2061e --- /dev/null +++ b/fuzzers/baby/baby_fuzzer_minimizing/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "baby_fuzzer_minimizing" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", + "Addison Crump ", +] +edition = "2021" + +[features] +default = ["std"] +tui = ["libafl/tui_monitor"] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl", features = ["prelude"] } +libafl_bolts = { path = "../../../libafl_bolts", features = ["prelude"] } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/baby_fuzzer_minimizing/README.md b/fuzzers/baby/baby_fuzzer_minimizing/README.md similarity index 100% rename from fuzzers/baby_fuzzer_minimizing/README.md rename to fuzzers/baby/baby_fuzzer_minimizing/README.md diff --git a/fuzzers/baby_fuzzer_minimizing/src/main.rs 
b/fuzzers/baby/baby_fuzzer_minimizing/src/main.rs similarity index 92% rename from fuzzers/baby_fuzzer_minimizing/src/main.rs rename to fuzzers/baby/baby_fuzzer_minimizing/src/main.rs index 0b5a4e9d51..a61f3bf682 100644 --- a/fuzzers/baby_fuzzer_minimizing/src/main.rs +++ b/fuzzers/baby/baby_fuzzer_minimizing/src/main.rs @@ -86,7 +86,7 @@ pub fn main() -> Result<(), Error> { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state @@ -94,8 +94,8 @@ pub fn main() -> Result<(), Error> { .expect("Failed to generate the initial corpus"); // Setup a mutational stage with a basic bytes mutator - let mutator = StdScheduledMutator::new(havoc_mutations::()); - let minimizer = StdScheduledMutator::new(havoc_mutations::()); + let mutator = StdScheduledMutator::new(havoc_mutations()); + let minimizer = StdScheduledMutator::new(havoc_mutations()); let mut stages = tuple_list!( StdMutationalStage::new(mutator), StdTMinMutationalStage::new(minimizer, factory, 128) @@ -121,11 +121,11 @@ pub fn main() -> Result<(), Error> { let mut mgr = SimpleEventManager::new(mon); - let minimizer = StdScheduledMutator::new(havoc_mutations::()); + let minimizer = StdScheduledMutator::new(havoc_mutations()); let mut stages = tuple_list!(StdTMinMutationalStage::new( minimizer, CrashFeedback::new(), - 1 << 10 + 1 << 10, )); let scheduler = QueueScheduler::new(); @@ -138,7 +138,9 @@ pub fn main() -> Result<(), Error> { state.load_initial_inputs_forced(&mut fuzzer, &mut executor, &mut mgr, &[solution_dir])?; - state.set_corpus_idx(CorpusId::from(0_usize))?; + let first_id = state.corpus().first().expect("Empty corpus"); + state.set_corpus_id(first_id)?; + stages.perform_all(&mut fuzzer, &mut executor, &mut state, &mut mgr)?; Ok(()) diff --git a/fuzzers/baby_fuzzer_gramatron/.gitignore b/fuzzers/baby/baby_fuzzer_swap_differential/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_gramatron/.gitignore rename to fuzzers/baby/baby_fuzzer_swap_differential/.gitignore diff --git a/fuzzers/baby/baby_fuzzer_swap_differential/Cargo.toml b/fuzzers/baby/baby_fuzzer_swap_differential/Cargo.toml new file mode 100644 index 0000000000..89da0f1292 --- /dev/null +++ b/fuzzers/baby/baby_fuzzer_swap_differential/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "baby_fuzzer_swap_differential" +version = "0.14.1" +authors = ["Addison Crump "] +edition = "2021" +default-run = "fuzzer_sd" + +[features] +tui = ["libafl/tui_monitor"] +multimap = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +anyhow = "1.0.89" +bindgen = "0.70.1" +cc = "1.1.21" + +[dependencies] +libafl = { path = "../../../libafl", features = ["tui_monitor"] } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", + "sancov_cmplog", + "pointer_maps", +] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +libafl_cc = { path = "../../../libafl_cc" } + +[[bin]] +name = "fuzzer_sd" +path = "src/main.rs" + +[[bin]] +name = "libafl_cc" +path = "src/bin/libafl_cc.rs" diff --git a/fuzzers/baby_fuzzer_swap_differential/Makefile.toml b/fuzzers/baby/baby_fuzzer_swap_differential/Makefile.toml 
similarity index 73% rename from fuzzers/baby_fuzzer_swap_differential/Makefile.toml rename to fuzzers/baby/baby_fuzzer_swap_differential/Makefile.toml index 6ff335b2eb..96f6e1f907 100644 --- a/fuzzers/baby_fuzzer_swap_differential/Makefile.toml +++ b/fuzzers/baby/baby_fuzzer_swap_differential/Makefile.toml @@ -1,23 +1,25 @@ # Variables [env] -FUZZER_NAME='fuzzer_sd' +FUZZER_NAME = 'fuzzer_sd' PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } +CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } PROFILE = { value = "release" } -PROFILE_DIR = {value = "release" } +PROFILE_DIR = { value = "release" } LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' # Compilers [tasks.cc] command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--bin", "libafl_cc"] +args = ["build", "--profile", "${PROFILE}", "--bin", "libafl_cc"] # Harness [tasks.fuzzer] command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--bin", "${FUZZER_NAME}"] -dependencies = [ "cc" ] +args = ["build", "--profile", "${PROFILE}", "--bin", "${FUZZER_NAME}"] +dependencies = ["cc"] [tasks.build] alias = "fuzzer" @@ -25,7 +27,7 @@ alias = "fuzzer" # Run the fuzzer [tasks.run] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}" -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Test [tasks.test] @@ -35,7 +37,7 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' timeout 30s ${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME} | tee fuzz_stdout.log || true if grep -qa "objectives: 1" fuzz_stdout.log; then echo "Fuzzer is working" @@ -44,13 +46,13 @@ else exit 1 fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cargo clean ''' diff --git a/fuzzers/baby_fuzzer_swap_differential/README.md b/fuzzers/baby/baby_fuzzer_swap_differential/README.md similarity index 100% rename from fuzzers/baby_fuzzer_swap_differential/README.md rename to fuzzers/baby/baby_fuzzer_swap_differential/README.md diff --git a/fuzzers/baby_fuzzer_swap_differential/build.rs b/fuzzers/baby/baby_fuzzer_swap_differential/build.rs similarity index 100% rename from fuzzers/baby_fuzzer_swap_differential/build.rs rename to fuzzers/baby/baby_fuzzer_swap_differential/build.rs diff --git a/fuzzers/baby_fuzzer_swap_differential/common.c b/fuzzers/baby/baby_fuzzer_swap_differential/common.c similarity index 100% rename from fuzzers/baby_fuzzer_swap_differential/common.c rename to fuzzers/baby/baby_fuzzer_swap_differential/common.c diff --git a/fuzzers/baby_fuzzer_swap_differential/common.h b/fuzzers/baby/baby_fuzzer_swap_differential/common.h similarity index 100% rename from fuzzers/baby_fuzzer_swap_differential/common.h rename to fuzzers/baby/baby_fuzzer_swap_differential/common.h diff --git a/fuzzers/baby_fuzzer_swap_differential/first.c b/fuzzers/baby/baby_fuzzer_swap_differential/first.c similarity index 100% rename from fuzzers/baby_fuzzer_swap_differential/first.c rename to fuzzers/baby/baby_fuzzer_swap_differential/first.c diff --git a/fuzzers/baby_fuzzer_swap_differential/first.h b/fuzzers/baby/baby_fuzzer_swap_differential/first.h similarity index 100% rename from fuzzers/baby_fuzzer_swap_differential/first.h rename to 
fuzzers/baby/baby_fuzzer_swap_differential/first.h diff --git a/fuzzers/baby_fuzzer_swap_differential/second.c b/fuzzers/baby/baby_fuzzer_swap_differential/second.c similarity index 100% rename from fuzzers/baby_fuzzer_swap_differential/second.c rename to fuzzers/baby/baby_fuzzer_swap_differential/second.c diff --git a/fuzzers/baby_fuzzer_swap_differential/second.h b/fuzzers/baby/baby_fuzzer_swap_differential/second.h similarity index 100% rename from fuzzers/baby_fuzzer_swap_differential/second.h rename to fuzzers/baby/baby_fuzzer_swap_differential/second.h diff --git a/fuzzers/baby_fuzzer_swap_differential/src/bin/libafl_cc.rs b/fuzzers/baby/baby_fuzzer_swap_differential/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/baby_fuzzer_swap_differential/src/bin/libafl_cc.rs rename to fuzzers/baby/baby_fuzzer_swap_differential/src/bin/libafl_cc.rs diff --git a/fuzzers/baby_fuzzer_swap_differential/src/main.rs b/fuzzers/baby/baby_fuzzer_swap_differential/src/main.rs similarity index 96% rename from fuzzers/baby_fuzzer_swap_differential/src/main.rs rename to fuzzers/baby/baby_fuzzer_swap_differential/src/main.rs index 39edae9895..582e1cd56c 100644 --- a/fuzzers/baby_fuzzer_swap_differential/src/main.rs +++ b/fuzzers/baby/baby_fuzzer_swap_differential/src/main.rs @@ -6,7 +6,7 @@ use std::{ }; #[cfg(feature = "tui")] -use libafl::monitors::tui::{ui::TuiUI, TuiMonitor}; +use libafl::monitors::tui::TuiMonitor; #[cfg(not(feature = "tui"))] use libafl::monitors::SimpleMonitor; use libafl::{ @@ -17,13 +17,13 @@ use libafl::{ fuzzer::{Fuzzer, StdFuzzer}, generators::RandPrintablesGenerator, inputs::{BytesInput, HasTargetBytes}, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::StdMapObserver, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, state::{HasSolutions, StdState}, }; -use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice}; +use libafl_bolts::{nonzero, rands::StdRand, tuples::tuple_list, AsSlice}; use libafl_targets::{edges_max_num, DifferentialAFLMapSwapObserver}; #[cfg(not(miri))] use mimalloc::MiMalloc; @@ -204,9 +204,10 @@ pub fn main() { #[cfg(not(feature = "tui"))] let mon = SimpleMonitor::with_user_monitor(|s| println!("{s}")); #[cfg(feature = "tui")] - let ui = TuiUI::new(String::from("Baby Fuzzer"), false); - #[cfg(feature = "tui")] - let mon = TuiMonitor::new(ui); + let mon = TuiMonitor::builder() + .title("Baby Fuzzer") + .enhanced_graphics(false) + .build(); // The event manager handle the various events generated during the fuzzing loop // such as the notification of the addition of a new item to the corpus @@ -246,7 +247,7 @@ pub fn main() { ); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/baby_fuzzer_grimoire/.gitignore b/fuzzers/baby/baby_fuzzer_unicode/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_grimoire/.gitignore rename to fuzzers/baby/baby_fuzzer_unicode/.gitignore diff --git a/fuzzers/baby/baby_fuzzer_unicode/Cargo.toml b/fuzzers/baby/baby_fuzzer_unicode/Cargo.toml new file mode 100644 index 0000000000..0f5e44c398 --- /dev/null +++ b/fuzzers/baby/baby_fuzzer_unicode/Cargo.toml @@ -0,0 +1,28 @@ +[package] +name = "baby_fuzzer_unicode" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + 
+[features] +default = ["std"] +tui = ["libafl/tui_monitor"] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl", features = ["unicode", "tui_monitor"] } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/baby_fuzzer_unicode/README.md b/fuzzers/baby/baby_fuzzer_unicode/README.md similarity index 100% rename from fuzzers/baby_fuzzer_unicode/README.md rename to fuzzers/baby/baby_fuzzer_unicode/README.md diff --git a/fuzzers/baby_fuzzer_unicode/src/main.rs b/fuzzers/baby/baby_fuzzer_unicode/src/main.rs similarity index 92% rename from fuzzers/baby_fuzzer_unicode/src/main.rs rename to fuzzers/baby/baby_fuzzer_unicode/src/main.rs index dfc3ac56ab..77d5772b9d 100644 --- a/fuzzers/baby_fuzzer_unicode/src/main.rs +++ b/fuzzers/baby/baby_fuzzer_unicode/src/main.rs @@ -3,7 +3,7 @@ use std::ptr::write_volatile; use std::{path::PathBuf, ptr::write}; #[cfg(feature = "tui")] -use libafl::monitors::tui::{ui::TuiUI, TuiMonitor}; +use libafl::monitors::tui::TuiMonitor; #[cfg(not(feature = "tui"))] use libafl::monitors::SimpleMonitor; use libafl::{ @@ -24,7 +24,8 @@ use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice}; /// Coverage map with explicit assignments due to the lack of instrumentation static mut SIGNALS: [u8; 64] = [0; 64]; -static mut SIGNALS_PTR: *mut u8 = unsafe { SIGNALS.as_mut_ptr() }; +static mut SIGNALS_PTR: *mut u8 = (&raw mut SIGNALS).cast(); +static mut SIGNALS_LEN: usize = unsafe { (*&raw const SIGNALS).len() }; /// Assign a signal to the signals map fn signals_set(idx: usize) { @@ -56,7 +57,7 @@ pub fn main() { }; // Create an observation channel using the signals map - let observer = unsafe { StdMapObserver::from_mut_ptr("signals", SIGNALS_PTR, SIGNALS.len()) }; + let observer = unsafe { StdMapObserver::from_mut_ptr("signals", SIGNALS_PTR, SIGNALS_LEN) }; // Feedback to rate the interestingness of an input let mut feedback = MaxMapFeedback::new(&observer); @@ -85,9 +86,11 @@ pub fn main() { #[cfg(not(feature = "tui"))] let mon = SimpleMonitor::new(|s| println!("{s}")); #[cfg(feature = "tui")] - let ui = TuiUI::with_version(String::from("Baby Fuzzer"), String::from("0.0.1"), false); - #[cfg(feature = "tui")] - let mon = TuiMonitor::new(ui); + let mon = TuiMonitor::builder() + .title("Baby Fuzzer") + .version("0.0.1") + .enhanced_graphics(false) + .build(); // The event manager handle the various events generated during the fuzzing loop // such as the notification of the addition of a new item to the corpus diff --git a/fuzzers/backtrace_baby_fuzzers/README.md b/fuzzers/baby/backtrace_baby_fuzzers/README.md similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/README.md rename to fuzzers/baby/backtrace_baby_fuzzers/README.md diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/.cargo/config b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/.cargo/config similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/.cargo/config rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/.cargo/config diff --git a/fuzzers/baby_fuzzer_with_forkexecutor/.gitignore b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_with_forkexecutor/.gitignore rename to 
fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/.gitignore diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/Cargo.toml b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/Cargo.toml similarity index 52% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/Cargo.toml rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/Cargo.toml index 4c3981f802..544ec2d892 100644 --- a/fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/Cargo.toml +++ b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "c_code_with_fork_executor" -version = "0.0.1" +version = "0.14.1" edition = "2021" [features] @@ -15,9 +15,10 @@ opt-level = 3 debug = true [dependencies] -libafl = { path = "../../../libafl/" } -libafl_bolts = { path = "../../../libafl_bolts/" } -libc = "0.2" +libafl = { path = "../../../../libafl" } +libafl_bolts = { path = "../../../../libafl_bolts" } +libc = "0.2.159" +log = { version = "0.4.22", features = ["release_max_level_info"] } [build-dependencies] -cc = "1.0" +cc = "1.1.21" diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/build.rs b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/build.rs similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/build.rs rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/build.rs diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/src/harness.c b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/src/harness.c similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/src/harness.c rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/src/harness.c diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/src/main.rs b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/src/main.rs similarity index 90% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/src/main.rs rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/src/main.rs index 78cb2c7a2e..694aea946f 100644 --- a/fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/src/main.rs +++ b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_fork_executor/src/main.rs @@ -1,4 +1,4 @@ -use std::{path::PathBuf, time::Duration}; +use std::{path::PathBuf, ptr::NonNull, time::Duration}; use libafl::{ corpus::{InMemoryCorpus, OnDiskCorpus}, @@ -10,13 +10,14 @@ use libafl::{ generators::RandPrintablesGenerator, inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::{BacktraceObserver, ConstMapObserver}, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, state::StdState, }; use libafl_bolts::{ + nonzero, ownedref::OwnedRefMut, rands::StdRand, shmem::{ShMemProvider, StdShMemProvider}, @@ -45,7 +46,14 @@ pub fn main() { libafl::executors::ExitKind::Ok }; // Create an observation channel using the signals map - let observer = unsafe { ConstMapObserver::::from_mut_ptr("signals", map_ptr) }; + let observer = unsafe { + ConstMapObserver::from_mut_ptr( + "signals", + NonNull::new(map_ptr) + .expect("map ptr is null.") + .cast::<[u8; 3]>(), + ) + }; // Create a stacktrace observer let mut bt = shmem_provider.new_on_shmem::>(None).unwrap(); let bt_observer = BacktraceObserver::new( @@ -103,7 
+111,7 @@ pub fn main() { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/.cargo/config b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/.cargo/config similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/.cargo/config rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/.cargo/config diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/.gitignore b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/.gitignore similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_fork_executor/.gitignore rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/.gitignore diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/Cargo.toml b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/Cargo.toml similarity index 52% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/Cargo.toml rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/Cargo.toml index 5eb3f1e931..819fef654c 100644 --- a/fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/Cargo.toml +++ b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "c_code_with_inprocess_executor" -version = "0.0.1" +version = "0.14.1" edition = "2021" [features] @@ -15,9 +15,10 @@ opt-level = 3 debug = true [dependencies] -libafl = { path = "../../../libafl/" } -libafl_bolts = { path = "../../../libafl_bolts/" } -libc = "0.2" +libafl = { path = "../../../../libafl" } +libafl_bolts = { path = "../../../../libafl_bolts" } +libc = "0.2.159" +log = { version = "0.4.22", features = ["release_max_level_info"] } [build-dependencies] -cc = "1.0" +cc = "1.1.21" diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/build.rs b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/build.rs similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/build.rs rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/build.rs diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/src/harness.c b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/src/harness.c similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/src/harness.c rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/src/harness.c diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/src/main.rs b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/src/main.rs similarity index 87% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/src/main.rs rename to fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/src/main.rs index 3dbe6f0ad0..88dece4d3e 100644 --- a/fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/src/main.rs +++ b/fuzzers/baby/backtrace_baby_fuzzers/c_code_with_inprocess_executor/src/main.rs @@ -1,4 +1,4 @@ -use std::path::PathBuf; +use std::{path::PathBuf, ptr::NonNull}; use libafl::{ corpus::{InMemoryCorpus, OnDiskCorpus}, @@ -10,13 +10,13 @@ use libafl::{ 
generators::RandPrintablesGenerator, inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::{BacktraceObserver, ConstMapObserver}, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, state::StdState, }; -use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice}; +use libafl_bolts::{nonzero, rands::StdRand, tuples::tuple_list, AsSlice}; use libc::c_uchar; extern crate libc; @@ -35,7 +35,14 @@ pub fn main() { libafl::executors::ExitKind::Ok }; // Create an observation channel using the signals map - let observer = unsafe { ConstMapObserver::::from_mut_ptr("signals", array_ptr) }; + let observer = unsafe { + ConstMapObserver::from_mut_ptr( + "signals", + NonNull::new(array_ptr) + .expect("map ptr is null") + .cast::<[u8; 3]>(), + ) + }; // Create a stacktrace observer let bt_observer = BacktraceObserver::owned( "BacktraceObserver", @@ -89,7 +96,7 @@ pub fn main() { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/backtrace_baby_fuzzers/command_executor/.cargo/config b/fuzzers/baby/backtrace_baby_fuzzers/command_executor/.cargo/config similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/command_executor/.cargo/config rename to fuzzers/baby/backtrace_baby_fuzzers/command_executor/.cargo/config diff --git a/fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/.gitignore b/fuzzers/baby/backtrace_baby_fuzzers/command_executor/.gitignore similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/c_code_with_inprocess_executor/.gitignore rename to fuzzers/baby/backtrace_baby_fuzzers/command_executor/.gitignore diff --git a/fuzzers/backtrace_baby_fuzzers/command_executor/Cargo.toml b/fuzzers/baby/backtrace_baby_fuzzers/command_executor/Cargo.toml similarity index 51% rename from fuzzers/backtrace_baby_fuzzers/command_executor/Cargo.toml rename to fuzzers/baby/backtrace_baby_fuzzers/command_executor/Cargo.toml index 90d7377d6a..5a0b42dfc1 100644 --- a/fuzzers/backtrace_baby_fuzzers/command_executor/Cargo.toml +++ b/fuzzers/baby/backtrace_baby_fuzzers/command_executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "command_executor" -version = "0.0.1" +version = "0.14.1" edition = "2021" [features] @@ -14,8 +14,9 @@ opt-level = 3 debug = true [build-dependencies] -cc = "*" +cc = "1.1.21" [dependencies] -libafl = { path = "../../../libafl/" } -libafl_bolts = { path = "../../../libafl_bolts/" } +libafl = { path = "../../../../libafl" } +libafl_bolts = { path = "../../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/backtrace_baby_fuzzers/command_executor/build.rs b/fuzzers/baby/backtrace_baby_fuzzers/command_executor/build.rs similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/command_executor/build.rs rename to fuzzers/baby/backtrace_baby_fuzzers/command_executor/build.rs diff --git a/fuzzers/backtrace_baby_fuzzers/command_executor/src/main.rs b/fuzzers/baby/backtrace_baby_fuzzers/command_executor/src/main.rs similarity index 90% rename from fuzzers/backtrace_baby_fuzzers/command_executor/src/main.rs rename to fuzzers/baby/backtrace_baby_fuzzers/command_executor/src/main.rs index f4cb471658..841115bd30 
100644 --- a/fuzzers/backtrace_baby_fuzzers/command_executor/src/main.rs +++ b/fuzzers/baby/backtrace_baby_fuzzers/command_executor/src/main.rs @@ -17,7 +17,7 @@ use libafl::{ generators::RandPrintablesGenerator, inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::{get_asan_runtime_flags, AsanBacktraceObserver, StdMapObserver}, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, @@ -25,6 +25,7 @@ use libafl::{ Error, }; use libafl_bolts::{ + nonzero, rands::StdRand, shmem::{unix_shmem, ShMem, ShMemId, ShMemProvider}, tuples::tuple_list, @@ -83,6 +84,7 @@ pub fn main() { #[derive(Debug)] struct MyExecutor { shmem_id: ShMemId, + timeout: Duration, } impl CommandConfigurator for MyExecutor { @@ -105,14 +107,19 @@ pub fn main() { } fn exec_timeout(&self) -> Duration { - Duration::from_secs(5) + self.timeout + } + fn exec_timeout_mut(&mut self) -> &mut Duration { + &mut self.timeout } } - let mut executor = MyExecutor { shmem_id }.into_executor(tuple_list!(observer, bt_observer)); + let timeout = Duration::from_secs(5); + let mut executor = + MyExecutor { shmem_id, timeout }.into_executor(tuple_list!(observer, bt_observer)); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/backtrace_baby_fuzzers/command_executor/src/test_command.c b/fuzzers/baby/backtrace_baby_fuzzers/command_executor/src/test_command.c similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/command_executor/src/test_command.c rename to fuzzers/baby/backtrace_baby_fuzzers/command_executor/src/test_command.c diff --git a/fuzzers/backtrace_baby_fuzzers/forkserver_executor/.gitignore b/fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/.gitignore similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/forkserver_executor/.gitignore rename to fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/.gitignore diff --git a/fuzzers/backtrace_baby_fuzzers/forkserver_executor/Cargo.toml b/fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/Cargo.toml similarity index 50% rename from fuzzers/backtrace_baby_fuzzers/forkserver_executor/Cargo.toml rename to fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/Cargo.toml index fa8933adb7..3bdd6b83f2 100644 --- a/fuzzers/backtrace_baby_fuzzers/forkserver_executor/Cargo.toml +++ b/fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "forkserver_executor" -version = "0.0.1" +version = "0.14.1" edition = "2021" @@ -14,5 +14,6 @@ codegen-units = 1 opt-level = 3 [dependencies] -libafl = { path = "../../../libafl/" } -libafl_bolts = { path = "../../../libafl_bolts/" } +libafl = { path = "../../../../libafl" } +libafl_bolts = { path = "../../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/backtrace_baby_fuzzers/forkserver_executor/build.rs b/fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/build.rs similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/forkserver_executor/build.rs rename to fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/build.rs diff --git a/fuzzers/backtrace_baby_fuzzers/forkserver_executor/src/main.rs 
b/fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/src/main.rs similarity index 92% rename from fuzzers/backtrace_baby_fuzzers/forkserver_executor/src/main.rs rename to fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/src/main.rs index 86fc4b41e9..220f822643 100644 --- a/fuzzers/backtrace_baby_fuzzers/forkserver_executor/src/main.rs +++ b/fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/src/main.rs @@ -10,7 +10,7 @@ use libafl::{ generators::RandPrintablesGenerator, inputs::BytesInput, monitors::SimpleMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::{AsanBacktraceObserver, ConstMapObserver, HitcountsMapObserver}, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, @@ -21,6 +21,7 @@ use libafl_bolts::shmem::StdShMemProvider; #[cfg(target_vendor = "apple")] use libafl_bolts::shmem::UnixShMemProvider; use libafl_bolts::{ + nonzero, rands::StdRand, shmem::{ShMem, ShMemProvider}, tuples::tuple_list, @@ -41,7 +42,10 @@ pub fn main() { let mut shmem = shmem_provider.new_shmem(MAP_SIZE).unwrap(); //let the forkserver know the shmid shmem.write_to_env("__AFL_SHM_ID").unwrap(); - let shmem_map = shmem.as_slice_mut(); + let shmem_map: &mut [u8; MAP_SIZE] = shmem + .as_slice_mut() + .try_into() + .expect("could not convert slice to sized slice."); // Create an observation channel using the signals map let edges_observer = HitcountsMapObserver::new(ConstMapObserver::<_, MAP_SIZE>::new( @@ -97,7 +101,7 @@ pub fn main() { .unwrap(); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(3); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/backtrace_baby_fuzzers/forkserver_executor/src/program.c b/fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/src/program.c similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/forkserver_executor/src/program.c rename to fuzzers/baby/backtrace_baby_fuzzers/forkserver_executor/src/program.c diff --git a/fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor/.cargo/config b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor/.cargo/config similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor/.cargo/config rename to fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor/.cargo/config diff --git a/fuzzers/backtrace_baby_fuzzers/command_executor/.gitignore b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor/.gitignore similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/command_executor/.gitignore rename to fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor/.gitignore diff --git a/fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor/Cargo.toml b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor/Cargo.toml similarity index 57% rename from fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor/Cargo.toml rename to fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor/Cargo.toml index e41acff6a0..e5a51745e1 100644 --- a/fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor/Cargo.toml +++ b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "rust_code_with_fork_executor" -version = "0.0.1" +version = "0.14.1" edition = "2021" [features] @@ -18,5 +18,6 @@ opt-level = 3 debug = true 
[dependencies] -libafl = { path = "../../../libafl/" } -libafl_bolts = { path = "../../../libafl_bolts/" } +libafl = { path = "../../../../libafl" } +libafl_bolts = { path = "../../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor/src/main.rs b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor/src/main.rs similarity index 96% rename from fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor/src/main.rs rename to fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor/src/main.rs index 1b6e2507cc..88c6422598 100644 --- a/fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor/src/main.rs +++ b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_fork_executor/src/main.rs @@ -12,13 +12,14 @@ use libafl::{ generators::RandPrintablesGenerator, inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::{BacktraceObserver, StdMapObserver}, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, state::StdState, }; use libafl_bolts::{ + nonzero, ownedref::OwnedRefMut, rands::StdRand, shmem::{unix_shmem, ShMem, ShMemProvider}, @@ -122,7 +123,7 @@ pub fn main() { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/.cargo/config b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/.cargo/config similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/.cargo/config rename to fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/.cargo/config diff --git a/fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor/.gitignore b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/.gitignore similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/rust_code_with_fork_executor/.gitignore rename to fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/.gitignore diff --git a/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/Cargo.toml b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/Cargo.toml new file mode 100644 index 0000000000..a0f6350b15 --- /dev/null +++ b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "rust_code_with_inprocess_executor" +version = "0.14.1" +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../../libafl" } +libafl_bolts = { path = "../../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/src/main.rs b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/src/main.rs similarity index 95% rename from fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/src/main.rs rename to 
fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/src/main.rs index a58ef4ec6d..79578e729c 100644 --- a/fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/src/main.rs +++ b/fuzzers/baby/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/src/main.rs @@ -12,13 +12,13 @@ use libafl::{ generators::RandPrintablesGenerator, inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::{BacktraceObserver, StdMapObserver}, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, state::StdState, }; -use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice}; +use libafl_bolts::{nonzero, rands::StdRand, tuples::tuple_list, AsSlice}; /// Coverage map with explicit assignments due to the lack of instrumentation static mut SIGNALS: [u8; 16] = [0; 16]; @@ -113,7 +113,7 @@ pub fn main() { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/baby/tutorial/Cargo.toml b/fuzzers/baby/tutorial/Cargo.toml new file mode 100644 index 0000000000..7e764b303e --- /dev/null +++ b/fuzzers/baby/tutorial/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "tutorial" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl", features = ["default", "rand_trait"] } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", + "sancov_cmplog", +] } +serde = { version = "1.0.210", default-features = false, features = [ + "alloc", +] } # serialization lib +lain = { version = "0.5.5", features = [ + "serde_support", +], git = "https://github.com/AFLplusplus/lain.git", rev = "208e927bcf411f62f8a1f51ac2d9f9423a1ec5d3" } # We're using a lain fork compatible with libafl's rand version +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } +log = { version = "0.4.22", features = ["release_max_level_info"] } + +[lib] +name = "tutorial" +crate-type = ["staticlib"] diff --git a/fuzzers/baby_no_std/rust-toolchain b/fuzzers/baby/tutorial/rust-toolchain similarity index 100% rename from fuzzers/baby_no_std/rust-toolchain rename to fuzzers/baby/tutorial/rust-toolchain diff --git a/fuzzers/tutorial/src/bin/libafl_cc.rs b/fuzzers/baby/tutorial/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/tutorial/src/bin/libafl_cc.rs rename to fuzzers/baby/tutorial/src/bin/libafl_cc.rs diff --git a/fuzzers/dynamic_analysis/src/bin/libafl_cxx.rs b/fuzzers/baby/tutorial/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/dynamic_analysis/src/bin/libafl_cxx.rs rename to fuzzers/baby/tutorial/src/bin/libafl_cxx.rs diff --git a/fuzzers/tutorial/src/input.rs b/fuzzers/baby/tutorial/src/input.rs similarity index 85% rename from fuzzers/tutorial/src/input.rs rename to fuzzers/baby/tutorial/src/input.rs index 0aec91454f..4c84e69026 100644 --- a/fuzzers/tutorial/src/input.rs +++ 
b/fuzzers/baby/tutorial/src/input.rs
@@ -1,7 +1,10 @@
 use std::hash::Hash;
 
 use lain::prelude::*;
-use libafl::inputs::{HasTargetBytes, Input};
+use libafl::{
+    corpus::CorpusId,
+    inputs::{HasTargetBytes, Input},
+};
 use libafl_bolts::{ownedref::OwnedSlice, HasLen};
 use serde::{Deserialize, Serialize};
 
@@ -45,8 +48,12 @@ pub enum PacketType {
 }
 
 impl Input for PacketData {
-    fn generate_name(&self, idx: usize) -> String {
-        format!("id_{idx}")
+    fn generate_name(&self, id: Option<CorpusId>) -> String {
+        if let Some(id) = id {
+            format!("id_{}", id.0)
+        } else {
+            "id_unknown".into()
+        }
     }
 }
 
diff --git a/fuzzers/tutorial/src/lib.rs b/fuzzers/baby/tutorial/src/lib.rs
similarity index 95%
rename from fuzzers/tutorial/src/lib.rs
rename to fuzzers/baby/tutorial/src/lib.rs
index b60b44eeed..9801ee7d76 100644
--- a/fuzzers/tutorial/src/lib.rs
+++ b/fuzzers/baby/tutorial/src/lib.rs
@@ -59,7 +59,11 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
     let mut harness = |input: &PacketData| {
         let target = input.target_bytes();
         let buf = target.as_slice();
-        libfuzzer_test_one_input(buf);
+        // # Safety
+        // We're looking for crashes in there!
+        unsafe {
+            libfuzzer_test_one_input(buf);
+        }
         ExitKind::Ok
     };
 
@@ -128,14 +132,15 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
     // Setup a lain mutator with a mutational stage
     let mutator = LainMutator::new();
 
-    let power = StdPowerMutationalStage::new(mutator);
+    let power: StdPowerMutationalStage<_, _, PacketData, _, _> =
+        StdPowerMutationalStage::new(mutator);
 
     let mut stages = tuple_list!(calibration, power);
 
     // A minimization+queue policy to get testcasess from the corpus
     let scheduler = PacketLenMinimizerScheduler::new(
         &edges_observer,
-        PowerQueueScheduler::new(&mut state, &edges_observer, PowerSchedule::FAST),
+        PowerQueueScheduler::new(&mut state, &edges_observer, PowerSchedule::fast()),
     );
 
     // A fuzzer with feedbacks and a corpus scheduler
@@ -154,7 +159,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re
     // The actual target run starts here.
     // Call LLVMFUzzerInitialize() if present.
let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } diff --git a/fuzzers/tutorial/src/metadata.rs b/fuzzers/baby/tutorial/src/metadata.rs similarity index 72% rename from fuzzers/tutorial/src/metadata.rs rename to fuzzers/baby/tutorial/src/metadata.rs index 2115cdd0b9..0c93ebd5f7 100644 --- a/fuzzers/tutorial/src/metadata.rs +++ b/fuzzers/baby/tutorial/src/metadata.rs @@ -1,13 +1,11 @@ use std::borrow::Cow; use libafl::{ - corpus::Testcase, - events::EventFirer, + corpus::{Corpus, Testcase}, executors::ExitKind, - feedbacks::{Feedback, MapIndexesMetadata}, - observers::ObserversTuple, + feedbacks::{Feedback, MapIndexesMetadata, StateInitializer}, schedulers::{MinimizerScheduler, TestcaseScore}, - state::{HasCorpus, State}, + state::HasCorpus, Error, HasMetadata, }; use libafl_bolts::{Named, SerdeAny}; @@ -24,9 +22,12 @@ pub struct PacketLenTestcaseScore {} impl TestcaseScore for PacketLenTestcaseScore where - S: HasCorpus + HasMetadata, + S: HasMetadata + HasCorpus, { - fn compute(_state: &S, entry: &mut Testcase) -> Result { + fn compute( + _state: &S, + entry: &mut Testcase<::Input>, + ) -> Result { Ok(entry .metadata_map() .get::() @@ -34,37 +35,32 @@ where } } -pub type PacketLenMinimizerScheduler = - MinimizerScheduler; +pub type PacketLenMinimizerScheduler = + MinimizerScheduler; #[derive(Serialize, Deserialize, Default, Clone, Debug)] pub struct PacketLenFeedback { len: u64, } -impl Feedback for PacketLenFeedback -where - S: State, -{ +impl StateInitializer for PacketLenFeedback {} + +impl Feedback for PacketLenFeedback { #[inline] - fn is_interesting( + fn is_interesting( &mut self, _state: &mut S, _manager: &mut EM, input: &PacketData, _observers: &OT, _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + ) -> Result { self.len = input.length; Ok(false) } #[inline] - fn append_metadata( + fn append_metadata( &mut self, _state: &mut S, _manager: &mut EM, diff --git a/fuzzers/tutorial/src/mutator.rs b/fuzzers/baby/tutorial/src/mutator.rs similarity index 100% rename from fuzzers/tutorial/src/mutator.rs rename to fuzzers/baby/tutorial/src/mutator.rs diff --git a/fuzzers/tutorial/target.c b/fuzzers/baby/tutorial/target.c similarity index 100% rename from fuzzers/tutorial/target.c rename to fuzzers/baby/tutorial/target.c diff --git a/fuzzers/baby_fuzzer/Cargo.toml b/fuzzers/baby_fuzzer/Cargo.toml deleted file mode 100644 index fe557e3466..0000000000 --- a/fuzzers/baby_fuzzer/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "baby_fuzzer" -version = "0.10.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -tui = [] -std = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } diff --git a/fuzzers/baby_fuzzer_gramatron/Cargo.toml b/fuzzers/baby_fuzzer_gramatron/Cargo.toml deleted file mode 100644 index 90cf3bd181..0000000000 --- a/fuzzers/baby_fuzzer_gramatron/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "baby_fuzzer_gramatron" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 
1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -postcard = { version = "1.0", features = ["alloc"], default-features = false } # no_std compatible serde serialization format \ No newline at end of file diff --git a/fuzzers/baby_fuzzer_grimoire/Cargo.toml b/fuzzers/baby_fuzzer_grimoire/Cargo.toml deleted file mode 100644 index 2508cfc245..0000000000 --- a/fuzzers/baby_fuzzer_grimoire/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "baby_fuzzer_grimoire" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } diff --git a/fuzzers/baby_fuzzer_minimizing/Cargo.toml b/fuzzers/baby_fuzzer_minimizing/Cargo.toml deleted file mode 100644 index 36b6c98b1b..0000000000 --- a/fuzzers/baby_fuzzer_minimizing/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "baby_fuzzer_minimizing" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier ", "Addison Crump "] -edition = "2021" - -[features] -default = ["std"] -tui = [] -std = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/", features = ["prelude"] } -libafl_bolts = { path = "../../libafl_bolts/", features = ["prelude"] } diff --git a/fuzzers/baby_fuzzer_multi/Cargo.toml b/fuzzers/baby_fuzzer_multi/Cargo.toml deleted file mode 100644 index 04ab2364b4..0000000000 --- a/fuzzers/baby_fuzzer_multi/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "baby_fuzzer_multi" -version = "0.10.0" -authors = ["Andrea Fioraldi ", "Dominik Maier ", "Addison Crump "] -edition = "2021" - -[features] -default = ["std"] -tui = [] -std = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/", features = ["multipart_inputs"] } -libafl_bolts = { path = "../../libafl_bolts/" } diff --git a/fuzzers/baby_fuzzer_nautilus/Cargo.toml b/fuzzers/baby_fuzzer_nautilus/Cargo.toml deleted file mode 100644 index 36f0e0208f..0000000000 --- a/fuzzers/baby_fuzzer_nautilus/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "baby_fuzzer_nautilus" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/", features = ["default", "nautilus"] } -libafl_bolts = { path = "../../libafl_bolts/" } diff --git a/fuzzers/baby_fuzzer_swap_differential/Cargo.toml b/fuzzers/baby_fuzzer_swap_differential/Cargo.toml deleted file mode 100644 index 5851188313..0000000000 --- a/fuzzers/baby_fuzzer_swap_differential/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -name = "baby_fuzzer_swap_differential" -version = "0.13.0" -authors = ["Addison Crump "] -edition = "2021" -default-run = "fuzzer_sd" - -[features] -tui = [] -multimap = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 
-debug = true - -[build-dependencies] -anyhow = "1" -bindgen = "0.69.4" -cc = "1.0" - -[dependencies] -libafl = { path = "../../libafl" } -libafl_bolts = { path = "../../libafl_bolts" } -libafl_targets = { path = "../../libafl_targets", features = ["sancov_pcguard_hitcounts", "libfuzzer", "sancov_cmplog", "pointer_maps"] } -mimalloc = { version = "*", default-features = false } - -libafl_cc = { path = "../../libafl_cc/" } - -[[bin]] -name = "fuzzer_sd" -path = "src/main.rs" - -[[bin]] -name = "libafl_cc" -path = "src/bin/libafl_cc.rs" diff --git a/fuzzers/baby_fuzzer_tokens/Cargo.toml b/fuzzers/baby_fuzzer_tokens/Cargo.toml deleted file mode 100644 index bc000fb097..0000000000 --- a/fuzzers/baby_fuzzer_tokens/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "baby_fuzzer_tokens" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } diff --git a/fuzzers/baby_fuzzer_unicode/Cargo.toml b/fuzzers/baby_fuzzer_unicode/Cargo.toml deleted file mode 100644 index ff8bb2d732..0000000000 --- a/fuzzers/baby_fuzzer_unicode/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "baby_fuzzer_unicode" -version = "0.10.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -tui = [] -std = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/", features = ["unicode"] } -libafl_bolts = { path = "../../libafl_bolts/" } diff --git a/fuzzers/baby_fuzzer_with_forkexecutor/Cargo.toml b/fuzzers/baby_fuzzer_with_forkexecutor/Cargo.toml deleted file mode 100644 index 84425a3465..0000000000 --- a/fuzzers/baby_fuzzer_with_forkexecutor/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "baby_fuzzer_with_forkexecutor" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } diff --git a/fuzzers/baby_no_std/Cargo.toml b/fuzzers/baby_no_std/Cargo.toml deleted file mode 100644 index dafc5ae928..0000000000 --- a/fuzzers/baby_no_std/Cargo.toml +++ /dev/null @@ -1,24 +0,0 @@ -[package] -name = "baby_no_std" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { default-features = false, path = "../../libafl/" } -libafl_bolts = { default-features = false, path = "../../libafl_bolts/" } -static-alloc = "0.2.3" - -[target.'cfg(unix)'.dependencies] -libc = "0.2" - diff --git a/fuzzers/frida_executable_libpng/.gitignore b/fuzzers/binary_only/frida_executable_libpng/.gitignore similarity index 100% rename from fuzzers/frida_executable_libpng/.gitignore rename to fuzzers/binary_only/frida_executable_libpng/.gitignore diff --git a/fuzzers/binary_only/frida_executable_libpng/Cargo.toml 
b/fuzzers/binary_only/frida_executable_libpng/Cargo.toml new file mode 100644 index 0000000000..2be49193a2 --- /dev/null +++ b/fuzzers/binary_only/frida_executable_libpng/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "frida_executable_fuzzer" +version = "0.14.1" +edition = "2021" + +[lib] +name = "frida_executable_fuzzer" +crate-type = ["cdylib"] + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl", features = [ + "std", + "llmp_compression", + "llmp_bind_public", + "frida_cli", +] } #, "llmp_small_maps", "llmp_debug"]} +libafl_bolts = { path = "../../../libafl_bolts" } +frida-gum = { version = "0.15.1", features = [ + "auto-download", + "event-sink", + "invocation-listener", + "script", +] } +libafl_frida = { path = "../../../libafl_frida", features = ["cmplog"] } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_cmplog", +] } +libc = "0.2.159" +libloading = "0.8.5" +log = { version = "0.4.22", features = ["release_max_level_info"] } +num-traits = "0.2.19" +rangemap = "1.5.1" +clap = { version = "4.5.18", features = ["derive"] } +serde = "1.0.210" +mimalloc = { version = "0.1.43", default-features = false } + +backtrace = "0.3.74" +color-backtrace = "0.6.1" diff --git a/fuzzers/frida_executable_libpng/Makefile.toml b/fuzzers/binary_only/frida_executable_libpng/Makefile.toml similarity index 74% rename from fuzzers/frida_executable_libpng/Makefile.toml rename to fuzzers/binary_only/frida_executable_libpng/Makefile.toml index 92868e51c7..8cc7ee95f1 100644 --- a/fuzzers/frida_executable_libpng/Makefile.toml +++ b/fuzzers/binary_only/frida_executable_libpng/Makefile.toml @@ -1,12 +1,16 @@ # Variables [env] -CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -17,9 +21,9 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.libpng_unix] -condition = { files_not_exist = ["./libpng-1.6.37"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = ["./libpng-1.6.37"] } +script_runner = "@shell" +script = ''' wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz tar -xvf v1.6.37.tar.gz ''' @@ -31,13 +35,13 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd .. 
make -C libpng-1.6.37 ''' -dependencies = [ "libpng" ] +dependencies = ["libpng"] # Harness [tasks.harness] @@ -46,12 +50,12 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.harness_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' clang++ -O0 -c -fPIC harness.cc -o harness.o clang++ -O0 harness.cc libpng-1.6.37/.libs/libpng16.a -lz -o libpng-harness -g ''' -dependencies = [ "lib" ] +dependencies = ["lib"] # Fuzzer [tasks.fuzzer] @@ -60,8 +64,8 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.fuzzer_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cargo build --profile ${PROFILE} ''' @@ -73,10 +77,10 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' LD_PRELOAD=$CARGO_TARGET_DIR/${PROFILE_DIR}/libfrida_executable_fuzzer.so ./libpng-harness -i corpus -o out -H ./libpng-harness ''' -dependencies = [ "fuzzer", "harness" ] +dependencies = ["fuzzer", "harness"] # Test [tasks.test] @@ -86,7 +90,7 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true LD_PRELOAD=$CARGO_TARGET_DIR/${PROFILE_DIR}/libfrida_executable_fuzzer.so ./libpng-harness -i corpus -o out -H ./libpng-harness > fuzz_stdout.log & sleep 10s && pkill libpng-harness @@ -97,7 +101,7 @@ else exit 1 fi ''' -dependencies = [ "fuzzer", "harness" ] +dependencies = ["fuzzer", "harness"] # Clean up [tasks.clean] @@ -108,8 +112,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./libpng-harness make -C libpng-1.6.37 clean cargo clean diff --git a/fuzzers/frida_executable_libpng/README.md b/fuzzers/binary_only/frida_executable_libpng/README.md similarity index 96% rename from fuzzers/frida_executable_libpng/README.md rename to fuzzers/binary_only/frida_executable_libpng/README.md index fd3cae6bbd..54c45ddd1a 100644 --- a/fuzzers/frida_executable_libpng/README.md +++ b/fuzzers/binary_only/frida_executable_libpng/README.md @@ -15,7 +15,7 @@ On unix platforms, you'll need [libc++](https://libcxx.llvm.org/) to build it. Alternatively you can run `cargo make run` and this command will automatically build and run the fuzzer ### Build For Android -When building for android using a cross-compiler, make sure you have a [_standalone toolchain_](https://developer.android.com/ndk/guides/standalone_toolchain), and then add the following: +When building for android using a cross-compiler, make sure you have a [_standalone toolchain_](https://developer.android.com/ndk/guides/other_build_systems), and then add the following: 1. 
In the ~/.cargo/config file add a target with the correct cross-compiler toolchain name (in this case aarch64-linux-android, but names may vary) `[target.aarch64-linux-android]` `linker="aarch64-linux-android-clang"` diff --git a/fuzzers/frida_executable_libpng/corpus/not_kitty.png b/fuzzers/binary_only/frida_executable_libpng/corpus/not_kitty.png similarity index 100% rename from fuzzers/frida_executable_libpng/corpus/not_kitty.png rename to fuzzers/binary_only/frida_executable_libpng/corpus/not_kitty.png diff --git a/fuzzers/frida_executable_libpng/corpus/not_kitty_alpha.png b/fuzzers/binary_only/frida_executable_libpng/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/frida_executable_libpng/corpus/not_kitty_alpha.png rename to fuzzers/binary_only/frida_executable_libpng/corpus/not_kitty_alpha.png diff --git a/fuzzers/frida_executable_libpng/corpus/not_kitty_gamma.png b/fuzzers/binary_only/frida_executable_libpng/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/frida_executable_libpng/corpus/not_kitty_gamma.png rename to fuzzers/binary_only/frida_executable_libpng/corpus/not_kitty_gamma.png diff --git a/fuzzers/frida_executable_libpng/corpus/not_kitty_icc.png b/fuzzers/binary_only/frida_executable_libpng/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/frida_executable_libpng/corpus/not_kitty_icc.png rename to fuzzers/binary_only/frida_executable_libpng/corpus/not_kitty_icc.png diff --git a/fuzzers/frida_executable_libpng/harness.cc b/fuzzers/binary_only/frida_executable_libpng/harness.cc similarity index 100% rename from fuzzers/frida_executable_libpng/harness.cc rename to fuzzers/binary_only/frida_executable_libpng/harness.cc diff --git a/fuzzers/frida_executable_libpng/src/fuzzer.rs b/fuzzers/binary_only/frida_executable_libpng/src/fuzzer.rs similarity index 94% rename from fuzzers/frida_executable_libpng/src/fuzzer.rs rename to fuzzers/binary_only/frida_executable_libpng/src/fuzzer.rs index 0cb1d15ba3..623e121a73 100644 --- a/fuzzers/frida_executable_libpng/src/fuzzer.rs +++ b/fuzzers/binary_only/frida_executable_libpng/src/fuzzer.rs @@ -5,7 +5,9 @@ use std::{path::PathBuf, ptr::null}; use frida_gum::Gum; use libafl::{ corpus::{CachedOnDiskCorpus, Corpus, OnDiskCorpus}, - events::{launcher::Launcher, llmp::LlmpRestartingEventManager, EventConfig}, + events::{ + launcher::Launcher, llmp::LlmpRestartingEventManager, ClientDescription, EventConfig, + }, executors::{inprocess::InProcessExecutor, ExitKind, ShadowExecutor}, feedback_or, feedback_or_fast, feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, @@ -13,7 +15,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::{I2SRandReplace, Tokens}, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, @@ -92,18 +95,22 @@ unsafe fn fuzz( let shmem_provider = StdShMemProvider::new()?; - let mut run_client = |state: Option<_>, mgr: LlmpRestartingEventManager<_, _, _>, core_id| { + let mut run_client = |state: Option<_>, + mgr: LlmpRestartingEventManager<_, _, _>, + client_description: ClientDescription| { // The restarting state will spawn the same process again as child, then restarted it each time it crashes. 
// println!("{:?}", mgr.mgr_id()); - if options.asan && options.asan_cores.contains(core_id) { - (|state: Option<_>, mut mgr: LlmpRestartingEventManager<_, _, _>, _core_id| { + if options.asan && options.asan_cores.contains(client_description.core_id()) { + (|state: Option<_>, + mut mgr: LlmpRestartingEventManager<_, _, _>, + _client_description| { let gum = Gum::obtain(); let coverage = CoverageRuntime::new(); #[cfg(unix)] - let asan = AsanRuntime::new(&options); + let asan = AsanRuntime::new(options); #[cfg(unix)] let mut frida_helper = @@ -221,9 +228,11 @@ unsafe fn fuzz( fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; Ok(()) - })(state, mgr, core_id) - } else if options.cmplog && options.cmplog_cores.contains(core_id) { - (|state: Option<_>, mut mgr: LlmpRestartingEventManager<_, _, _>, _core_id| { + })(state, mgr, client_description) + } else if options.cmplog && options.cmplog_cores.contains(client_description.core_id()) { + (|state: Option<_>, + mut mgr: LlmpRestartingEventManager<_, _, _>, + _client_description| { let gum = Gum::obtain(); let coverage = CoverageRuntime::new(); @@ -355,9 +364,11 @@ unsafe fn fuzz( fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; Ok(()) - })(state, mgr, core_id) + })(state, mgr, client_description) } else { - (|state: Option<_>, mut mgr: LlmpRestartingEventManager<_, _, _>, _core_id| { + (|state: Option<_>, + mut mgr: LlmpRestartingEventManager<_, _, _>, + _client_description| { let gum = Gum::obtain(); let coverage = CoverageRuntime::new(); @@ -472,7 +483,7 @@ unsafe fn fuzz( fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; Ok(()) - })(state, mgr, core_id) + })(state, mgr, client_description) } }; diff --git a/fuzzers/frida_executable_libpng/src/lib.rs b/fuzzers/binary_only/frida_executable_libpng/src/lib.rs similarity index 100% rename from fuzzers/frida_executable_libpng/src/lib.rs rename to fuzzers/binary_only/frida_executable_libpng/src/lib.rs diff --git a/fuzzers/frida_libpng/.gitignore b/fuzzers/binary_only/frida_libpng/.gitignore similarity index 100% rename from fuzzers/frida_libpng/.gitignore rename to fuzzers/binary_only/frida_libpng/.gitignore diff --git a/fuzzers/binary_only/frida_libpng/Cargo.toml b/fuzzers/binary_only/frida_libpng/Cargo.toml new file mode 100644 index 0000000000..59d011ef1c --- /dev/null +++ b/fuzzers/binary_only/frida_libpng/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "frida_fuzzer" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl", features = [ + "std", + "llmp_compression", + "llmp_bind_public", + "frida_cli", + "errors_backtrace", +] } #, "llmp_small_maps", "llmp_debug"]} +libafl_bolts = { path = "../../../libafl_bolts" } +frida-gum = { version = "0.15.1", features = [ + "auto-download", + "event-sink", + "invocation-listener", + "script", +] } +libafl_frida = { path = "../../../libafl_frida", features = ["cmplog"] } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_cmplog", +] } +libloading = "0.8.5" +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } +color-backtrace = "0.6.1" +env_logger = "0.11.5" diff --git a/fuzzers/frida_libpng/Makefile.toml b/fuzzers/binary_only/frida_libpng/Makefile.toml similarity index 73% rename from 
fuzzers/frida_libpng/Makefile.toml rename to fuzzers/binary_only/frida_libpng/Makefile.toml index 531f77445b..a4ea584847 100644 --- a/fuzzers/frida_libpng/Makefile.toml +++ b/fuzzers/binary_only/frida_libpng/Makefile.toml @@ -1,13 +1,17 @@ # Variables [env] -CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -FUZZER_NAME={ source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = "frida_fuzzer", mapping = {"linux" = "frida_fuzzer", "macos" = "frida_fuzzer", "windows" = "frida_fuzzer.exe"} } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +FUZZER_NAME = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = "frida_fuzzer", mapping = { "linux" = "frida_fuzzer", "macos" = "frida_fuzzer", "windows" = "frida_fuzzer.exe" } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -18,9 +22,9 @@ mac_alias = "libpng_unix" windows_alias = "unsupported" [tasks.libpng_unix] -condition = { files_not_exist = ["./libpng-1.6.37"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = ["./libpng-1.6.37"] } +script_runner = "@shell" +script = ''' wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz tar -xvf v1.6.37.tar.gz ''' @@ -32,13 +36,13 @@ mac_alias = "lib_unix" windows_alias = "unsupported" [tasks.lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes --disable-dependency-tracking cd .. make -C libpng-1.6.37 ''' -dependencies = [ "libpng" ] +dependencies = ["libpng"] # Harness [tasks.harness] @@ -47,16 +51,16 @@ mac_alias = "harness_unix" windows_alias = "harness_windows" [tasks.harness_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' clang++ -O3 -c -fPIC harness.cc -o harness.o clang++ -O3 harness.o libpng-1.6.37/.libs/libpng16.a -shared -lz -o libpng-harness.so ''' -dependencies = [ "lib" ] +dependencies = ["lib"] [tasks.harness_windows] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cl /c harness_win.cpp && link harness_win.obj /dll ''' @@ -67,15 +71,15 @@ mac_alias = "fuzzer_unix" windows_alias = "fuzzer_windows" [tasks.fuzzer_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cargo build --profile ${PROFILE} cp ${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME} . ''' [tasks.fuzzer_windows] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cargo build --profile ${PROFILE} cp ./target/${PROFILE_DIR}/${FUZZER_NAME} . 
''' @@ -88,17 +92,17 @@ windows_alias = "run_windows" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} -F LLVMFuzzerTestOneInput -H ./libpng-harness.so -l ./libpng-harness.so ''' -dependencies = [ "fuzzer", "harness" ] +dependencies = ["fuzzer", "harness"] [tasks.run_windows] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} -F LLVMFuzzerTestOneInput -H ./harness_win.dll -l ./harness_win.dll --cores=0 ''' -dependencies = [ "fuzzer", "harness" ] +dependencies = ["fuzzer", "harness"] # Test [tasks.test] @@ -108,7 +112,7 @@ windows_alias = "test_windows" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 30s ./${FUZZER_NAME} -F LLVMFuzzerTestOneInput -H ./libpng-harness.so -l ./libpng-harness.so | tee fuzz_stdout.log 2>/dev/null || true if grep -qa "corpus: 70" fuzz_stdout.log; then @@ -118,26 +122,26 @@ else exit 1 fi ''' -dependencies = [ "fuzzer", "harness" ] +dependencies = ["fuzzer", "harness"] # Don't grep and check the result on macOS because it's unstable [tasks.test_mac] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 30s ./${FUZZER_NAME} -F LLVMFuzzerTestOneInput -H ./libpng-harness.so -l ./libpng-harness.so | tee fuzz_stdout.log 2>/dev/null || true ''' -dependencies = [ "fuzzer", "harness" ] +dependencies = ["fuzzer", "harness"] [tasks.test_windows] script_runner = "@shell" -script=''' +script = ''' start "" "frida_fuzzer.exe" -F LLVMFuzzerTestOneInput -H ./harness_win.dll -l ./harness_win.dll --cores=0 #ping is for timeout ping -n 10 127.0.0.1>NUL && taskkill /im frida_fuzzer.exe /F >nul 2>nul dir /a-d "corpus_discovered\*" && (echo Files exist) || (exit /b 1337) ''' -dependencies = [ "fuzzer", "harness" ] +dependencies = ["fuzzer", "harness"] # Clean up [tasks.clean] @@ -148,8 +152,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} make -C libpng-1.6.37 clean cargo clean diff --git a/fuzzers/frida_libpng/README.md b/fuzzers/binary_only/frida_libpng/README.md similarity index 92% rename from fuzzers/frida_libpng/README.md rename to fuzzers/binary_only/frida_libpng/README.md index d337f7ce08..648a702296 100644 --- a/fuzzers/frida_libpng/README.md +++ b/fuzzers/binary_only/frida_libpng/README.md @@ -15,7 +15,7 @@ On unix platforms, you'll need [libc++](https://libcxx.llvm.org/) to build it. Alternatively you can run `cargo make run` and this command will automatically build and run the fuzzer ### Build For Android -When building for android using a cross-compiler, make sure you have a [_standalone toolchain_](https://developer.android.com/ndk/guides/standalone_toolchain), and then add the following: +When building for android using a cross-compiler, make sure you have a [_standalone toolchain_](https://developer.android.com/ndk/guides/other_build_systems), and then add the following: 1. In the ~/.cargo/config file add a target with the correct cross-compiler toolchain name (in this case aarch64-linux-android, but names may vary) `[target.aarch64-linux-android]` `linker="aarch64-linux-android-clang"` @@ -38,7 +38,7 @@ You can also fuzz libpng-1.6.37 on windows with frida mode ### To build it with visual studio 1. Install clang for windows (make sure you add LLVM to the system path!) 
[https://github.com/llvm/llvm-project/releases/tag/llvmorg-12.0.1](https://github.com/llvm/llvm-project/releases/tag/llvmorg-12.0.1) -2. Download libpng-1.6.37[https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz] and zlib [https://zlib.net/fossils/zlib-1.2.11.tar.gz] into this directory, and rename `zlib-1.2.11` directory to `zlib`. +2. Download [libpng-1.6.37](https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz) and [zlib](https://zlib.net/fossils/zlib-1.2.11.tar.gz) into this directory, and rename `zlib-1.2.11` directory to `zlib`. 3. Build libpng1.6.37 - Open libpng-1.6.37/projects/vstudio/vstudio.sln diff --git a/fuzzers/binary_only/frida_libpng/build.rs b/fuzzers/binary_only/frida_libpng/build.rs new file mode 100644 index 0000000000..538285b196 --- /dev/null +++ b/fuzzers/binary_only/frida_libpng/build.rs @@ -0,0 +1,3 @@ +fn main() { + println!("cargo:rustc-link-arg=-rdynamic"); +} diff --git a/fuzzers/frida_gdiplus/corpus/not_kitty.png b/fuzzers/binary_only/frida_libpng/corpus/not_kitty.png similarity index 100% rename from fuzzers/frida_gdiplus/corpus/not_kitty.png rename to fuzzers/binary_only/frida_libpng/corpus/not_kitty.png diff --git a/fuzzers/frida_gdiplus/corpus/not_kitty_alpha.png b/fuzzers/binary_only/frida_libpng/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/frida_gdiplus/corpus/not_kitty_alpha.png rename to fuzzers/binary_only/frida_libpng/corpus/not_kitty_alpha.png diff --git a/fuzzers/frida_gdiplus/corpus/not_kitty_gamma.png b/fuzzers/binary_only/frida_libpng/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/frida_gdiplus/corpus/not_kitty_gamma.png rename to fuzzers/binary_only/frida_libpng/corpus/not_kitty_gamma.png diff --git a/fuzzers/frida_gdiplus/corpus/not_kitty_icc.png b/fuzzers/binary_only/frida_libpng/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/frida_gdiplus/corpus/not_kitty_icc.png rename to fuzzers/binary_only/frida_libpng/corpus/not_kitty_icc.png diff --git a/fuzzers/frida_libpng/harness.cc b/fuzzers/binary_only/frida_libpng/harness.cc similarity index 100% rename from fuzzers/frida_libpng/harness.cc rename to fuzzers/binary_only/frida_libpng/harness.cc diff --git a/fuzzers/frida_libpng/harness_win.cpp b/fuzzers/binary_only/frida_libpng/harness_win.cpp similarity index 82% rename from fuzzers/frida_libpng/harness_win.cpp rename to fuzzers/binary_only/frida_libpng/harness_win.cpp index 5ccc0c104f..bfc42d3074 100644 --- a/fuzzers/frida_libpng/harness_win.cpp +++ b/fuzzers/binary_only/frida_libpng/harness_win.cpp @@ -4,7 +4,7 @@ #include extern "C" __declspec(dllexport) size_t - LLVMFuzzerTestOneInput(const char *data, unsigned int len) { +LLVMFuzzerTestOneInput(const char *data, unsigned int len) { if (data[0] == 'b') { if (data[1] == 'a') { if (data[2] == 'd') { diff --git a/fuzzers/frida_libpng/src/fuzzer.rs b/fuzzers/binary_only/frida_libpng/src/fuzzer.rs similarity index 95% rename from fuzzers/frida_libpng/src/fuzzer.rs rename to fuzzers/binary_only/frida_libpng/src/fuzzer.rs index 63c8453767..31973d11fc 100644 --- a/fuzzers/frida_libpng/src/fuzzer.rs +++ b/fuzzers/binary_only/frida_libpng/src/fuzzer.rs @@ -5,7 +5,9 @@ use std::path::PathBuf; use frida_gum::Gum; use libafl::{ corpus::{CachedOnDiskCorpus, Corpus, OnDiskCorpus}, - events::{launcher::Launcher, llmp::LlmpRestartingEventManager, EventConfig}, + events::{ + launcher::Launcher, llmp::LlmpRestartingEventManager, ClientDescription, EventConfig, + }, executors::{inprocess::InProcessExecutor, ExitKind, 
ShadowExecutor}, feedback_or, feedback_or_fast, feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, @@ -13,7 +15,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::{I2SRandReplace, Tokens}, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, @@ -72,7 +75,9 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { let shmem_provider = StdShMemProvider::new()?; - let mut run_client = |state: Option<_>, mgr: LlmpRestartingEventManager<_, _, _>, core_id| { + let mut run_client = |state: Option<_>, + mgr: LlmpRestartingEventManager<_, _, _>, + client_description: ClientDescription| { // The restarting state will spawn the same process again as child, then restarted it each time it crashes. // println!("{:?}", mgr.mgr_id()); @@ -89,8 +94,10 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { ExitKind::Ok }; - if options.asan && options.asan_cores.contains(core_id) { - (|state: Option<_>, mut mgr: LlmpRestartingEventManager<_, _, _>, _core_id| { + if options.asan && options.asan_cores.contains(client_description.core_id()) { + (|state: Option<_>, + mut mgr: LlmpRestartingEventManager<_, _, _>, + _client_description| { let gum = Gum::obtain(); let coverage = CoverageRuntime::new(); @@ -213,9 +220,11 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; Ok(()) - })(state, mgr, core_id) - } else if options.cmplog && options.cmplog_cores.contains(core_id) { - (|state: Option<_>, mut mgr: LlmpRestartingEventManager<_, _, _>, _core_id| { + })(state, mgr, client_description) + } else if options.cmplog && options.cmplog_cores.contains(client_description.core_id()) { + (|state: Option<_>, + mut mgr: LlmpRestartingEventManager<_, _, _>, + _client_description| { let gum = Gum::obtain(); let coverage = CoverageRuntime::new(); @@ -348,9 +357,11 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; Ok(()) - })(state, mgr, core_id) + })(state, mgr, client_description) } else { - (|state: Option<_>, mut mgr: LlmpRestartingEventManager<_, _, _>, _core_id| { + (|state: Option<_>, + mut mgr: LlmpRestartingEventManager<_, _, _>, + _client_description| { let gum = Gum::obtain(); let coverage = CoverageRuntime::new(); @@ -465,7 +476,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; Ok(()) - })(state, mgr, core_id) + })(state, mgr, client_description) } }; diff --git a/fuzzers/frida_gdiplus/src/main.rs b/fuzzers/binary_only/frida_libpng/src/main.rs similarity index 100% rename from fuzzers/frida_gdiplus/src/main.rs rename to fuzzers/binary_only/frida_libpng/src/main.rs diff --git a/fuzzers/frida_gdiplus/.gitignore b/fuzzers/binary_only/frida_windows_gdiplus/.gitignore similarity index 100% rename from fuzzers/frida_gdiplus/.gitignore rename to fuzzers/binary_only/frida_windows_gdiplus/.gitignore diff --git a/fuzzers/binary_only/frida_windows_gdiplus/Cargo.toml b/fuzzers/binary_only/frida_windows_gdiplus/Cargo.toml new file mode 100644 index 0000000000..43b2252748 --- /dev/null +++ b/fuzzers/binary_only/frida_windows_gdiplus/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = 
"frida_windows_gdiplus" +version = "0.14.1" +authors = ["Richard Johnson "] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl", features = [ + "std", + "llmp_compression", + "llmp_bind_public", + "frida_cli", + "errors_backtrace", +] } #, "llmp_small_maps", "llmp_debug"]} +libafl_bolts = { path = "../../../libafl_bolts" } +frida-gum = { version = "0.15.1", features = [ + "auto-download", + "event-sink", + "invocation-listener", + "script", +] } +libafl_frida = { path = "../../../libafl_frida", features = ["cmplog"] } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_cmplog", +] } +libloading = "0.8.5" +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } +dlmalloc = { version = "0.2.6", features = ["global"] } +color-backtrace = "0.6.1" +env_logger = "0.11.5" +iced-x86 = { version = "1.21.0", features = ["code_asm"] } diff --git a/fuzzers/frida_gdiplus/Makefile.toml b/fuzzers/binary_only/frida_windows_gdiplus/Makefile.toml similarity index 56% rename from fuzzers/frida_gdiplus/Makefile.toml rename to fuzzers/binary_only/frida_windows_gdiplus/Makefile.toml index 5d7f24b555..f8cb981ab3 100644 --- a/fuzzers/frida_gdiplus/Makefile.toml +++ b/fuzzers/binary_only/frida_windows_gdiplus/Makefile.toml @@ -1,13 +1,17 @@ # Variables [env] -CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -FUZZER_NAME={ source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = "frida_gdiplus", mapping = {"linux" = "frida_gdiplus", "macos" = "frida_gdiplus", "windows" = "frida_gdiplus.exe"} } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +FUZZER_NAME = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = "frida_windows_gdiplus", mapping = { "linux" = "frida_windows_gdiplus", "macos" = "frida_windows_gdiplus", "windows" = "frida_windows_gdiplus.exe" } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -19,14 +23,14 @@ windows_alias = "harness_windows" [tasks.harness_windows] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cl.exe /LD harness.cc /link /dll gdiplus.lib ole32.lib ''' [tasks.harness_windows_cmplog_test] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' ml64 cmplog_test.asm /subsystem:windows /link /dll /def:cmplog_test.def /entry:dll_main /out:cmplog.dll ''' @@ -37,8 +41,8 @@ mac_alias = "unsupported" windows_alias = "fuzzer_windows" [tasks.fuzzer_windows] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cargo build --profile ${PROFILE} cp ./target/${PROFILE_DIR}/${FUZZER_NAME} . 
''' @@ -51,10 +55,10 @@ windows_alias = "run_windows" [tasks.run_windows] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} -H harness.dll -i corpus -o output --libs-to-instrument gdi32.dll --libs-to-instrument gdi32full.dll --libs-to-instrument gdiplus.dll --libs-to-instrument WindowsCodecs.dll --disable-excludes ''' -dependencies = [ "fuzzer", "harness" ] +dependencies = ["fuzzer", "harness"] # Test [tasks.test] @@ -69,27 +73,27 @@ windows_alias = "test_windows_cmplog" [tasks.test_windows_cmplog] script_runner = "@shell" -script=''' +script = ''' @echo off for %%i in (t1 t2 t3 t4 t5 t6 t7) do ( echo Testing %%i... rmdir /s /q output_%%i - start "" "frida_gdiplus.exe" -H cmplog.dll -i corpus -o output_%%i --libs-to-instrument cmplog.dll -F %%i -C - ping -n 3 127.0.0.1>NUL && taskkill /im frida_gdiplus.exe /F + start "" "frida_windows_gdiplus.exe" -H cmplog.dll -i corpus -o output_%%i --libs-to-instrument cmplog.dll -F %%i -C + ping -n 3 127.0.0.1>NUL && taskkill /im frida_windows_gdiplus.exe /F >nul 2>nul dir /a-d "output_%%i" && (echo Files exist) || (exit /b 1337) ) echo All tests done ''' -dependencies = [ "fuzzer", "harness_windows_cmplog_test" ] +dependencies = ["fuzzer", "harness_windows_cmplog_test"] [tasks.test_windows] script_runner = "@shell" -script=''' -start "" "frida_gdiplus.exe" -H harness.dll -i corpus -o output --libs-to-instrument gdi32.dll --libs-to-instrument gdi32full.dll --libs-to-instrument gdiplus.dll --libs-to-instrument WindowsCodecs.dll --disable-excludes +script = ''' +start "" "frida_windows_gdiplus.exe" -H harness.dll -i corpus -o output --libs-to-instrument gdi32.dll --libs-to-instrument gdi32full.dll --libs-to-instrument gdiplus.dll --libs-to-instrument WindowsCodecs.dll --disable-excludes #ping is for timeout -ping -n 10 127.0.0.1>NUL && taskkill /im frida_gdiplus.exe /F +ping -n 10 127.0.0.1>NUL && taskkill /im frida_windows_gdiplus.exe /F >nul 2>nul dir /a-d "corpus_discovered\*" && (echo Files exist) || (exit /b 1337) ''' -dependencies = [ "fuzzer", "harness" ] +dependencies = ["fuzzer", "harness"] diff --git a/fuzzers/binary_only/frida_windows_gdiplus/README.md b/fuzzers/binary_only/frida_windows_gdiplus/README.md new file mode 100644 index 0000000000..e432d5ac43 --- /dev/null +++ b/fuzzers/binary_only/frida_windows_gdiplus/README.md @@ -0,0 +1,14 @@ +# LibAFL Frida_Windows_GdiPlus Example + +This is an example of how to fuzz binary-only DLLs on Windows. +The example fuzzer will explore [gdiplus](https://learn.microsoft.com/en-us/windows/win32/gdiplus/-gdiplus-gdi-start) on Windows, using the [Frida](https://frida.re/) DBI. + +## Build + +To build this example, run `cargo build --release` in this folder.
+ +Then compile the harness `cl.exe /LD harness.cc /link /dll gdiplus.lib ole32.lib` + +## Run + +To run the example `target\release\frida_windows_gdiplus.exe -H harness.dll -i corpus -o output --libs-to-instrument gdi32.dll --libs-to-instrument gdi32full.dll --libs-to-instrument gdiplus.dll --libs-to-instrument WindowsCodecs.dll --disable-excludes` diff --git a/fuzzers/frida_gdiplus/cargo/.config b/fuzzers/binary_only/frida_windows_gdiplus/cargo/.config similarity index 100% rename from fuzzers/frida_gdiplus/cargo/.config rename to fuzzers/binary_only/frida_windows_gdiplus/cargo/.config diff --git a/fuzzers/frida_gdiplus/cmplog_test.asm b/fuzzers/binary_only/frida_windows_gdiplus/cmplog_test.asm similarity index 100% rename from fuzzers/frida_gdiplus/cmplog_test.asm rename to fuzzers/binary_only/frida_windows_gdiplus/cmplog_test.asm diff --git a/fuzzers/frida_gdiplus/cmplog_test.def b/fuzzers/binary_only/frida_windows_gdiplus/cmplog_test.def similarity index 100% rename from fuzzers/frida_gdiplus/cmplog_test.def rename to fuzzers/binary_only/frida_windows_gdiplus/cmplog_test.def diff --git a/fuzzers/frida_libpng/corpus/not_kitty.png b/fuzzers/binary_only/frida_windows_gdiplus/corpus/not_kitty.png similarity index 100% rename from fuzzers/frida_libpng/corpus/not_kitty.png rename to fuzzers/binary_only/frida_windows_gdiplus/corpus/not_kitty.png diff --git a/fuzzers/frida_libpng/corpus/not_kitty_alpha.png b/fuzzers/binary_only/frida_windows_gdiplus/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/frida_libpng/corpus/not_kitty_alpha.png rename to fuzzers/binary_only/frida_windows_gdiplus/corpus/not_kitty_alpha.png diff --git a/fuzzers/frida_libpng/corpus/not_kitty_gamma.png b/fuzzers/binary_only/frida_windows_gdiplus/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/frida_libpng/corpus/not_kitty_gamma.png rename to fuzzers/binary_only/frida_windows_gdiplus/corpus/not_kitty_gamma.png diff --git a/fuzzers/frida_libpng/corpus/not_kitty_icc.png b/fuzzers/binary_only/frida_windows_gdiplus/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/frida_libpng/corpus/not_kitty_icc.png rename to fuzzers/binary_only/frida_windows_gdiplus/corpus/not_kitty_icc.png diff --git a/fuzzers/frida_gdiplus/harness.cc b/fuzzers/binary_only/frida_windows_gdiplus/harness.cc similarity index 100% rename from fuzzers/frida_gdiplus/harness.cc rename to fuzzers/binary_only/frida_windows_gdiplus/harness.cc diff --git a/fuzzers/frida_gdiplus/src/fuzzer.rs b/fuzzers/binary_only/frida_windows_gdiplus/src/fuzzer.rs similarity index 94% rename from fuzzers/frida_gdiplus/src/fuzzer.rs rename to fuzzers/binary_only/frida_windows_gdiplus/src/fuzzer.rs index 8f9fdbe8de..c33fca2f81 100644 --- a/fuzzers/frida_gdiplus/src/fuzzer.rs +++ b/fuzzers/binary_only/frida_windows_gdiplus/src/fuzzer.rs @@ -22,7 +22,9 @@ use std::path::PathBuf; use frida_gum::Gum; use libafl::{ corpus::{CachedOnDiskCorpus, Corpus, OnDiskCorpus}, - events::{launcher::Launcher, llmp::LlmpRestartingEventManager, EventConfig}, + events::{ + launcher::Launcher, llmp::LlmpRestartingEventManager, ClientDescription, EventConfig, + }, executors::{inprocess::InProcessExecutor, ExitKind, ShadowExecutor}, feedback_and_fast, feedback_or, feedback_or_fast, feedbacks::{ConstFeedback, CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, @@ -30,7 +32,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + 
havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::{I2SRandReplace, Tokens}, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, @@ -81,7 +84,9 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { let shmem_provider = StdShMemProvider::new()?; - let mut run_client = |state: Option<_>, mgr: LlmpRestartingEventManager<_, _, _>, core_id| { + let mut run_client = |state: Option<_>, + mgr: LlmpRestartingEventManager<_, _, _>, + client_description: ClientDescription| { // The restarting state will spawn the same process again as child, then restarted it each time it crashes. // println!("{:?}", mgr.mgr_id()); @@ -98,12 +103,14 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { ExitKind::Ok }; - if options.asan && options.asan_cores.contains(core_id) { - (|state: Option<_>, mut mgr: LlmpRestartingEventManager<_, _, _>, _core_id| { + if options.asan && options.asan_cores.contains(client_description.core_id()) { + (|state: Option<_>, + mut mgr: LlmpRestartingEventManager<_, _, _>, + _client_description| { let gum = Gum::obtain(); let coverage = CoverageRuntime::new(); - let asan = AsanRuntime::new(&options); + let asan = AsanRuntime::new(options); let mut frida_helper = FridaInstrumentationHelper::new(&gum, options, tuple_list!(coverage, asan)); @@ -211,9 +218,11 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; Ok(()) - })(state, mgr, core_id) - } else if options.cmplog && options.cmplog_cores.contains(core_id) { - (|state: Option<_>, mut mgr: LlmpRestartingEventManager<_, _, _>, _core_id| { + })(state, mgr, client_description) + } else if options.cmplog && options.cmplog_cores.contains(client_description.core_id()) { + (|state: Option<_>, + mut mgr: LlmpRestartingEventManager<_, _, _>, + _client_description| { let gum = Gum::obtain(); let coverage = CoverageRuntime::new(); @@ -339,9 +348,11 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; Ok(()) - })(state, mgr, core_id) + })(state, mgr, client_description) } else { - (|state: Option<_>, mut mgr: LlmpRestartingEventManager<_, _, _>, _core_id| { + (|state: Option<_>, + mut mgr: LlmpRestartingEventManager<_, _, _>, + _client_description| { let gum = Gum::obtain(); let coverage = CoverageRuntime::new(); @@ -453,7 +464,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { .unwrap(); Ok(()) - })(state, mgr, core_id) + })(state, mgr, client_description) } }; diff --git a/fuzzers/frida_libpng/src/main.rs b/fuzzers/binary_only/frida_windows_gdiplus/src/main.rs similarity index 100% rename from fuzzers/frida_libpng/src/main.rs rename to fuzzers/binary_only/frida_windows_gdiplus/src/main.rs diff --git a/fuzzers/baby_fuzzer_multi/.gitignore b/fuzzers/binary_only/fuzzbench_fork_qemu/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_multi/.gitignore rename to fuzzers/binary_only/fuzzbench_fork_qemu/.gitignore diff --git a/fuzzers/binary_only/fuzzbench_fork_qemu/Cargo.toml b/fuzzers/binary_only/fuzzbench_fork_qemu/Cargo.toml new file mode 100644 index 0000000000..a02213c7fa --- /dev/null +++ b/fuzzers/binary_only/fuzzbench_fork_qemu/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "fuzzbench_fork_qemu" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] 
+lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[profile.release-fuzzbench] +inherits = "release" +debug = false +strip = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_qemu = { path = "../../../libafl_qemu", features = [ + "x86_64", + "usermode", +] } +libafl_targets = { path = "../../../libafl_targets" } + +log = { version = "0.4.22", features = ["release_max_level_info"] } +clap = { version = "4.5.18", features = ["default"] } +nix = { version = "0.29.0", features = ["fs"] } diff --git a/fuzzers/binary_only/fuzzbench_fork_qemu/Makefile.toml b/fuzzers/binary_only/fuzzbench_fork_qemu/Makefile.toml new file mode 100644 index 0000000000..7636140bca --- /dev/null +++ b/fuzzers/binary_only/fuzzbench_fork_qemu/Makefile.toml @@ -0,0 +1,115 @@ +env_scripts = [''' +#!@duckscript +profile = get_env PROFILE + +if eq ${profile} "dev" + set_env PROFILE_DIR debug +else + set_env PROFILE_DIR ${profile} +end +''', ''' +#!@duckscript +runs_on_ci = get_env RUN_ON_CI + +if ${runs_on_ci} + cargo_target_dir = get_env CARGO_MAKE_CRATE_TARGET_DIRECTORY + set_env TARGET_DIR ${cargo_target_dir} +end +'''] + +# Variables +[env] +FUZZER_NAME = 'harness' +PROJECT_DIR = { script = ["pwd"] } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +TARGET_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}" + +[tasks.unsupported] +script_runner = "@shell" +script = ''' +echo "Qemu fuzzer not supported on windows" +''' + +# fuzzer +[tasks.fuzzer] +linux_alias = "fuzzer_unix" +mac_alias = "fuzzer_unix" +windows_alias = "unsupported" + +[tasks.fuzzer_unix] +command = "cargo" +args = ["build", "--profile", "${PROFILE}"] + +# Harness +[tasks.harness] +linux_alias = "harness_unix" +mac_alias = "harness_unix" +windows_alias = "unsupported" + +[tasks.harness_unix] +script_runner = "@shell" +script = ''' +cc -c "${PROJECT_DIR}/libfuzzer_main.c" +cc \ + ./fuzz.c \ + ./libfuzzer_main.o \ + -o ${FUZZER_NAME} \ + -lm -lz +''' + +# Run the fuzzer +[tasks.run] +linux_alias = "run_unix" +mac_alias = "run_unix" +windows_alias = "unsupported" + +[tasks.run_unix] +command = "cargo" +args = [ + "run", + "--profile", + "${PROFILE}", + "./${FUZZER_NAME}", + "--", + "--libafl-in", + "./corpus", + "--libafl-out", + "./out", + "./${FUZZER_NAME}", +] +dependencies = ["harness"] + +# Run the fuzzer +[tasks.test] +linux_alias = "test_unix" +mac_alias = "test_unix" +windows_alias = "unsupported" + +# Short test +[tasks.test_unix] +script_runner = "@shell" +script = ''' +timeout 15s ${TARGET_DIR}/${PROFILE_DIR}/fuzzbench_fork_qemu ${PROJECT_DIR}/harness -- --libafl-in ${PROJECT_DIR}/../../inprocess/libfuzzer_libpng/corpus --libafl-out ${PROJECT_DIR}/out ${PROJECT_DIR}/harness | tee fuzz_stdout.log +if grep -qa "objectives: 1" fuzz_stdout.log; then + echo "Fuzzer is working" +else + echo "Fuzzer does not generate any testcases or any crashes" + exit 1 +fi +''' +dependencies = ["harness", "fuzzer"] + +# Clean up +[tasks.clean] +linux_alias = "clean_unix" +mac_alias = "clean_unix" +windows_alias = "unsupported" + +[tasks.clean_unix] +# Disable default `clean` definition +clear = true +script_runner = "@shell" +script = ''' +rm -f ./${FUZZER_NAME} +cargo clean +''' diff --git a/fuzzers/fuzzbench/fuzz.c b/fuzzers/binary_only/fuzzbench_fork_qemu/fuzz.c similarity index 100% rename from fuzzers/fuzzbench/fuzz.c rename to fuzzers/binary_only/fuzzbench_fork_qemu/fuzz.c diff --git a/fuzzers/fuzzbench_fork_qemu/libfuzzer_main.c 
b/fuzzers/binary_only/fuzzbench_fork_qemu/libfuzzer_main.c similarity index 100% rename from fuzzers/fuzzbench_fork_qemu/libfuzzer_main.c rename to fuzzers/binary_only/fuzzbench_fork_qemu/libfuzzer_main.c diff --git a/fuzzers/fuzzbench_fork_qemu/src/fuzzer.rs b/fuzzers/binary_only/fuzzbench_fork_qemu/src/fuzzer.rs similarity index 83% rename from fuzzers/fuzzbench_fork_qemu/src/fuzzer.rs rename to fuzzers/binary_only/fuzzbench_fork_qemu/src/fuzzer.rs index 862bbc243a..838f8d8df5 100644 --- a/fuzzers/fuzzbench_fork_qemu/src/fuzzer.rs +++ b/fuzzers/binary_only/fuzzbench_fork_qemu/src/fuzzer.rs @@ -9,6 +9,7 @@ use std::{ io::{self, Write}, path::PathBuf, process, + ptr::NonNull, time::Duration, }; @@ -23,8 +24,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, mutators::{ - scheduled::havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, - StdMOptMutator, StdScheduledMutator, Tokens, + havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, StdMOptMutator, + StdScheduledMutator, Tokens, }, observers::{CanTrack, ConstMapObserver, HitcountsMapObserver, TimeObserver}, schedulers::{ @@ -39,21 +40,23 @@ use libafl::{ }; use libafl_bolts::{ current_time, - os::{dup2, unix_signals::Signal}, + os::dup2, rands::StdRand, shmem::{ShMemProvider, StdShMemProvider}, tuples::{tuple_list, Merge}, AsSlice, AsSliceMut, }; use libafl_qemu::{ - cmplog::{CmpLogMap, CmpLogObserver, QemuCmpLogChildHelper}, - edges::{QemuEdgeCoverageChildHelper, EDGES_MAP_PTR, EDGES_MAP_SIZE_IN_USE}, elf::EasyElf, filter_qemu_args, - hooks::QemuHooks, - GuestReg, MmapPerms, Qemu, QemuExitError, QemuExitReason, QemuForkExecutor, QemuShutdownCause, - Regs, + modules::{ + cmplog::{CmpLogChildModule, CmpLogMap, CmpLogObserver}, + edges::StdEdgeCoverageChildModule, + }, + Emulator, GuestReg, MmapPerms, QemuExitError, QemuExitReason, QemuForkExecutor, + QemuShutdownCause, Regs, }; +use libafl_targets::{EDGES_MAP_DEFAULT_SIZE, EDGES_MAP_PTR}; #[cfg(unix)] use nix::unistd::dup; @@ -147,8 +150,37 @@ fn fuzz( env::remove_var("LD_LIBRARY_PATH"); let args: Vec = env::args().collect(); - let env: Vec<(String, String)> = env::vars().collect(); - let qemu = Qemu::init(&args, &env)?; + + let mut shmem_provider = StdShMemProvider::new()?; + + let mut edges_shmem = shmem_provider.new_shmem(EDGES_MAP_DEFAULT_SIZE).unwrap(); + let edges = edges_shmem.as_slice_mut(); + unsafe { EDGES_MAP_PTR = edges.as_mut_ptr() }; + + // Create an observation channel using the coverage map + let mut edges_observer = unsafe { + HitcountsMapObserver::new(ConstMapObserver::from_mut_ptr( + "edges", + NonNull::new(edges.as_mut_ptr()) + .expect("map ptr is null.") + .cast::<[u8; EDGES_MAP_DEFAULT_SIZE]>(), + )) + .track_indices() + }; + + let emulator_modules = tuple_list!( + StdEdgeCoverageChildModule::builder() + .const_map_observer(edges_observer.as_mut()) + .build()?, + CmpLogChildModule::default(), + ); + + let emulator = Emulator::empty() + .qemu_cli(args) + .modules(emulator_modules) + .build()?; + + let qemu = emulator.qemu(); let mut elf_buffer = Vec::new(); let elf = EasyElf::from_file(qemu.binary_path(), &mut elf_buffer)?; @@ -166,11 +198,12 @@ fn fuzz( } } - println!("Break at {:#x}", qemu.read_reg::<_, u64>(Regs::Pc).unwrap()); + println!("Break at {:#x}", qemu.read_reg(Regs::Pc).unwrap()); let stack_ptr: u64 = qemu.read_reg(Regs::Sp).unwrap(); let mut ret_addr = [0; 8]; - unsafe { qemu.read_mem(stack_ptr, &mut ret_addr) }; + qemu.read_mem(stack_ptr, &mut ret_addr) + .expect("qemu read failed"); let 
ret_addr = u64::from_le_bytes(ret_addr); println!("Stack pointer = {stack_ptr:#x}"); @@ -206,18 +239,14 @@ fn fuzz( writeln!(log.borrow_mut(), "{:?} {s}", current_time()).unwrap(); }); - let mut shmem_provider = StdShMemProvider::new()?; - - let mut edges_shmem = shmem_provider.new_shmem(EDGES_MAP_SIZE_IN_USE).unwrap(); - let edges = edges_shmem.as_slice_mut(); - unsafe { EDGES_MAP_PTR = edges.as_mut_ptr() }; - let mut cmp_shmem = shmem_provider.uninit_on_shmem::().unwrap(); let cmplog = cmp_shmem.as_slice_mut(); // Beginning of a page should be properly aligned. #[allow(clippy::cast_ptr_alignment)] - let cmplog_map_ptr = cmplog.as_mut_ptr().cast::(); + let cmplog_map_ptr = cmplog + .as_mut_ptr() + .cast::(); let (state, mut mgr) = match SimpleRestartingEventManager::launch(monitor, &mut shmem_provider) { @@ -233,15 +262,6 @@ fn fuzz( }, }; - // Create an observation channel using the coverage map - let edges_observer = unsafe { - HitcountsMapObserver::new(ConstMapObserver::<_, EDGES_MAP_SIZE_IN_USE>::from_mut_ptr( - "edges", - edges.as_mut_ptr(), - )) - .track_indices() - }; - // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); @@ -294,19 +314,20 @@ fn fuzz( 5, )?; - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( &edges_observer, - PowerQueueScheduler::new(&mut state, &edges_observer, PowerSchedule::FAST), + PowerQueueScheduler::new(&mut state, &edges_observer, PowerSchedule::fast()), ); // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); // The wrapped harness function, calling out to the LLVM-style harness - let mut harness = |input: &BytesInput| { + let mut harness = |_emulator: &mut Emulator<_, _, _, _, _>, input: &BytesInput| { let target = input.target_bytes(); let mut buf = target.as_slice(); let mut len = buf.len(); @@ -316,7 +337,10 @@ fn fuzz( } unsafe { - qemu.write_mem(input_addr, buf); + // # Safety + // The input buffer size is checked above. 
We use `write_mem_unchecked` for performance reasons + // For better error handling, use `write_mem` and handle the returned Result + qemu.write_mem_unchecked(input_addr, buf); qemu.write_reg(Regs::Rdi, input_addr).unwrap(); qemu.write_reg(Regs::Rsi, len as GuestReg).unwrap(); @@ -324,28 +348,21 @@ fn fuzz( qemu.write_reg(Regs::Rsp, stack_ptr).unwrap(); match qemu.run() { - Ok(QemuExitReason::Breakpoint(_)) => {} - Ok(QemuExitReason::End(QemuShutdownCause::HostSignal(Signal::SigInterrupt))) => { - process::exit(0) + Ok(QemuExitReason::Breakpoint(_)) => ExitKind::Ok, + Ok(QemuExitReason::End(QemuShutdownCause::HostSignal(signal))) => { + signal.handle(); + panic!("Unexpected signal: {signal:?}"); + } + Err(QemuExitError::UnexpectedExit) => ExitKind::Crash, + _ => { + panic!("Unexpected QEMU exit.") } - Err(QemuExitError::UnexpectedExit) => return ExitKind::Crash, - _ => panic!("Unexpected QEMU exit."), } } - - ExitKind::Ok }; - let mut hooks = QemuHooks::new( - qemu.clone(), - tuple_list!( - QemuEdgeCoverageChildHelper::default(), - QemuCmpLogChildHelper::default(), - ), - ); - let executor = QemuForkExecutor::new( - &mut hooks, + emulator, &mut harness, tuple_list!(edges_observer, time_observer), &mut fuzzer, diff --git a/fuzzers/fuzzbench_fork_qemu/src/main.rs b/fuzzers/binary_only/fuzzbench_fork_qemu/src/main.rs similarity index 100% rename from fuzzers/fuzzbench_fork_qemu/src/main.rs rename to fuzzers/binary_only/fuzzbench_fork_qemu/src/main.rs diff --git a/fuzzers/baby_fuzzer_nautilus/.gitignore b/fuzzers/binary_only/fuzzbench_qemu/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_nautilus/.gitignore rename to fuzzers/binary_only/fuzzbench_qemu/.gitignore diff --git a/fuzzers/binary_only/fuzzbench_qemu/Cargo.toml b/fuzzers/binary_only/fuzzbench_qemu/Cargo.toml new file mode 100644 index 0000000000..6bef3d9f1c --- /dev/null +++ b/fuzzers/binary_only/fuzzbench_qemu/Cargo.toml @@ -0,0 +1,37 @@ +[package] +name = "fuzzbench_qemu" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[profile.release-fuzzbench] +inherits = "release" +debug = false +strip = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_qemu = { path = "../../../libafl_qemu", features = [ + "x86_64", + "usermode", +] } +libafl_targets = { path = "../../../libafl_targets", version = "0.14.1" } + +env_logger = "0.11.5" +log = { version = "0.4.22", features = ["release_max_level_info"] } +clap = { version = "4.5.18", features = ["default"] } +nix = { version = "0.29.0", features = ["fs"] } diff --git a/fuzzers/binary_only/fuzzbench_qemu/Makefile.toml b/fuzzers/binary_only/fuzzbench_qemu/Makefile.toml new file mode 100644 index 0000000000..1ebe6632bb --- /dev/null +++ b/fuzzers/binary_only/fuzzbench_qemu/Makefile.toml @@ -0,0 +1,99 @@ +# Variables +[env] +FUZZER_NAME = 'harness' +PROJECT_DIR = { script = ["pwd"] } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } +TARGET_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}" + +[tasks.unsupported] +script_runner = "@shell" +script = ''' +echo "Qemu fuzzer not supported on windows" +''' + +# fuzzer +[tasks.fuzzer] +linux_alias = 
"fuzzer_unix" +mac_alias = "fuzzer_unix" +windows_alias = "unsupported" + +[tasks.fuzzer_unix] +command = "cargo" +args = ["build", "--profile", "${PROFILE}"] + +# Harness +[tasks.harness] +linux_alias = "harness_unix" +mac_alias = "harness_unix" +windows_alias = "unsupported" + +[tasks.harness_unix] +script_runner = "@shell" +script = ''' +cc -c "${PROJECT_DIR}/libfuzzer_main.c" +cc \ + ./fuzz.c \ + ./libfuzzer_main.o \ + -o ${FUZZER_NAME} \ + -lm -lz +''' + +# Run the fuzzer +[tasks.run] +linux_alias = "run_unix" +mac_alias = "run_unix" +windows_alias = "unsupported" + +[tasks.run_unix] +command = "cargo" +args = [ + "run", + "--profile", + "${PROFILE}", + "./${FUZZER_NAME}", + "--", + "--libafl-in", + "../../inprocess/libfuzzer_libpng/corpus", + "--libafl-out", + "./out", + "./${FUZZER_NAME}", +] +dependencies = ["harness"] + +# Run the fuzzer +[tasks.test] +linux_alias = "test_unix" +mac_alias = "test_unix" +windows_alias = "unsupported" + +# Short test +[tasks.test_unix] +script_runner = "@shell" +script = ''' +timeout 15s ${TARGET_DIR}/${PROFILE_DIR}/fuzzbench_qemu ${PROJECT_DIR}/harness -- --libafl-in ${PROJECT_DIR}/../../inprocess/libfuzzer_libpng/corpus --libafl-out ${PROJECT_DIR}/out ${PROJECT_DIR}/harness | tee fuzz_stdout.log +if grep -qa "objectives: 1" fuzz_stdout.log; then + echo "Fuzzer is working" +else + echo "Fuzzer does not generate any testcases or any crashes" + exit 1 +fi +''' +dependencies = ["harness", "fuzzer"] + +# Clean up +[tasks.clean] +linux_alias = "clean_unix" +mac_alias = "clean_unix" +windows_alias = "unsupported" + +[tasks.clean_unix] +# Disable default `clean` definition +clear = true +script_runner = "@shell" +script = ''' +rm -f ./${FUZZER_NAME} +cargo clean +''' diff --git a/fuzzers/fuzzbench/README.md b/fuzzers/binary_only/fuzzbench_qemu/README.md similarity index 100% rename from fuzzers/fuzzbench/README.md rename to fuzzers/binary_only/fuzzbench_qemu/README.md diff --git a/fuzzers/fuzzbench_ctx/fuzz.c b/fuzzers/binary_only/fuzzbench_qemu/fuzz.c similarity index 100% rename from fuzzers/fuzzbench_ctx/fuzz.c rename to fuzzers/binary_only/fuzzbench_qemu/fuzz.c diff --git a/fuzzers/fuzzbench_qemu/libfuzzer_main.c b/fuzzers/binary_only/fuzzbench_qemu/libfuzzer_main.c similarity index 100% rename from fuzzers/fuzzbench_qemu/libfuzzer_main.c rename to fuzzers/binary_only/fuzzbench_qemu/libfuzzer_main.c diff --git a/fuzzers/fuzzbench_qemu/src/fuzzer.rs b/fuzzers/binary_only/fuzzbench_qemu/src/fuzzer.rs similarity index 81% rename from fuzzers/fuzzbench_qemu/src/fuzzer.rs rename to fuzzers/binary_only/fuzzbench_qemu/src/fuzzer.rs index 1c60226ca0..a26a588bb4 100644 --- a/fuzzers/fuzzbench_qemu/src/fuzzer.rs +++ b/fuzzers/binary_only/fuzzbench_qemu/src/fuzzer.rs @@ -1,6 +1,6 @@ //! A singlethreaded QEMU fuzzer that can auto-restart. 
-use core::{cell::RefCell, ptr::addr_of_mut, time::Duration}; +use core::{cell::RefCell, time::Duration}; #[cfg(unix)] use std::os::unix::io::{AsRawFd, FromRawFd}; use std::{ @@ -22,8 +22,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, mutators::{ - scheduled::havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, - StdMOptMutator, StdScheduledMutator, Tokens, + havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, StdMOptMutator, + StdScheduledMutator, Tokens, }, observers::{CanTrack, HitcountsMapObserver, TimeObserver, VariableMapObserver}, schedulers::{ @@ -38,7 +38,7 @@ use libafl::{ }; use libafl_bolts::{ current_time, - os::{dup2, unix_signals::Signal}, + os::dup2, ownedref::OwnedMutSlice, rands::StdRand, shmem::{ShMemProvider, StdShMemProvider}, @@ -46,14 +46,11 @@ use libafl_bolts::{ AsSlice, }; use libafl_qemu::{ - // asan::{init_with_asan, QemuAsanHelper}, - cmplog::{CmpLogObserver, QemuCmpLogHelper}, - edges::edges_map_mut_ptr, - edges::QemuEdgeCoverageHelper, - edges::{EDGES_MAP_SIZE_IN_USE, MAX_EDGES_FOUND}, elf::EasyElf, filter_qemu_args, - hooks::QemuHooks, + // asan::{init_with_asan, QemuAsanHelper}, + modules::cmplog::{CmpLogModule, CmpLogObserver}, + modules::edges::StdEdgeCoverageModule, Emulator, GuestReg, //snapshot::QemuSnapshotHelper, @@ -65,6 +62,7 @@ use libafl_qemu::{ QemuShutdownCause, Regs, }; +use libafl_targets::{edges_map_mut_ptr, EDGES_MAP_ALLOCATED_SIZE, MAX_EDGES_FOUND}; #[cfg(unix)] use nix::unistd::dup; @@ -173,11 +171,11 @@ fn fuzz( logfile: PathBuf, timeout: Duration, ) -> Result<(), Error> { + env_logger::init(); env::remove_var("LD_LIBRARY_PATH"); let args: Vec = env::args().collect(); - let env: Vec<(String, String)> = env::vars().collect(); - let qemu = Qemu::init(&args, &env).unwrap(); + let qemu = Qemu::init(&args).expect("QEMU init failed"); // let (emu, asan) = init_with_asan(&mut args, &mut env).unwrap(); let mut elf_buffer = Vec::new(); @@ -196,11 +194,14 @@ fn fuzz( } } - println!("Break at {:#x}", qemu.read_reg::<_, u64>(Regs::Pc).unwrap()); + println!("Break at {:#x}", qemu.read_reg(Regs::Pc).unwrap()); let stack_ptr: u64 = qemu.read_reg(Regs::Sp).unwrap(); let mut ret_addr = [0; 8]; - unsafe { qemu.read_mem(stack_ptr, &mut ret_addr) }; + + qemu.read_mem(stack_ptr, &mut ret_addr) + .expect("Error while reading QEMU memory."); + let ret_addr = u64::from_le_bytes(ret_addr); println!("Stack pointer = {stack_ptr:#x}"); @@ -255,11 +256,11 @@ fn fuzz( }; // Create an observation channel using the coverage map - let edges_observer = unsafe { + let mut edges_observer = unsafe { HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( "edges", - OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_SIZE_IN_USE), - addr_of_mut!(MAX_EDGES_FOUND), + OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_ALLOCATED_SIZE), + &raw mut MAX_EDGES_FOUND, )) .track_indices() }; @@ -316,61 +317,68 @@ fn fuzz( 5, )?; - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( &edges_observer, - PowerQueueScheduler::new(&mut state, &edges_observer, PowerSchedule::FAST), + PowerQueueScheduler::new(&mut state, &edges_observer, PowerSchedule::fast()), ); // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, 
objective); // The wrapped harness function, calling out to the LLVM-style harness - let mut harness = |input: &BytesInput| { - let target = input.target_bytes(); - let mut buf = target.as_slice(); - let mut len = buf.len(); - if len > MAX_INPUT_SIZE { - buf = &buf[0..MAX_INPUT_SIZE]; - len = MAX_INPUT_SIZE; - } - - unsafe { - qemu.write_mem(input_addr, buf); - - qemu.write_reg(Regs::Rdi, input_addr).unwrap(); - qemu.write_reg(Regs::Rsi, len as GuestReg).unwrap(); - qemu.write_reg(Regs::Rip, test_one_input_ptr).unwrap(); - qemu.write_reg(Regs::Rsp, stack_ptr).unwrap(); - - match qemu.run() { - Ok(QemuExitReason::Breakpoint(_)) => {} - Ok(QemuExitReason::End(QemuShutdownCause::HostSignal(Signal::SigInterrupt))) => { - process::exit(0) - } - Err(QemuExitError::UnexpectedExit) => return ExitKind::Crash, - _ => panic!("Unexpected QEMU exit."), + let mut harness = + |_emulator: &mut Emulator<_, _, _, _, _>, _state: &mut _, input: &BytesInput| { + let target = input.target_bytes(); + let mut buf = target.as_slice(); + let mut len = buf.len(); + if len > MAX_INPUT_SIZE { + buf = &buf[0..MAX_INPUT_SIZE]; + len = MAX_INPUT_SIZE; } - } - ExitKind::Ok - }; + unsafe { + // # Safety + // The input buffer size is checked above. We use `write_mem_unchecked` for performance reasons + // For better error handling, use `write_mem` and handle the returned Result + qemu.write_mem_unchecked(input_addr, buf); - let mut hooks = QemuHooks::new( - qemu.clone(), - tuple_list!( - QemuEdgeCoverageHelper::default(), - QemuCmpLogHelper::default(), - // QemuAsanHelper::default(asan), - //QemuSnapshotHelper::new() - ), + qemu.write_reg(Regs::Rdi, input_addr).unwrap(); + qemu.write_reg(Regs::Rsi, len as GuestReg).unwrap(); + qemu.write_reg(Regs::Rip, test_one_input_ptr).unwrap(); + qemu.write_reg(Regs::Rsp, stack_ptr).unwrap(); + + match qemu.run() { + Ok(QemuExitReason::Breakpoint(_)) => {} + Ok(QemuExitReason::End(QemuShutdownCause::HostSignal(signal))) => { + signal.handle(); + } + Err(QemuExitError::UnexpectedExit) => return ExitKind::Crash, + _ => panic!("Unexpected QEMU exit."), + } + } + + ExitKind::Ok + }; + + let modules = tuple_list!( + StdEdgeCoverageModule::builder() + .map_observer(edges_observer.as_mut()) + .build() + .unwrap(), + CmpLogModule::default(), + // QemuAsanHelper::default(asan), + //QemuSnapshotHelper::new() ); + let emulator = Emulator::empty().qemu(qemu).modules(modules).build()?; + // Create the executor for an in-process function with one observer for edge coverage and one for the execution time let executor = QemuExecutor::new( - &mut hooks, + emulator, &mut harness, tuple_list!(edges_observer, time_observer), &mut fuzzer, @@ -396,7 +404,7 @@ fn fuzz( println!("Failed to load initial corpus at {:?}", &seed_dir); process::exit(0); }); - println!("We imported {} inputs from disk.", state.corpus().count()); + println!("We imported {} input(s) from disk.", state.corpus().count()); } let tracing = ShadowTracingStage::new(&mut executor); diff --git a/fuzzers/fuzzbench_qemu/src/main.rs b/fuzzers/binary_only/fuzzbench_qemu/src/main.rs similarity index 100% rename from fuzzers/fuzzbench_qemu/src/main.rs rename to fuzzers/binary_only/fuzzbench_qemu/src/main.rs diff --git a/fuzzers/binary_only/intel_pt_baby_fuzzer/Cargo.toml b/fuzzers/binary_only/intel_pt_baby_fuzzer/Cargo.toml new file mode 100644 index 0000000000..c2405c55ce --- /dev/null +++ b/fuzzers/binary_only/intel_pt_baby_fuzzer/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "intel_pt_baby_fuzzer" +version = "0.14.1" +authors = [ + "Andrea 
Fioraldi ", + "Dominik Maier ", + "Marco Cavenati ", +] +edition = "2021" + +[features] +tui = ["libafl/tui_monitor"] + +[dependencies] +libafl = { path = "../../../libafl/", default-features = false, features = [ + "intel_pt", +] } +libafl_bolts = { path = "../../../libafl_bolts" } +proc-maps = "0.4.0" diff --git a/fuzzers/binary_only/intel_pt_baby_fuzzer/README.md b/fuzzers/binary_only/intel_pt_baby_fuzzer/README.md new file mode 100644 index 0000000000..79fc1ced19 --- /dev/null +++ b/fuzzers/binary_only/intel_pt_baby_fuzzer/README.md @@ -0,0 +1,15 @@ +# Baby fuzzer with Intel PT tracing + +This is a minimalistic example about how to create a libafl based fuzzer with Intel PT tracing. + +It runs on a single core until a crash occurs and then exits. + +The tested program is a simple Rust function without any instrumentation. + +After building this example with `cargo build`, you need to give to the executable the necessary capabilities with +`sudo setcap cap_ipc_lock,cap_sys_ptrace,cap_sys_admin,cap_syslog=ep ./target/debug/intel_pt_baby_fuzzer`. + +You can run this example using `cargo run`, and you can enable the TUI feature by building and running with +`--features tui`. + +This fuzzer is compatible with Linux hosts only having an Intel PT compatible CPU. diff --git a/fuzzers/binary_only/intel_pt_baby_fuzzer/src/main.rs b/fuzzers/binary_only/intel_pt_baby_fuzzer/src/main.rs new file mode 100644 index 0000000000..cc84cfce2f --- /dev/null +++ b/fuzzers/binary_only/intel_pt_baby_fuzzer/src/main.rs @@ -0,0 +1,153 @@ +use std::{hint::black_box, num::NonZero, path::PathBuf, process, time::Duration}; + +#[cfg(feature = "tui")] +use libafl::monitors::tui::TuiMonitor; +#[cfg(not(feature = "tui"))] +use libafl::monitors::SimpleMonitor; +use libafl::{ + corpus::{InMemoryCorpus, OnDiskCorpus}, + events::SimpleEventManager, + executors::{ + hooks::intel_pt::{IntelPTHook, Section}, + inprocess::GenericInProcessExecutor, + ExitKind, + }, + feedbacks::{CrashFeedback, MaxMapFeedback}, + fuzzer::{Fuzzer, StdFuzzer}, + generators::RandPrintablesGenerator, + inputs::{BytesInput, HasTargetBytes}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, + observers::StdMapObserver, + schedulers::QueueScheduler, + stages::mutational::StdMutationalStage, + state::StdState, +}; +use libafl_bolts::{current_nanos, rands::StdRand, tuples::tuple_list, AsSlice}; +use proc_maps::get_process_maps; + +// Coverage map +const MAP_SIZE: usize = 4096; +static mut MAP: [u8; MAP_SIZE] = [0; MAP_SIZE]; +#[allow(static_mut_refs)] +static mut MAP_PTR: *mut u8 = unsafe { MAP.as_mut_ptr() }; + +pub fn main() { + // The closure that we want to fuzz + let mut harness = |input: &BytesInput| { + let target = input.target_bytes(); + let buf = target.as_slice(); + if !buf.is_empty() && buf[0] == b'a' { + let _do_something = black_box(0); + if buf.len() > 1 && buf[1] == b'b' { + let _do_something = black_box(0); + if buf.len() > 2 && buf[2] == b'c' { + panic!("Artificial bug triggered =)"); + } + } + } + ExitKind::Ok + }; + + // Create an observation channel using the map + let observer = unsafe { StdMapObserver::from_mut_ptr("signals", MAP_PTR, MAP_SIZE) }; + + // Feedback to rate the interestingness of an input + let mut feedback = MaxMapFeedback::new(&observer); + + // A feedback to choose if an input is a solution or not + let mut objective = CrashFeedback::new(); + + // create a State from scratch + let mut state = StdState::new( + // RNG + StdRand::with_seed(current_nanos()), + // Corpus that will be 
evolved, we keep it in memory for performance + InMemoryCorpus::new(), + // Corpus in which we store solutions (crashes in this example), + // on disk so the user can get them after stopping the fuzzer + OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(), + // States of the feedbacks. + // The feedbacks can report the data that should persist in the State. + &mut feedback, + // Same for objective feedbacks + &mut objective, + ) + .unwrap(); + + // The Monitor trait define how the fuzzer stats are displayed to the user + #[cfg(not(feature = "tui"))] + let mon = SimpleMonitor::new(|s| println!("{s}")); + #[cfg(feature = "tui")] + let mon = TuiMonitor::builder() + .title("Baby Fuzzer Intel PT") + .enhanced_graphics(false) + .build(); + + // The event manager handle the various events generated during the fuzzing loop + // such as the notification of the addition of a new item to the corpus + let mut mgr = SimpleEventManager::new(mon); + + // A queue policy to get testcases from the corpus + let scheduler = QueueScheduler::new(); + + // A fuzzer with feedbacks and a corpus scheduler + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + // Get the memory map of the current process + let my_pid = i32::try_from(process::id()).unwrap(); + let process_maps = get_process_maps(my_pid).unwrap(); + let sections = process_maps + .iter() + .filter_map(|pm| { + if pm.is_exec() && pm.filename().is_some() { + Some(Section { + file_path: pm.filename().unwrap().to_string_lossy().to_string(), + file_offset: pm.offset as u64, + size: pm.size() as u64, + virtual_address: pm.start() as u64, + }) + } else { + None + } + }) + .collect::>(); + + // Intel PT hook that will handle the setup of Intel PT for each execution and fill the map + let pt_hook = unsafe { + IntelPTHook::builder() + .map_ptr(MAP_PTR) + .map_len(MAP_SIZE) + .image(§ions) + } + .build(); + + type PTInProcessExecutor<'a, H, OT, S, T> = + GenericInProcessExecutor, ()), OT, S>; + // Create the executor for an in-process function with just one observer + let mut executor = PTInProcessExecutor::with_timeout_generic( + tuple_list!(pt_hook), + &mut harness, + tuple_list!(observer), + &mut fuzzer, + &mut state, + &mut mgr, + Duration::from_millis(5000), + ) + .expect("Failed to create the Executor"); + + // Generator of printable bytearrays of max size 32 + let mut generator = RandPrintablesGenerator::new(NonZero::new(32).unwrap()); + + // Generate 8 initial inputs + state + .generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8) + .expect("Failed to generate the initial corpus"); + + // Set up a mutational stage with a basic bytes mutator + let mutator = StdScheduledMutator::new(havoc_mutations()); + let mut stages = tuple_list!(StdMutationalStage::new(mutator)); + + fuzzer + .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr) + .expect("Error in the fuzzing loop"); +} diff --git a/fuzzers/binary_only/intel_pt_command_executor/Cargo.toml b/fuzzers/binary_only/intel_pt_command_executor/Cargo.toml new file mode 100644 index 0000000000..6aa937c7a9 --- /dev/null +++ b/fuzzers/binary_only/intel_pt_command_executor/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "intel_pt_command_executor" +version = "0.1.0" +authors = ["Marco Cavenati "] +edition = "2021" + +[dependencies] +env_logger = "0.11.5" +libafl = { path = "../../../libafl", default-features = false, features = [ + "intel_pt", +] } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_intelpt = { path = "../../../libafl_intelpt" } +log = { 
version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/binary_only/intel_pt_command_executor/Makefile.toml b/fuzzers/binary_only/intel_pt_command_executor/Makefile.toml new file mode 100644 index 0000000000..9c2d97e4eb --- /dev/null +++ b/fuzzers/binary_only/intel_pt_command_executor/Makefile.toml @@ -0,0 +1,33 @@ +[env.development] +PROFILE_DIR = "debug" + +[env.release] +PROFILE_DIR = "release" + +[tasks.build_target] +command = "rustc" +args = [ + "src/target_program.rs", + "--out-dir", + "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${PROFILE_DIR}", + "-O", +] + +[tasks.build_fuzzer] +command = "cargo" +args = ["build", "--profile", "${CARGO_MAKE_CARGO_PROFILE}"] + +[tasks.build] +dependencies = ["build_fuzzer", "build_target"] + +[tasks.setcap] +script = "sudo setcap cap_ipc_lock,cap_sys_ptrace,cap_sys_admin,cap_syslog=ep ${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${PROFILE_DIR}/${CARGO_MAKE_CRATE_NAME}" +dependencies = ["build_fuzzer"] + +[tasks.run] +command = "cargo" +args = ["run", "--profile", "${CARGO_MAKE_CARGO_PROFILE}"] +dependencies = ["build", "setcap"] + +[tasks.default] +alias = "run" diff --git a/fuzzers/binary_only/intel_pt_command_executor/README.md b/fuzzers/binary_only/intel_pt_command_executor/README.md new file mode 100644 index 0000000000..967733b505 --- /dev/null +++ b/fuzzers/binary_only/intel_pt_command_executor/README.md @@ -0,0 +1,21 @@ +# Linux Binary-Only Fuzzer with Intel PT Tracing + +This fuzzer is designed to target a Linux binary (without requiring source code instrumentation) and leverages Intel +Processor Trace (PT) to compute code coverage. + +## Prerequisites + +- A Linux host with an Intel Processor Trace (PT) compatible CPU +- `cargo-make` installed +- Sudo access to grant necessary capabilities to the fuzzer + +## How to Run the Fuzzer + +To compile and run the fuzzer (and the target program) execute the following command: +```sh +cargo make +``` + +> **Note**: This command may prompt you for your password to assign capabilities required for Intel PT. If you'd prefer +> not to run it with elevated permissions, you can review and execute the commands from `Makefile.toml` +> individually. 
diff --git a/fuzzers/binary_only/intel_pt_command_executor/src/main.rs b/fuzzers/binary_only/intel_pt_command_executor/src/main.rs new file mode 100644 index 0000000000..e8c977a775 --- /dev/null +++ b/fuzzers/binary_only/intel_pt_command_executor/src/main.rs @@ -0,0 +1,146 @@ +use std::{ + env, ffi::CString, num::NonZero, os::unix::ffi::OsStrExt, path::PathBuf, time::Duration, +}; + +use libafl::{ + corpus::{InMemoryCorpus, OnDiskCorpus}, + events::SimpleEventManager, + executors::{ + command::{CommandConfigurator, PTraceCommandConfigurator}, + hooks::intel_pt::{IntelPTHook, Section}, + }, + feedbacks::{CrashFeedback, MaxMapFeedback}, + fuzzer::{Fuzzer, StdFuzzer}, + generators::RandPrintablesGenerator, + monitors::SimpleMonitor, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, + observers::StdMapObserver, + schedulers::QueueScheduler, + stages::mutational::StdMutationalStage, + state::StdState, +}; +use libafl_bolts::{core_affinity, rands::StdRand, tuples::tuple_list}; +use libafl_intelpt::{IntelPT, PAGE_SIZE}; + +// Coverage map +const MAP_SIZE: usize = 4096; +static mut MAP: [u8; MAP_SIZE] = [0; MAP_SIZE]; +#[allow(static_mut_refs)] +static mut MAP_PTR: *mut u8 = unsafe { MAP.as_mut_ptr() }; + +pub fn main() { + // Let's set the default logging level to `warn` + if env::var("RUST_LOG").is_err() { + env::set_var("RUST_LOG", "warn") + } + // Enable logging + env_logger::init(); + + let target_path = PathBuf::from(env::args().next().unwrap()) + .parent() + .unwrap() + .join("target_program"); + + // We'll run the target on cpu (aka core) 0 + let cpu = core_affinity::get_core_ids().unwrap()[0]; + log::debug!("Using core {} for fuzzing", cpu.0); + + // Create an observation channel using the map + let observer = unsafe { StdMapObserver::from_mut_ptr("signals", MAP_PTR, MAP_SIZE) }; + + // Feedback to rate the interestingness of an input + let mut feedback = MaxMapFeedback::new(&observer); + + // A feedback to choose if an input is a solution or not + let mut objective = CrashFeedback::new(); + + // create a State from scratch + let mut state = StdState::new( + // RNG + StdRand::new(), + // Corpus that will be evolved, we keep it in memory for performance + InMemoryCorpus::new(), + // Corpus in which we store solutions (crashes in this example), + // on disk so the user can get them after stopping the fuzzer + OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(), + // States of the feedbacks. + // The feedbacks can report the data that should persist in the State. + &mut feedback, + // Same for objective feedbacks + &mut objective, + ) + .unwrap(); + + // The Monitor trait define how the fuzzer stats are displayed to the user + let mon = SimpleMonitor::new(|s| println!("{s}")); + + // The event manager handle the various events generated during the fuzzing loop + // such as the notification of the addition of a new item to the corpus + let mut mgr = SimpleEventManager::new(mon); + + // A queue policy to get testcases from the corpus + let scheduler = QueueScheduler::new(); + + // A fuzzer with feedbacks and a corpus scheduler + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + let mut intel_pt = IntelPT::builder().cpu(cpu.0).inherit(true).build().unwrap(); + + // The target is a ET_DYN elf, it will be relocated by the loader with this offset. 
+ // see https://github.com/torvalds/linux/blob/c1e939a21eb111a6d6067b38e8e04b8809b64c4e/arch/x86/include/asm/elf.h#L234C1-L239C38 + const DEFAULT_MAP_WINDOW: usize = (1 << 47) - PAGE_SIZE; + const ELF_ET_DYN_BASE: usize = DEFAULT_MAP_WINDOW / 3 * 2 & !(PAGE_SIZE - 1); + + // Set the instruction pointer (IP) filter and memory image of our target. + // These information can be retrieved from `readelf -l` (for example) + let code_memory_addresses = ELF_ET_DYN_BASE + 0x14000..=ELF_ET_DYN_BASE + 0x14000 + 0x40000; + + intel_pt + .set_ip_filters(&[code_memory_addresses.clone()]) + .unwrap(); + + let sections = [Section { + file_path: target_path.to_string_lossy().to_string(), + file_offset: 0x13000, + size: (*code_memory_addresses.end() - *code_memory_addresses.start() + 1) as u64, + virtual_address: *code_memory_addresses.start() as u64, + }]; + + let hook = unsafe { IntelPTHook::builder().map_ptr(MAP_PTR).map_len(MAP_SIZE) } + .intel_pt(intel_pt) + .image(§ions) + .build(); + + let target_cstring = CString::from( + target_path + .as_os_str() + .as_bytes() + .iter() + .map(|&b| NonZero::new(b).unwrap()) + .collect::>(), + ); + + let command_configurator = PTraceCommandConfigurator::builder() + .path(target_cstring) + .cpu(cpu) + .timeout(Duration::from_secs(2)) + .build(); + let mut executor = + command_configurator.into_executor_with_hooks(tuple_list!(observer), tuple_list!(hook)); + + // Generator of printable bytearrays of max size 32 + let mut generator = RandPrintablesGenerator::new(NonZero::new(32).unwrap()); + + // Generate 8 initial inputs + state + .generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8) + .expect("Failed to generate the initial corpus"); + + // Setup a mutational stage with a basic bytes mutator + let mutator = StdScheduledMutator::new(havoc_mutations()); + let mut stages = tuple_list!(StdMutationalStage::new(mutator)); + + fuzzer + .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr) + .expect("Error in the fuzzing loop"); +} diff --git a/fuzzers/binary_only/intel_pt_command_executor/src/target_program.rs b/fuzzers/binary_only/intel_pt_command_executor/src/target_program.rs new file mode 100644 index 0000000000..f1636bb6dc --- /dev/null +++ b/fuzzers/binary_only/intel_pt_command_executor/src/target_program.rs @@ -0,0 +1,19 @@ +use std::{ + hint::black_box, + io::{stdin, Read}, +}; + +fn main() { + let mut buf = Vec::new(); + stdin().read_to_end(&mut buf).unwrap(); + + if !buf.is_empty() && buf[0] == b'a' { + let _do_something = black_box(0); + if buf.len() > 1 && buf[1] == b'b' { + let _do_something = black_box(0); + if buf.len() > 2 && buf[2] == b'c' { + panic!("Artificial bug triggered =)"); + } + } + } +} diff --git a/fuzzers/python_qemu/README.md b/fuzzers/binary_only/python_qemu/README.md similarity index 100% rename from fuzzers/python_qemu/README.md rename to fuzzers/binary_only/python_qemu/README.md diff --git a/fuzzers/python_qemu/fuzz.c b/fuzzers/binary_only/python_qemu/fuzz.c similarity index 100% rename from fuzzers/python_qemu/fuzz.c rename to fuzzers/binary_only/python_qemu/fuzz.c diff --git a/fuzzers/python_qemu/fuzzer.py b/fuzzers/binary_only/python_qemu/fuzzer.py similarity index 70% rename from fuzzers/python_qemu/fuzzer.py rename to fuzzers/binary_only/python_qemu/fuzzer.py index 71fc023580..295159cad5 100644 --- a/fuzzers/python_qemu/fuzzer.py +++ b/fuzzers/binary_only/python_qemu/fuzzer.py @@ -4,31 +4,32 @@ from pylibafl import sugar, qemu import lief MAX_SIZE = 0x100 -BINARY_PATH = './a.out' +BINARY_PATH = 
"./a.out" -emu = qemu.Qemu(['qemu-x86_64', BINARY_PATH], []) +emu = qemu.Qemu(["qemu-x86_64", BINARY_PATH], []) elf = lief.parse(BINARY_PATH) test_one_input = elf.get_function_address("LLVMFuzzerTestOneInput") if elf.is_pie: test_one_input += emu.load_addr() -print('LLVMFuzzerTestOneInput @ 0x%x' % test_one_input) +print("LLVMFuzzerTestOneInput @ 0x%x" % test_one_input) emu.set_breakpoint(test_one_input) emu.run() sp = emu.read_reg(qemu.regs.Rsp) -print('SP = 0x%x' % sp) +print("SP = 0x%x" % sp) -retaddr = int.from_bytes(emu.read_mem(sp, 8), 'little') -print('RET = 0x%x' % retaddr) +retaddr = int.from_bytes(emu.read_mem(sp, 8), "little") +print("RET = 0x%x" % retaddr) inp = emu.map_private(0, MAX_SIZE, qemu.mmap.ReadWrite) -assert(inp > 0) +assert inp > 0 emu.remove_breakpoint(test_one_input) emu.set_breakpoint(retaddr) + def harness(b): if len(b) > MAX_SIZE: b = b[:MAX_SIZE] @@ -39,5 +40,6 @@ def harness(b): emu.write_reg(qemu.regs.Rip, test_one_input) emu.run() -fuzz = sugar.QemuBytesCoverageSugar(['./in'], './out', 3456, [0,1,2,3]) + +fuzz = sugar.QemuBytesCoverageSugar(["./in"], "./out", 3456, [0, 1, 2, 3]) fuzz.run(emu, harness) diff --git a/fuzzers/qemu_cmin/.gitignore b/fuzzers/binary_only/qemu_cmin/.gitignore similarity index 100% rename from fuzzers/qemu_cmin/.gitignore rename to fuzzers/binary_only/qemu_cmin/.gitignore diff --git a/fuzzers/binary_only/qemu_cmin/Cargo.toml b/fuzzers/binary_only/qemu_cmin/Cargo.toml new file mode 100644 index 0000000000..26bff24c9f --- /dev/null +++ b/fuzzers/binary_only/qemu_cmin/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "qemu_cmin" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", + "WorksButNotTested", +] +edition = "2021" + +[profile.release] +#lto = true +#codegen-units = 1 +#opt-level = 3 +debug = true + +[features] +default = ["std"] +std = [] +be = ["libafl_qemu/be"] +arm = ["libafl_qemu/arm"] +x86_64 = ["libafl_qemu/x86_64"] +i386 = ["libafl_qemu/i386"] +aarch64 = ["libafl_qemu/aarch64"] +mips = ["libafl_qemu/mips"] +ppc = ["libafl_qemu/ppc", "be"] + +[build-dependencies] +vergen = { version = "9.0.1", features = ["build", "cargo", "rustc", "si"] } +vergen-git2 = "1.0.1" + +[dependencies] +clap = { version = "4.5.18", features = ["derive", "string"] } +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_qemu = { path = "../../../libafl_qemu", features = ["usermode"] } +libafl_targets = { path = "../../../libafl_targets" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +rangemap = { version = "1.5.1" } diff --git a/fuzzers/qemu_cmin/Makefile.toml b/fuzzers/binary_only/qemu_cmin/Makefile.toml similarity index 74% rename from fuzzers/qemu_cmin/Makefile.toml rename to fuzzers/binary_only/qemu_cmin/Makefile.toml index c43f51af6b..2b292c0505 100644 --- a/fuzzers/qemu_cmin/Makefile.toml +++ b/fuzzers/binary_only/qemu_cmin/Makefile.toml @@ -1,6 +1,8 @@ [env] -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } CROSS_CC = "x86_64-linux-gnu-gcc" CROSS_CXX = "x86_64-linux-gnu-g++" CROSS_CFLAGS = "" @@ -73,32 +75,32 @@ FEATURE = "ppc" LIBAFL_QEMU_CLONE_DIR = 
"${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/qemu-libafl-bridge" [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Qemu fuzzer not supported on windows/mac" ''' [tasks.target_dir] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = ["${CARGO_MAKE_CRATE_TARGET_DIRECTORY}"] } +script_runner = "@shell" +script = ''' mkdir ${CARGO_MAKE_CRATE_TARGET_DIRECTORY} ''' [tasks.deps_dir] dependencies = ["target_dir"] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = ["${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/"] } +script_runner = "@shell" +script = ''' mkdir ${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/ ''' [tasks.arch_target_dir] dependencies = ["target_dir"] -condition = { files_not_exist = [ "${TARGET_DIR}" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = ["${TARGET_DIR}"] } +script_runner = "@shell" +script = ''' mkdir ${TARGET_DIR} ''' @@ -109,11 +111,13 @@ windows_alias = "unsupported" [tasks.zlib_unix_wget] dependencies = ["deps_dir"] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/zlib-1.2.13" ] } -script_runner="@shell" +condition = { files_not_exist = [ + "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/zlib-1.2.13", +] } +script_runner = "@shell" # NOTE: There's no specific reason we're using an old version of zlib, # but newer versions get moved to fossils/ after a while. -script=''' +script = ''' wget \ -O "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/zlib-1.2.13.tar.gz" \ https://zlib.net/fossils/zlib-1.2.13.tar.gz @@ -124,10 +128,10 @@ tar \ ''' [tasks.zlib_unix] -dependencies = ["arch_target_dir", "zlib_unix_wget" ] -condition = { files_not_exist = [ "${TARGET_DIR}/build-zlib/libz.a" ] } -script_runner="@shell" -script=''' +dependencies = ["arch_target_dir", "zlib_unix_wget"] +condition = { files_not_exist = ["${TARGET_DIR}/build-zlib/libz.a"] } +script_runner = "@shell" +script = ''' rm -rf ${TARGET_DIR}/build-zlib/ mkdir ${TARGET_DIR}/build-zlib/ @@ -148,9 +152,11 @@ windows_alias = "unsupported" [tasks.libpng_unix_wget] dependencies = ["deps_dir"] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/libpng-1.6.37" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = [ + "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/libpng-1.6.37", +] } +script_runner = "@shell" +script = ''' wget \ -O "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/v1.6.37.tar.gz" \ https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz @@ -161,10 +167,10 @@ tar \ ''' [tasks.libpng_unix] -dependencies = [ "arch_target_dir", "zlib", "libpng_unix_wget" ] -condition = { files_not_exist = [ "${TARGET_DIR}/build-png/.libs/libpng16.a" ] } -script_runner="@shell" -script=''' +dependencies = ["arch_target_dir", "zlib", "libpng_unix_wget"] +condition = { files_not_exist = ["${TARGET_DIR}/build-png/.libs/libpng16.a"] } +script_runner = "@shell" +script = ''' rm -rf ${TARGET_DIR}/build-png/ mkdir ${TARGET_DIR}/build-png/ @@ -190,17 +196,19 @@ windows_alias = "unsupported" [tasks.build_unix] command = "cargo" args = [ - "build", - "--profile", - "${PROFILE}", - "--features", "${FEATURE}", - "--target-dir", "${TARGET_DIR}" + "build", + "--profile", + "${PROFILE}", + "--features", + "${FEATURE}", + "--target-dir", + "${TARGET_DIR}", ] [tasks.fuzzer] dependencies = ["build"] -script_runner="@shell" 
-script=''' +script_runner = "@shell" +script = ''' rm -f ${TARGET_DIR}/${PROFILE_DIR}/qemu_cmin-${CARGO_MAKE_PROFILE} mv ${TARGET_DIR}/${PROFILE_DIR}/qemu_cmin ${TARGET_DIR}/${PROFILE_DIR}/qemu_cmin-${CARGO_MAKE_PROFILE} ''' @@ -211,8 +219,8 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.harness_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' ${CROSS_CXX} \ ./harness.cc \ $CROSS_CFLAGS \ @@ -225,7 +233,7 @@ ${CROSS_CXX} \ -lm \ -static ''' -dependencies = [ "libpng" ] +dependencies = ["libpng"] [tasks.run] linux_alias = "run_unix" @@ -235,13 +243,15 @@ windows_alias = "unsupported" [tasks.run_unix] command = "${TARGET_DIR}/${PROFILE_DIR}/qemu_cmin-${CARGO_MAKE_PROFILE}" args = [ - "--output", "./output", - "--input", "./corpus", - "--verbose", - "--", - "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", + "--output", + "./output", + "--input", + "./corpus", + "--verbose", + "--", + "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", ] -dependencies = [ "harness", "fuzzer" ] +dependencies = ["harness", "fuzzer"] [tasks.test] linux_alias = "test_unix" @@ -249,13 +259,10 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.test_unix] -dependencies = [ "lightweight" ] +dependencies = ["lightweight"] # Tidy up after we've run our tests so we don't hog all the disk space command = "cargo" -args = [ - "make", - "clean", -] +args = ["make", "clean"] [tasks.test_full] linux_alias = "test_unix_full" @@ -263,13 +270,10 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.test_unix_full] -dependencies = [ "all" ] +dependencies = ["all"] # Tidy up after we've run our tests so we don't hog all the disk space command = "cargo" -args = [ - "make", - "clean", -] +args = ["make", "clean"] [tasks.clean] linux_alias = "clean_unix" @@ -279,72 +283,38 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf ${CARGO_MAKE_CRATE_TARGET_DIRECTORY} cargo clean ''' [tasks.arm] command = "cargo" -args = [ - "make", - "-p", "arm", - "run", -] +args = ["make", "-p", "arm", "run"] [tasks.aarch64] command = "cargo" -args = [ - "make", - "-p", "aarch64", - "run", -] +args = ["make", "-p", "aarch64", "run"] [tasks.x86_64] command = "cargo" -args = [ - "make", - "-p", "x86_64", - "run", -] +args = ["make", "-p", "x86_64", "run"] [tasks.i386] command = "cargo" -args = [ - "make", - "-p", "i386", - "run", -] +args = ["make", "-p", "i386", "run"] [tasks.mips] command = "cargo" -args = [ - "make", - "-p", "mips", - "run", -] +args = ["make", "-p", "mips", "run"] [tasks.ppc] command = "cargo" -args = [ - "make", - "-p", "ppc", - "run", -] +args = ["make", "-p", "ppc", "run"] [tasks.all] -dependencies = [ - "arm", - "aarch64", - "x86_64", - "i386", - "mips", - "ppc" -] +dependencies = ["arm", "aarch64", "x86_64", "i386", "mips", "ppc"] [tasks.lightweight] -dependencies = [ - "arm", - "x86_64", -] +dependencies = ["arm", "x86_64"] diff --git a/fuzzers/qemu_cmin/README.md b/fuzzers/binary_only/qemu_cmin/README.md similarity index 100% rename from fuzzers/qemu_cmin/README.md rename to fuzzers/binary_only/qemu_cmin/README.md diff --git a/fuzzers/qemu_cmin/build.rs b/fuzzers/binary_only/qemu_cmin/build.rs similarity index 60% rename from fuzzers/qemu_cmin/build.rs rename to fuzzers/binary_only/qemu_cmin/build.rs index 16317b1560..4a3e22ea6a 100644 --- a/fuzzers/qemu_cmin/build.rs +++ b/fuzzers/binary_only/qemu_cmin/build.rs @@ 
-1,11 +1,12 @@ -use vergen::EmitBuilder; +use vergen::{BuildBuilder, CargoBuilder, Emitter, RustcBuilder, SysinfoBuilder}; +use vergen_git2::Git2Builder; #[macro_export] macro_rules! assert_unique_feature { () => {}; ($first:tt $(,$rest:tt)*) => { $( - #[cfg(all(not(any(doc, feature = "clippy")), feature = $first, feature = $rest))] + #[cfg(all(not(doc), feature = $first, feature = $rest))] compile_error!(concat!("features \"", $first, "\" and \"", $rest, "\" cannot be used together")); )* assert_unique_feature!($($rest),*); @@ -13,12 +14,23 @@ macro_rules! assert_unique_feature { } fn main() { - EmitBuilder::builder() - .all_build() - .all_cargo() - .all_git() - .all_rustc() - .all_sysinfo() + let build = BuildBuilder::all_build().unwrap(); + let cargo = CargoBuilder::all_cargo().unwrap(); + let git = Git2Builder::all_git().unwrap(); + let rustc = RustcBuilder::all_rustc().unwrap(); + let sysinfo = SysinfoBuilder::all_sysinfo().unwrap(); + + Emitter::default() + .add_instructions(&build) + .unwrap() + .add_instructions(&cargo) + .unwrap() + .add_instructions(&git) + .unwrap() + .add_instructions(&rustc) + .unwrap() + .add_instructions(&sysinfo) + .unwrap() .emit() .unwrap(); diff --git a/fuzzers/libfuzzer_libpng/corpus/not_kitty.png b/fuzzers/binary_only/qemu_cmin/corpus/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_libpng/corpus/not_kitty.png rename to fuzzers/binary_only/qemu_cmin/corpus/not_kitty.png diff --git a/fuzzers/libfuzzer_libpng/corpus/not_kitty_alpha.png b/fuzzers/binary_only/qemu_cmin/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/libfuzzer_libpng/corpus/not_kitty_alpha.png rename to fuzzers/binary_only/qemu_cmin/corpus/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_libpng/corpus/not_kitty_gamma.png b/fuzzers/binary_only/qemu_cmin/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_libpng/corpus/not_kitty_gamma.png rename to fuzzers/binary_only/qemu_cmin/corpus/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_libpng/corpus/not_kitty_icc.png b/fuzzers/binary_only/qemu_cmin/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_libpng/corpus/not_kitty_icc.png rename to fuzzers/binary_only/qemu_cmin/corpus/not_kitty_icc.png diff --git a/fuzzers/qemu_cmin/harness.cc b/fuzzers/binary_only/qemu_cmin/harness.cc similarity index 100% rename from fuzzers/qemu_cmin/harness.cc rename to fuzzers/binary_only/qemu_cmin/harness.cc diff --git a/fuzzers/qemu_cmin/src/fuzzer.rs b/fuzzers/binary_only/qemu_cmin/src/fuzzer.rs similarity index 83% rename from fuzzers/qemu_cmin/src/fuzzer.rs rename to fuzzers/binary_only/qemu_cmin/src/fuzzer.rs index efbdf5a614..ba00041efd 100644 --- a/fuzzers/qemu_cmin/src/fuzzer.rs +++ b/fuzzers/binary_only/qemu_cmin/src/fuzzer.rs @@ -2,7 +2,7 @@ //! 
#[cfg(feature = "i386")] use core::mem::size_of; -use std::{env, io, path::PathBuf, process}; +use std::{env, fmt::Write, io, path::PathBuf, process, ptr::NonNull}; use clap::{builder::Str, Parser}; use libafl::{ @@ -27,11 +27,11 @@ use libafl_bolts::{ AsSlice, AsSliceMut, }; use libafl_qemu::{ - edges::{QemuEdgeCoverageChildHelper, EDGES_MAP_PTR, EDGES_MAP_SIZE_IN_USE}, - elf::EasyElf, - ArchExtras, CallingConvention, GuestAddr, GuestReg, MmapPerms, Qemu, QemuExitError, - QemuExitReason, QemuForkExecutor, QemuHooks, QemuShutdownCause, Regs, + elf::EasyElf, modules::edges::StdEdgeCoverageChildModule, ArchExtras, CallingConvention, + Emulator, GuestAddr, GuestReg, MmapPerms, Qemu, QemuExitError, QemuExitReason, + QemuForkExecutor, QemuShutdownCause, Regs, }; +use libafl_targets::{EDGES_MAP_DEFAULT_SIZE, EDGES_MAP_PTR}; #[derive(Default)] pub struct Version; @@ -52,8 +52,10 @@ impl From for Str { ("Cargo Target Triple", env!("VERGEN_CARGO_TARGET_TRIPLE")), ] .iter() - .map(|(k, v)| format!("{k:25}: {v}\n")) - .collect::(); + .fold(String::new(), |mut output, (k, v)| { + let _ = writeln!(output, "{k:25}: {v}"); + output + }); format!("\n{version:}").into() } @@ -65,7 +67,7 @@ impl From for Str { name = format ! ("qemu_cmin-{}", env ! ("CPU_TARGET")), version = Version::default(), about, -long_about = "Tool for generating minimizing corpus using QEMU instrumentation" +long_about = "Module for generating minimizing corpus using QEMU instrumentation" )] pub struct FuzzerOptions { #[arg(long, help = "Output directory")] @@ -111,8 +113,7 @@ pub fn fuzz() -> Result<(), Error> { log::debug!("ARGS: {:#?}", options.args); env::remove_var("LD_LIBRARY_PATH"); - let env: Vec<(String, String)> = env::vars().collect(); - let qemu = Qemu::init(&options.args, &env).unwrap(); + let qemu = Qemu::init(&options.args).unwrap(); let mut elf_buffer = Vec::new(); let elf = EasyElf::from_file(qemu.binary_path(), &mut elf_buffer).unwrap(); @@ -156,14 +157,16 @@ pub fn fuzz() -> Result<(), Error> { }, }; - let mut edges_shmem = shmem_provider.new_shmem(EDGES_MAP_SIZE_IN_USE).unwrap(); + let mut edges_shmem = shmem_provider.new_shmem(EDGES_MAP_DEFAULT_SIZE).unwrap(); let edges = edges_shmem.as_slice_mut(); unsafe { EDGES_MAP_PTR = edges.as_mut_ptr() }; - let edges_observer = unsafe { - HitcountsMapObserver::new(ConstMapObserver::<_, EDGES_MAP_SIZE_IN_USE>::from_mut_ptr( + let mut edges_observer = unsafe { + HitcountsMapObserver::new(ConstMapObserver::from_mut_ptr( "edges", - edges.as_mut_ptr(), + NonNull::new(edges.as_mut_ptr()) + .expect("The edge map pointer is null.") + .cast::<[u8; EDGES_MAP_DEFAULT_SIZE]>(), )) }; @@ -186,7 +189,7 @@ pub fn fuzz() -> Result<(), Error> { let scheduler = QueueScheduler::new(); let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - let mut harness = |input: &BytesInput| { + let mut harness = |_emulator: &mut Emulator<_, _, _, _, _>, input: &BytesInput| { let target = input.target_bytes(); let mut buf = target.as_slice(); let mut len = buf.len(); @@ -197,7 +200,8 @@ pub fn fuzz() -> Result<(), Error> { let len = len as GuestReg; unsafe { - qemu.write_mem(input_addr, buf); + qemu.write_mem(input_addr, buf).expect("qemu write failed."); + qemu.write_reg(Regs::Pc, test_one_input_ptr).unwrap(); qemu.write_reg(Regs::Sp, stack_ptr).unwrap(); qemu.write_return_address(ret_addr).unwrap(); @@ -219,10 +223,14 @@ pub fn fuzz() -> Result<(), Error> { ExitKind::Ok }; - let mut hooks = QemuHooks::new(qemu, tuple_list!(QemuEdgeCoverageChildHelper::default(),)); + let modules = 
tuple_list!(StdEdgeCoverageChildModule::builder() + .const_map_observer(edges_observer.as_mut()) + .build()?); + + let emulator = Emulator::empty().qemu(qemu).modules(modules).build()?; let mut executor = QemuForkExecutor::new( - &mut hooks, + emulator, &mut harness, tuple_list!(edges_observer), &mut fuzzer, diff --git a/fuzzers/qemu_cmin/src/main.rs b/fuzzers/binary_only/qemu_cmin/src/main.rs similarity index 100% rename from fuzzers/qemu_cmin/src/main.rs rename to fuzzers/binary_only/qemu_cmin/src/main.rs diff --git a/fuzzers/qemu_coverage/.gitignore b/fuzzers/binary_only/qemu_coverage/.gitignore similarity index 100% rename from fuzzers/qemu_coverage/.gitignore rename to fuzzers/binary_only/qemu_coverage/.gitignore diff --git a/fuzzers/binary_only/qemu_coverage/Cargo.toml b/fuzzers/binary_only/qemu_coverage/Cargo.toml new file mode 100644 index 0000000000..d4b402315c --- /dev/null +++ b/fuzzers/binary_only/qemu_coverage/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "qemu_coverage" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", + "WorksButNotTested", +] +edition = "2021" + +[profile.release] +#lto = true +#codegen-units = 1 +#opt-level = 3 +debug = true + +[features] +default = ["std"] +std = [] +be = ["libafl_qemu/be"] +arm = ["libafl_qemu/arm"] +x86_64 = ["libafl_qemu/x86_64"] +i386 = ["libafl_qemu/i386"] +aarch64 = ["libafl_qemu/aarch64"] +mips = ["libafl_qemu/mips"] +ppc = ["libafl_qemu/ppc", "be"] + +[build-dependencies] +vergen = { version = "9.0.1", features = ["build", "cargo", "rustc", "si"] } +vergen-git2 = "1.0.1" + +[dependencies] +clap = { version = "4.5.18", features = ["derive", "string"] } +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_qemu = { path = "../../../libafl_qemu", features = ["usermode"] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +rangemap = { version = "1.5.1" } diff --git a/fuzzers/qemu_coverage/Makefile.toml b/fuzzers/binary_only/qemu_coverage/Makefile.toml similarity index 74% rename from fuzzers/qemu_coverage/Makefile.toml rename to fuzzers/binary_only/qemu_coverage/Makefile.toml index 3ec4818066..97b30e62d0 100644 --- a/fuzzers/qemu_coverage/Makefile.toml +++ b/fuzzers/binary_only/qemu_coverage/Makefile.toml @@ -1,6 +1,8 @@ [env] -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } CROSS_CC = "x86_64-linux-gnu-gcc" CROSS_CXX = "x86_64-linux-gnu-g++" CROSS_CFLAGS = "" @@ -73,32 +75,32 @@ FEATURE = "ppc" LIBAFL_QEMU_CLONE_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/qemu-libafl-bridge" [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Qemu fuzzer not supported on windows/mac" ''' [tasks.target_dir] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = ["${CARGO_MAKE_CRATE_TARGET_DIRECTORY}"] } +script_runner = "@shell" +script = ''' mkdir ${CARGO_MAKE_CRATE_TARGET_DIRECTORY} ''' [tasks.deps_dir] dependencies = ["target_dir"] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/" ] } -script_runner="@shell" -script=''' +condition = { 
files_not_exist = ["${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/"] } +script_runner = "@shell" +script = ''' mkdir ${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/ ''' [tasks.arch_target_dir] dependencies = ["target_dir"] -condition = { files_not_exist = [ "${TARGET_DIR}" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = ["${TARGET_DIR}"] } +script_runner = "@shell" +script = ''' mkdir ${TARGET_DIR} ''' @@ -109,11 +111,13 @@ windows_alias = "unsupported" [tasks.zlib_unix_wget] dependencies = ["deps_dir"] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/zlib-1.2.13" ] } -script_runner="@shell" +condition = { files_not_exist = [ + "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/zlib-1.2.13", +] } +script_runner = "@shell" # NOTE: There's no specific reason we're using an old version of zlib, # but newer versions get moved to fossils/ after a while. -script=''' +script = ''' wget \ -O "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/zlib-1.2.13.tar.gz" \ https://zlib.net/fossils/zlib-1.2.13.tar.gz @@ -124,10 +128,10 @@ tar \ ''' [tasks.zlib_unix] -dependencies = ["arch_target_dir", "zlib_unix_wget" ] -condition = { files_not_exist = [ "${TARGET_DIR}/build-zlib/libz.a" ] } -script_runner="@shell" -script=''' +dependencies = ["arch_target_dir", "zlib_unix_wget"] +condition = { files_not_exist = ["${TARGET_DIR}/build-zlib/libz.a"] } +script_runner = "@shell" +script = ''' rm -rf ${TARGET_DIR}/build-zlib/ mkdir ${TARGET_DIR}/build-zlib/ @@ -148,9 +152,11 @@ windows_alias = "unsupported" [tasks.libpng_unix_wget] dependencies = ["deps_dir"] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/libpng-1.6.37" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = [ + "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/libpng-1.6.37", +] } +script_runner = "@shell" +script = ''' wget \ -O "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/v1.6.37.tar.gz" \ https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz @@ -161,10 +167,10 @@ tar \ ''' [tasks.libpng_unix] -dependencies = [ "arch_target_dir", "zlib", "libpng_unix_wget" ] -condition = { files_not_exist = [ "${TARGET_DIR}/build-png/.libs/libpng16.a" ] } -script_runner="@shell" -script=''' +dependencies = ["arch_target_dir", "zlib", "libpng_unix_wget"] +condition = { files_not_exist = ["${TARGET_DIR}/build-png/.libs/libpng16.a"] } +script_runner = "@shell" +script = ''' rm -rf ${TARGET_DIR}/build-png/ mkdir ${TARGET_DIR}/build-png/ @@ -190,17 +196,19 @@ windows_alias = "unsupported" [tasks.build_unix] command = "cargo" args = [ - "build", - "--profile", - "${PROFILE}", - "--features", "${FEATURE}", - "--target-dir", "${TARGET_DIR}" + "build", + "--profile", + "${PROFILE}", + "--features", + "${FEATURE}", + "--target-dir", + "${TARGET_DIR}", ] [tasks.fuzzer] dependencies = ["build"] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ${TARGET_DIR}/${PROFILE_DIR}/qemu_coverage-${CARGO_MAKE_PROFILE} mv ${TARGET_DIR}/${PROFILE_DIR}/qemu_coverage ${TARGET_DIR}/${PROFILE_DIR}/qemu_coverage-${CARGO_MAKE_PROFILE} ''' @@ -211,8 +219,8 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.harness_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' ${CROSS_CXX} \ ./harness.cc \ $CROSS_CFLAGS \ @@ -225,7 +233,7 @@ ${CROSS_CXX} \ -lm \ -static ''' -dependencies = [ "libpng" ] +dependencies = ["libpng"] [tasks.run] linux_alias = "run_unix" @@ -235,12 +243,14 @@ windows_alias = "unsupported" [tasks.run_unix] command = 
"${TARGET_DIR}/${PROFILE_DIR}/qemu_coverage-${CARGO_MAKE_PROFILE}" args = [ - "--coverage", "${TARGET_DIR}/drcov.log", - "--input", "./corpus", - "--", - "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", + "--coverage-path", + "${TARGET_DIR}/drcov.log", + "--input-dir", + "./corpus", + "--", + "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", ] -dependencies = [ "harness", "fuzzer" ] +dependencies = ["harness", "fuzzer"] [tasks.test] linux_alias = "test_unix" @@ -248,13 +258,10 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.test_unix] -dependencies = [ "lightweight" ] +dependencies = ["lightweight"] # Tidy up after we've run our tests so we don't hog all the disk space command = "cargo" -args = [ - "make", - "clean", -] +args = ["make", "clean"] [tasks.test_full] linux_alias = "test_unix_full" @@ -262,13 +269,10 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.test_unix_full] -dependencies = [ "all" ] +dependencies = ["all"] # Tidy up after we've run our tests so we don't hog all the disk space command = "cargo" -args = [ - "make", - "clean", -] +args = ["make", "clean"] [tasks.clean] linux_alias = "clean_unix" @@ -278,72 +282,38 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf ${CARGO_MAKE_CRATE_TARGET_DIRECTORY} cargo clean ''' [tasks.arm] command = "cargo" -args = [ - "make", - "-p", "arm", - "run", -] +args = ["make", "-p", "arm", "run"] [tasks.aarch64] command = "cargo" -args = [ - "make", - "-p", "aarch64", - "run", -] +args = ["make", "-p", "aarch64", "run"] [tasks.x86_64] command = "cargo" -args = [ - "make", - "-p", "x86_64", - "run", -] +args = ["make", "-p", "x86_64", "run"] [tasks.i386] command = "cargo" -args = [ - "make", - "-p", "i386", - "run", -] +args = ["make", "-p", "i386", "run"] [tasks.mips] command = "cargo" -args = [ - "make", - "-p", "mips", - "run", -] +args = ["make", "-p", "mips", "run"] [tasks.ppc] command = "cargo" -args = [ - "make", - "-p", "ppc", - "run", -] +args = ["make", "-p", "ppc", "run"] [tasks.all] -dependencies = [ - "arm", - "aarch64", - "x86_64", - "i386", - "mips", - "ppc" -] +dependencies = ["arm", "aarch64", "x86_64", "i386", "mips", "ppc"] [tasks.lightweight] -dependencies = [ - "arm", - "x86_64", -] +dependencies = ["arm", "x86_64"] diff --git a/fuzzers/qemu_coverage/README.md b/fuzzers/binary_only/qemu_coverage/README.md similarity index 100% rename from fuzzers/qemu_coverage/README.md rename to fuzzers/binary_only/qemu_coverage/README.md diff --git a/fuzzers/qemu_coverage/build.rs b/fuzzers/binary_only/qemu_coverage/build.rs similarity index 60% rename from fuzzers/qemu_coverage/build.rs rename to fuzzers/binary_only/qemu_coverage/build.rs index 16317b1560..4a3e22ea6a 100644 --- a/fuzzers/qemu_coverage/build.rs +++ b/fuzzers/binary_only/qemu_coverage/build.rs @@ -1,11 +1,12 @@ -use vergen::EmitBuilder; +use vergen::{BuildBuilder, CargoBuilder, Emitter, RustcBuilder, SysinfoBuilder}; +use vergen_git2::Git2Builder; #[macro_export] macro_rules! assert_unique_feature { () => {}; ($first:tt $(,$rest:tt)*) => { $( - #[cfg(all(not(any(doc, feature = "clippy")), feature = $first, feature = $rest))] + #[cfg(all(not(doc), feature = $first, feature = $rest))] compile_error!(concat!("features \"", $first, "\" and \"", $rest, "\" cannot be used together")); )* assert_unique_feature!($($rest),*); @@ -13,12 +14,23 @@ macro_rules! 
assert_unique_feature { } fn main() { - EmitBuilder::builder() - .all_build() - .all_cargo() - .all_git() - .all_rustc() - .all_sysinfo() + let build = BuildBuilder::all_build().unwrap(); + let cargo = CargoBuilder::all_cargo().unwrap(); + let git = Git2Builder::all_git().unwrap(); + let rustc = RustcBuilder::all_rustc().unwrap(); + let sysinfo = SysinfoBuilder::all_sysinfo().unwrap(); + + Emitter::default() + .add_instructions(&build) + .unwrap() + .add_instructions(&cargo) + .unwrap() + .add_instructions(&git) + .unwrap() + .add_instructions(&rustc) + .unwrap() + .add_instructions(&sysinfo) + .unwrap() .emit() .unwrap(); diff --git a/fuzzers/libfuzzer_libpng_accounting/corpus/not_kitty.png b/fuzzers/binary_only/qemu_coverage/corpus/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_libpng_accounting/corpus/not_kitty.png rename to fuzzers/binary_only/qemu_coverage/corpus/not_kitty.png diff --git a/fuzzers/libfuzzer_libpng_accounting/corpus/not_kitty_alpha.png b/fuzzers/binary_only/qemu_coverage/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/libfuzzer_libpng_accounting/corpus/not_kitty_alpha.png rename to fuzzers/binary_only/qemu_coverage/corpus/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_libpng_accounting/corpus/not_kitty_gamma.png b/fuzzers/binary_only/qemu_coverage/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_libpng_accounting/corpus/not_kitty_gamma.png rename to fuzzers/binary_only/qemu_coverage/corpus/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_libpng_accounting/corpus/not_kitty_icc.png b/fuzzers/binary_only/qemu_coverage/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_libpng_accounting/corpus/not_kitty_icc.png rename to fuzzers/binary_only/qemu_coverage/corpus/not_kitty_icc.png diff --git a/fuzzers/qemu_coverage/harness.cc b/fuzzers/binary_only/qemu_coverage/harness.cc similarity index 100% rename from fuzzers/qemu_coverage/harness.cc rename to fuzzers/binary_only/qemu_coverage/harness.cc diff --git a/fuzzers/qemu_coverage/src/fuzzer.rs b/fuzzers/binary_only/qemu_coverage/src/fuzzer.rs similarity index 56% rename from fuzzers/qemu_coverage/src/fuzzer.rs rename to fuzzers/binary_only/qemu_coverage/src/fuzzer.rs index f79a08e4b9..45f80b0f47 100644 --- a/fuzzers/qemu_coverage/src/fuzzer.rs +++ b/fuzzers/binary_only/qemu_coverage/src/fuzzer.rs @@ -3,12 +3,15 @@ #[cfg(feature = "i386")] use core::mem::size_of; use core::time::Duration; -use std::{env, fs::DirEntry, io, path::PathBuf, process}; +use std::{env, fmt::Write, fs::DirEntry, io, path::PathBuf, process}; use clap::{builder::Str, Parser}; use libafl::{ corpus::{Corpus, NopCorpus}, - events::{launcher::Launcher, EventConfig, EventRestarter, LlmpRestartingEventManager}, + events::{ + launcher::Launcher, ClientDescription, EventConfig, EventRestarter, + LlmpRestartingEventManager, + }, executors::ExitKind, fuzzer::StdFuzzer, inputs::{BytesInput, HasTargetBytes}, @@ -26,11 +29,11 @@ use libafl_bolts::{ AsSlice, }; use libafl_qemu::{ - drcov::QemuDrCovHelper, elf::EasyElf, ArchExtras, CallingConvention, GuestAddr, GuestReg, - MmapPerms, Qemu, QemuExecutor, QemuExitReason, QemuHooks, - QemuInstrumentationAddressRangeFilter, QemuRWError, QemuShutdownCause, Regs, + elf::EasyElf, + modules::{drcov::DrCovModule, StdAddressFilter}, + ArchExtras, CallingConvention, Emulator, GuestAddr, GuestReg, MmapPerms, Qemu, QemuExecutor, + QemuExitReason, QemuRWError, QemuShutdownCause, Regs, }; -use rangemap::RangeMap; #[derive(Default)] pub 
struct Version; @@ -56,8 +59,10 @@ impl From for Str { ("Cargo Target Triple", env!("VERGEN_CARGO_TARGET_TRIPLE")), ] .iter() - .map(|(k, v)| format!("{k:25}: {v}\n")) - .collect::(); + .fold(String::new(), |mut output, (k, v)| { + let _ = writeln!(output, "{k:25}: {v}"); + output + }); format!("\n{version:}").into() } @@ -69,14 +74,14 @@ impl From for Str { name = format!("qemu_coverage-{}",env!("CPU_TARGET")), version = Version::default(), about, - long_about = "Tool for generating DrCov coverage data using QEMU instrumentation" + long_about = "Module for generating DrCov coverage data using QEMU instrumentation" )] pub struct FuzzerOptions { #[arg(long, help = "Coverage file")] - coverage: String, + coverage_path: PathBuf, #[arg(long, help = "Input directory")] - input: String, + input_dir: PathBuf, #[arg(long, help = "Timeout in seconds", default_value = "5000", value_parser = timeout_from_millis_str)] timeout: Duration, @@ -99,9 +104,8 @@ pub const MAX_INPUT_SIZE: usize = 1048576; // 1MB pub fn fuzz() { let mut options = FuzzerOptions::parse(); - let corpus_dir = PathBuf::from(options.input); - - let corpus_files = corpus_dir + let corpus_files = options + .input_dir .read_dir() .expect("Failed to read corpus dir") .collect::, io::Error>>() @@ -118,8 +122,8 @@ pub fn fuzz() { log::debug!("ARGS: {:#?}", options.args); env::remove_var("LD_LIBRARY_PATH"); - let env: Vec<(String, String)> = env::vars().collect(); - let qemu = Qemu::init(&options.args, &env).unwrap(); + + let qemu = Qemu::init(&options.args).unwrap(); let mut elf_buffer = Vec::new(); let elf = EasyElf::from_file(qemu.binary_path(), &mut elf_buffer).unwrap(); @@ -157,7 +161,7 @@ pub fn fuzz() { let reset = |buf: &[u8], len: GuestReg| -> Result<(), QemuRWError> { unsafe { - qemu.write_mem(input_addr, buf); + let _ = qemu.write_mem(input_addr, buf); qemu.write_reg(Regs::Pc, test_one_input_ptr)?; qemu.write_reg(Regs::Sp, stack_ptr)?; qemu.write_return_address(ret_addr)?; @@ -176,115 +180,103 @@ pub fn fuzz() { } }; - let mut harness = |input: &BytesInput| { - let target = input.target_bytes(); - let mut buf = target.as_slice(); - let mut len = buf.len(); - if len > MAX_INPUT_SIZE { - buf = &buf[0..MAX_INPUT_SIZE]; - len = MAX_INPUT_SIZE; - } - let len = len as GuestReg; - reset(buf, len).unwrap(); - ExitKind::Ok - }; - - let mut run_client = - |state: Option<_>, mut mgr: LlmpRestartingEventManager<_, _, _>, core_id| { - let core_idx = options - .cores - .position(core_id) - .expect("Failed to get core index"); - let files = corpus_files - .iter() - .skip(files_per_core * core_idx) - .take(files_per_core) - .map(|x| x.path()) - .collect::>(); - - if files.is_empty() { - mgr.send_exiting()?; - Err(Error::ShuttingDown)? 
+ let mut harness = + |_emulator: &mut Emulator<_, _, _, _, _>, _state: &mut _, input: &BytesInput| { + let target = input.target_bytes(); + let mut buf = target.as_slice(); + let mut len = buf.len(); + if len > MAX_INPUT_SIZE { + buf = &buf[0..MAX_INPUT_SIZE]; + len = MAX_INPUT_SIZE; } + let len = len as GuestReg; + reset(buf, len).unwrap(); + ExitKind::Ok + }; - #[allow(clippy::let_unit_value)] - let mut feedback = (); - - #[allow(clippy::let_unit_value)] - let mut objective = (); - - let mut state = state.unwrap_or_else(|| { - StdState::new( - StdRand::new(), - NopCorpus::new(), - NopCorpus::new(), - &mut feedback, - &mut objective, - ) - .unwrap() - }); - - let scheduler = QueueScheduler::new(); - let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - - let rangemap = qemu - .mappings() - .filter_map(|m| { - m.path() - .map(|p| ((m.start() as usize)..(m.end() as usize), p.to_string())) - .filter(|(_, p)| !p.is_empty()) - }) - .enumerate() - .fold( - RangeMap::::new(), - |mut rm, (i, (r, p))| { - rm.insert(r, (i as u16, p)); - rm - }, - ); - - let mut coverage = PathBuf::from(&options.coverage); - let coverage_name = coverage.file_stem().unwrap().to_str().unwrap(); - let coverage_extension = coverage.extension().unwrap_or_default().to_str().unwrap(); - let core = core_id.0; - coverage.set_file_name(format!("{coverage_name}-{core:03}.{coverage_extension}")); - - let mut hooks = QemuHooks::new( - qemu, - tuple_list!(QemuDrCovHelper::new( - QemuInstrumentationAddressRangeFilter::None, - rangemap, - coverage, - false, - )), - ); - - let mut executor = QemuExecutor::new( - &mut hooks, - &mut harness, - (), - &mut fuzzer, - &mut state, - &mut mgr, - options.timeout, - ) - .expect("Failed to create QemuExecutor"); - - if state.must_load_initial_inputs() { - state - .load_initial_inputs_by_filenames(&mut fuzzer, &mut executor, &mut mgr, &files) - .unwrap_or_else(|_| { - println!("Failed to load initial corpus at {:?}", &corpus_dir); - process::exit(0); - }); - log::debug!("We imported {} inputs from disk.", state.corpus().count()); - } - - log::debug!("Processed {} inputs from disk.", files.len()); + let mut run_client = |state: Option<_>, + mut mgr: LlmpRestartingEventManager<_, _, _>, + client_description: ClientDescription| { + let core_id = client_description.core_id(); + let core_idx = options + .cores + .position(core_id) + .expect("Failed to get core index"); + let files = corpus_files + .iter() + .skip(files_per_core * core_idx) + .take(files_per_core) + .map(|x| x.path()) + .collect::>(); + if files.is_empty() { mgr.send_exiting()?; Err(Error::ShuttingDown)? 
- }; + } + + #[allow(clippy::let_unit_value)] + let mut feedback = (); + + #[allow(clippy::let_unit_value)] + let mut objective = (); + + let mut state = state.unwrap_or_else(|| { + StdState::new( + StdRand::new(), + NopCorpus::new(), + NopCorpus::new(), + &mut feedback, + &mut objective, + ) + .unwrap() + }); + + let scheduler = QueueScheduler::new(); + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + let mut cov_path = options.coverage_path.clone(); + let coverage_name = cov_path.file_stem().unwrap().to_str().unwrap(); + let coverage_extension = cov_path.extension().unwrap_or_default().to_str().unwrap(); + let core = core_id.0; + cov_path.set_file_name(format!("{coverage_name}-{core:03}.{coverage_extension}")); + + let emulator_modules = tuple_list!(DrCovModule::builder() + .filter(StdAddressFilter::default()) + .filename(cov_path) + .full_trace(false) + .build()); + + let emulator = Emulator::empty() + .qemu(qemu) + .modules(emulator_modules) + .build()?; + + let mut executor = QemuExecutor::new( + emulator, + &mut harness, + (), + &mut fuzzer, + &mut state, + &mut mgr, + options.timeout, + ) + .expect("Failed to create QemuExecutor"); + + if state.must_load_initial_inputs() { + state + .load_initial_inputs_by_filenames(&mut fuzzer, &mut executor, &mut mgr, &files) + .unwrap_or_else(|_| { + println!("Failed to load initial corpus at {:?}", &options.input_dir); + process::exit(0); + }); + log::debug!("We imported {} inputs from disk.", state.corpus().count()); + } + + log::debug!("Processed {} inputs from disk.", files.len()); + + mgr.send_exiting()?; + Err(Error::ShuttingDown)? + }; match Launcher::builder() .shmem_provider(StdShMemProvider::new().expect("Failed to init shared memory")) diff --git a/fuzzers/qemu_coverage/src/main.rs b/fuzzers/binary_only/qemu_coverage/src/main.rs similarity index 100% rename from fuzzers/qemu_coverage/src/main.rs rename to fuzzers/binary_only/qemu_coverage/src/main.rs diff --git a/fuzzers/qemu_launcher/.gitignore b/fuzzers/binary_only/qemu_launcher/.gitignore similarity index 100% rename from fuzzers/qemu_launcher/.gitignore rename to fuzzers/binary_only/qemu_launcher/.gitignore diff --git a/fuzzers/binary_only/qemu_launcher/Cargo.toml b/fuzzers/binary_only/qemu_launcher/Cargo.toml new file mode 100644 index 0000000000..f2c4d8b4b4 --- /dev/null +++ b/fuzzers/binary_only/qemu_launcher/Cargo.toml @@ -0,0 +1,55 @@ +[package] +name = "qemu_launcher" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std", "injections"] +std = [] +clippy = [] # Only for clippy, don't use. + +## Build with a simple event manager instead of Launcher - don't fork, and crash after the first bug. +simplemgr = [] + +## Enable fuzzing for injections (where supported) +injections = ["libafl_qemu/injections"] + +## Set emulator to big endian +be = ["libafl_qemu/be"] + +#! 
## Mutually exclusive architectures +arm = ["libafl_qemu/arm"] +x86_64 = ["libafl_qemu/x86_64"] +i386 = ["libafl_qemu/i386"] +aarch64 = ["libafl_qemu/aarch64"] +mips = ["libafl_qemu/mips"] +ppc = ["libafl_qemu/ppc", "be"] +hexagon = ["libafl_qemu/hexagon"] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +vergen = { version = "9.0.1", features = ["build", "cargo", "rustc", "si"] } +vergen-git2 = "1.0.1" + +[dependencies] +clap = { version = "4.5.18", features = ["derive", "string"] } +libafl = { path = "../../../libafl", features = ["tui_monitor"] } +libafl_bolts = { path = "../../../libafl_bolts", features = [ + "errors_backtrace", +] } +libafl_qemu = { path = "../../../libafl_qemu", features = ["usermode"] } +libafl_targets = { path = "../../../libafl_targets" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +nix = { version = "0.29.0", features = ["fs"] } +rangemap = { version = "1.5.1" } +readonly = { version = "0.2.12" } +typed-builder = { version = "0.20.0" } diff --git a/fuzzers/qemu_launcher/Makefile.toml b/fuzzers/binary_only/qemu_launcher/Makefile.toml similarity index 67% rename from fuzzers/qemu_launcher/Makefile.toml rename to fuzzers/binary_only/qemu_launcher/Makefile.toml index 6f508eaf03..5abd78d8ec 100644 --- a/fuzzers/qemu_launcher/Makefile.toml +++ b/fuzzers/binary_only/qemu_launcher/Makefile.toml @@ -1,6 +1,8 @@ [env] -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } CROSS_CC = "x86_64-linux-gnu-gcc" CROSS_CXX = "x86_64-linux-gnu-g++" CROSS_CFLAGS = "" @@ -66,31 +68,31 @@ LIBPNG_OPTIMIZATIONS = "no" FEATURE = "ppc" [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Qemu fuzzer not supported on windows/mac" ''' [tasks.target_dir] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = ["${CARGO_MAKE_CRATE_TARGET_DIRECTORY}"] } +script_runner = "@shell" +script = ''' mkdir ${CARGO_MAKE_CRATE_TARGET_DIRECTORY} ''' [tasks.deps_dir] dependencies = ["target_dir"] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = ["${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/"] } +script_runner = "@shell" +script = ''' mkdir ${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/ ''' [tasks.arch_target_dir] dependencies = ["target_dir"] -condition = { files_not_exist = [ "${TARGET_DIR}" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = ["${TARGET_DIR}"] } +script_runner = "@shell" +script = ''' mkdir ${TARGET_DIR} ''' @@ -101,11 +103,13 @@ windows_alias = "unsupported" [tasks.zlib_unix_wget] dependencies = ["deps_dir"] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/zlib-1.2.13" ] } -script_runner="@shell" +condition = { files_not_exist = [ + "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/zlib-1.2.13", +] } +script_runner = "@shell" # NOTE: There's no specific reason we're using an old version of zlib, # but newer versions get moved to fossils/ after a while. 
-script=''' +script = ''' wget \ -O "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/zlib-1.2.13.tar.gz" \ https://zlib.net/fossils/zlib-1.2.13.tar.gz @@ -116,10 +120,10 @@ tar \ ''' [tasks.zlib_unix] -dependencies = ["arch_target_dir", "zlib_unix_wget" ] -condition = { files_not_exist = [ "${TARGET_DIR}/build-zlib/libz.a" ] } -script_runner="@shell" -script=''' +dependencies = ["arch_target_dir", "zlib_unix_wget"] +condition = { files_not_exist = ["${TARGET_DIR}/build-zlib/libz.a"] } +script_runner = "@shell" +script = ''' rm -rf ${TARGET_DIR}/build-zlib/ mkdir ${TARGET_DIR}/build-zlib/ @@ -140,9 +144,11 @@ windows_alias = "unsupported" [tasks.libpng_unix_wget] dependencies = ["deps_dir"] -condition = { files_not_exist = [ "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/libpng-1.6.37" ] } -script_runner="@shell" -script=''' +condition = { files_not_exist = [ + "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/libpng-1.6.37", +] } +script_runner = "@shell" +script = ''' wget \ -O "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/deps/v1.6.37.tar.gz" \ https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz @@ -153,10 +159,10 @@ tar \ ''' [tasks.libpng_unix] -dependencies = [ "arch_target_dir", "zlib", "libpng_unix_wget" ] -condition = { files_not_exist = [ "${TARGET_DIR}/build-png/.libs/libpng16.a" ] } -script_runner="@shell" -script=''' +dependencies = ["arch_target_dir", "zlib", "libpng_unix_wget"] +condition = { files_not_exist = ["${TARGET_DIR}/build-png/.libs/libpng16.a"] } +script_runner = "@shell" +script = ''' rm -rf ${TARGET_DIR}/build-png/ mkdir ${TARGET_DIR}/build-png/ @@ -183,17 +189,19 @@ windows_alias = "unsupported" [tasks.build_unix] command = "cargo" args = [ - "build", - "--profile", - "${PROFILE}", - "--features", "${FEATURE}", - "--target-dir", "${TARGET_DIR}" + "build", + "--profile", + "${PROFILE}", + "--features", + "${FEATURE}", + "--target-dir", + "${TARGET_DIR}", ] [tasks.fuzzer] dependencies = ["build"] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ${TARGET_DIR}/${PROFILE_DIR}/qemu_launcher-${CARGO_MAKE_PROFILE} mv ${TARGET_DIR}/${PROFILE_DIR}/qemu_launcher ${TARGET_DIR}/${PROFILE_DIR}/qemu_launcher-${CARGO_MAKE_PROFILE} ''' @@ -204,8 +212,8 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.harness_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' ${CROSS_CXX} \ ./harness.cc \ $CROSS_CFLAGS \ @@ -218,7 +226,7 @@ ${CROSS_CXX} \ -o"${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}" \ -lm ''' -dependencies = [ "libpng" ] +dependencies = ["libpng"] [tasks.debug] linux_alias = "debug_unix" @@ -228,18 +236,25 @@ windows_alias = "unsupported" [tasks.debug_unix] command = "${TARGET_DIR}/${PROFILE_DIR}/qemu_launcher-${CARGO_MAKE_PROFILE}" args = [ - "--input", "./corpus", - "--output", "${TARGET_DIR}/output/", - "--log", "${TARGET_DIR}/output/log.txt", - "--cores", "0-7", - "--asan-cores", "0-3", - "--cmplog-cores", "2-5", - "--iterations", "100000", - "--verbose", - "--", - "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", + "--input", + "./corpus", + "--output", + "${TARGET_DIR}/output/", + "--log", + "${TARGET_DIR}/output/log.txt", + "--cores", + "0-7", + "--asan-cores", + "0-3", + "--cmplog-cores", + "2-5", + "--iterations", + "100000", + "--verbose", + "--", + "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", ] -dependencies = [ "harness", "fuzzer" ] +dependencies = ["harness", "fuzzer"] [tasks.run] linux_alias = "run_unix" @@ -249,18 +264,25 @@ windows_alias = "unsupported" [tasks.run_unix] 
command = "${TARGET_DIR}/${PROFILE_DIR}/qemu_launcher-${CARGO_MAKE_PROFILE}" args = [ - "--input", "./corpus", - "--output", "${TARGET_DIR}/output/", - "--log", "${TARGET_DIR}/output/log.txt", - "--cores", "0-7", - "--asan-cores", "0-3", - "--cmplog-cores", "2-5", - "--iterations", "1000000", - "--tui", - "--", - "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", + "--input", + "./corpus", + "--output", + "${TARGET_DIR}/output/", + "--log", + "${TARGET_DIR}/output/log.txt", + "--cores", + "0-7", + "--asan-cores", + "0-3", + "--cmplog-cores", + "2-5", + "--iterations", + "1000000", + "--tui", + "--", + "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", ] -dependencies = [ "harness", "fuzzer" ] +dependencies = ["harness", "fuzzer"] [tasks.single] linux_alias = "single_unix" @@ -270,14 +292,18 @@ windows_alias = "unsupported" [tasks.single_unix] command = "${TARGET_DIR}/${PROFILE_DIR}/qemu_launcher-${CARGO_MAKE_PROFILE}" args = [ - "--input", "./corpus", - "--output", "${TARGET_DIR}/output/", - "--log", "${TARGET_DIR}/output/log.txt", - "--cores", "0", - "--", - "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", + "--input", + "./corpus", + "--output", + "${TARGET_DIR}/output/", + "--log", + "${TARGET_DIR}/output/log.txt", + "--cores", + "0", + "--", + "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", ] -dependencies = [ "harness", "fuzzer" ] +dependencies = ["harness", "fuzzer"] [tasks.asan] linux_alias = "asan_unix" @@ -287,15 +313,20 @@ windows_alias = "unsupported" [tasks.asan_unix] command = "${TARGET_DIR}/${PROFILE_DIR}/qemu_launcher-${CARGO_MAKE_PROFILE}" args = [ - "--input", "./corpus", - "--output", "${TARGET_DIR}/output/", - "--log", "${TARGET_DIR}/output/log.txt", - "--cores", "0", - "--asan-cores", "0", - "--", - "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", + "--input", + "./corpus", + "--output", + "${TARGET_DIR}/output/", + "--log", + "${TARGET_DIR}/output/log.txt", + "--cores", + "0", + "--asan-cores", + "0", + "--", + "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", ] -dependencies = [ "harness", "fuzzer" ] +dependencies = ["harness", "fuzzer"] [tasks.asan_guest] linux_alias = "asan_guest_unix" @@ -305,15 +336,20 @@ windows_alias = "unsupported" [tasks.asan_guest_unix] command = "${TARGET_DIR}/${PROFILE_DIR}/qemu_launcher-${CARGO_MAKE_PROFILE}" args = [ - "--input", "./corpus", - "--output", "${TARGET_DIR}/output/", - "--log", "${TARGET_DIR}/output/log.txt", - "--cores", "0", - "--asan-guest-cores", "0", - "--", - "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", + "--input", + "./corpus", + "--output", + "${TARGET_DIR}/output/", + "--log", + "${TARGET_DIR}/output/log.txt", + "--cores", + "0", + "--asan-guest-cores", + "0", + "--", + "${TARGET_DIR}/libpng-harness-${CARGO_MAKE_PROFILE}", ] -dependencies = [ "harness", "fuzzer" ] +dependencies = ["harness", "fuzzer"] [tasks.test] linux_alias = "test_unix" @@ -321,8 +357,8 @@ mac_alias = "unsupported" windows_alias = "unsupported" [tasks.test_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Profile: ${PROFILE}" cd injection_test || exit 1 make @@ -350,66 +386,35 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf ${CARGO_MAKE_CRATE_TARGET_DIRECTORY} cargo clean ''' [tasks.arm] command = "cargo" -args = [ - "make", - "-p", "arm", - "run", -] +args = ["make", "-p", "arm", "run"] [tasks.aarch64] command = "cargo" -args = [ - "make", 
- "-p", "aarch64", - "run", -] +args = ["make", "-p", "aarch64", "run"] [tasks.x86_64] command = "cargo" -args = [ - "make", - "-p", "x86_64", - "run", -] +args = ["make", "-p", "x86_64", "run"] [tasks.i386] command = "cargo" -args = [ - "make", - "-p", "i386", - "run", -] +args = ["make", "-p", "i386", "run"] [tasks.mips] command = "cargo" -args = [ - "make", - "-p", "mips", - "run", -] +args = ["make", "-p", "mips", "run"] [tasks.ppc] command = "cargo" -args = [ - "make", - "-p", "ppc", - "run", -] +args = ["make", "-p", "ppc", "run"] [tasks.all] -dependencies = [ - "arm", - "aarch64", - "x86_64", - "i386", - "mips", - "ppc" -] +dependencies = ["arm", "aarch64", "x86_64", "i386", "mips", "ppc"] diff --git a/fuzzers/qemu_launcher/README.md b/fuzzers/binary_only/qemu_launcher/README.md similarity index 50% rename from fuzzers/qemu_launcher/README.md rename to fuzzers/binary_only/qemu_launcher/README.md index bc9c9b767d..3b7d3dfea7 100644 --- a/fuzzers/qemu_launcher/README.md +++ b/fuzzers/binary_only/qemu_launcher/README.md @@ -1,9 +1,13 @@ # qemu_launcher_ -This folder contains an example fuzzer for libpng, using LLMP for fast multi-process fuzzing and crash detection. It has been tested on Linux. -This automatically spawns n child processes, and binds them to a free core. +This folder contains an example fuzzer that will fuzz binary-only targets, cross-architecture, on Linux. +It's using LLMP for fast multi-process fuzzing and crash detection. +This automatically spawns `n` child processes, and binds them to a free core. + +To adapt the fuzzer to your custom target, change [`harness.rs`](./src/harness.rs). The following architectures are supported: + * arm * aarch64 * i386 @@ -11,10 +15,10 @@ The following architectures are supported: * mips * ppc -Note that the injection feature `-j` is currently only supported on x86_64 -and aarch64. +For usermode, this fuzzer supports injection fuzzing with `-j`. ## Prerequisites + ```bash sudo apt install \ gcc-arm-linux-gnueabi \ @@ -32,7 +36,8 @@ sudo apt install \ ## Run -Defaults to `x86_64` architecture +Defaults to `x86_64` architecture. Change the architecture by + ```bash cargo make run ``` diff --git a/fuzzers/qemu_launcher/build.rs b/fuzzers/binary_only/qemu_launcher/build.rs similarity index 66% rename from fuzzers/qemu_launcher/build.rs rename to fuzzers/binary_only/qemu_launcher/build.rs index 6f93e95067..0f20f30922 100644 --- a/fuzzers/qemu_launcher/build.rs +++ b/fuzzers/binary_only/qemu_launcher/build.rs @@ -1,4 +1,5 @@ -use vergen::EmitBuilder; +use vergen::{BuildBuilder, CargoBuilder, Emitter, RustcBuilder, SysinfoBuilder}; +use vergen_git2::Git2Builder; #[macro_export] macro_rules! assert_unique_feature { @@ -13,12 +14,23 @@ macro_rules! 
assert_unique_feature { } fn main() { - EmitBuilder::builder() - .all_build() - .all_cargo() - .all_git() - .all_rustc() - .all_sysinfo() + let build = BuildBuilder::all_build().unwrap(); + let cargo = CargoBuilder::all_cargo().unwrap(); + let git = Git2Builder::all_git().unwrap(); + let rustc = RustcBuilder::all_rustc().unwrap(); + let sysinfo = SysinfoBuilder::all_sysinfo().unwrap(); + + Emitter::default() + .add_instructions(&build) + .unwrap() + .add_instructions(&cargo) + .unwrap() + .add_instructions(&git) + .unwrap() + .add_instructions(&rustc) + .unwrap() + .add_instructions(&sysinfo) + .unwrap() .emit() .unwrap(); diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/corpus/not_kitty.png b/fuzzers/binary_only/qemu_launcher/corpus/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_libpng_aflpp_ui/corpus/not_kitty.png rename to fuzzers/binary_only/qemu_launcher/corpus/not_kitty.png diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/corpus/not_kitty_alpha.png b/fuzzers/binary_only/qemu_launcher/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/libfuzzer_libpng_aflpp_ui/corpus/not_kitty_alpha.png rename to fuzzers/binary_only/qemu_launcher/corpus/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/corpus/not_kitty_gamma.png b/fuzzers/binary_only/qemu_launcher/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_libpng_aflpp_ui/corpus/not_kitty_gamma.png rename to fuzzers/binary_only/qemu_launcher/corpus/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/corpus/not_kitty_icc.png b/fuzzers/binary_only/qemu_launcher/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_libpng_aflpp_ui/corpus/not_kitty_icc.png rename to fuzzers/binary_only/qemu_launcher/corpus/not_kitty_icc.png diff --git a/fuzzers/qemu_launcher/harness.cc b/fuzzers/binary_only/qemu_launcher/harness.cc similarity index 100% rename from fuzzers/qemu_launcher/harness.cc rename to fuzzers/binary_only/qemu_launcher/harness.cc diff --git a/fuzzers/binary_only/qemu_launcher/injection_test/.gitignore b/fuzzers/binary_only/qemu_launcher/injection_test/.gitignore new file mode 100644 index 0000000000..2d8ba7c535 --- /dev/null +++ b/fuzzers/binary_only/qemu_launcher/injection_test/.gitignore @@ -0,0 +1,2 @@ +sqltest +static \ No newline at end of file diff --git a/fuzzers/qemu_launcher/injection_test/Makefile b/fuzzers/binary_only/qemu_launcher/injection_test/Makefile similarity index 100% rename from fuzzers/qemu_launcher/injection_test/Makefile rename to fuzzers/binary_only/qemu_launcher/injection_test/Makefile diff --git a/fuzzers/qemu_launcher/injection_test/README.md b/fuzzers/binary_only/qemu_launcher/injection_test/README.md similarity index 100% rename from fuzzers/qemu_launcher/injection_test/README.md rename to fuzzers/binary_only/qemu_launcher/injection_test/README.md diff --git a/fuzzers/qemu_launcher/injection_test/example.db b/fuzzers/binary_only/qemu_launcher/injection_test/example.db similarity index 100% rename from fuzzers/qemu_launcher/injection_test/example.db rename to fuzzers/binary_only/qemu_launcher/injection_test/example.db diff --git a/fuzzers/qemu_launcher/injection_test/sqltest.c b/fuzzers/binary_only/qemu_launcher/injection_test/sqltest.c similarity index 100% rename from fuzzers/qemu_launcher/injection_test/sqltest.c rename to fuzzers/binary_only/qemu_launcher/injection_test/sqltest.c diff --git a/fuzzers/qemu_launcher/injections.toml b/fuzzers/binary_only/qemu_launcher/injections.toml similarity 
index 77% rename from fuzzers/qemu_launcher/injections.toml rename to fuzzers/binary_only/qemu_launcher/injections.toml index 69789e17ae..6c0fdf3ad4 100644 --- a/fuzzers/qemu_launcher/injections.toml +++ b/fuzzers/binary_only/qemu_launcher/injections.toml @@ -18,32 +18,27 @@ # # 0 = first, 1 = second, ... 0-5 are supported (depending on architecture) [sql] -tokens = [ "'\"\"'\"\n", "\"1\" OR '1'=\"1\"" ] -matches = [ "'\"\"'\"", "1\" OR '1'=\"1" ] +tokens = ["'\"\"'\"\n", "\"1\" OR '1'=\"1\""] +matches = ["'\"\"'\"", "1\" OR '1'=\"1"] [sql.functions] -sqlite3_exec = {param = 1} -PQexec = {param = 1} -PQexecParams = {param = 1} -mysql_query = {param = 1} -mysql_send_query = {param = 1} +sqlite3_exec = { param = 1 } +PQexec = { param = 1 } +PQexecParams = { param = 1 } +mysql_query = { param = 1 } +mysql_send_query = { param = 1 } # Command injection. Note that for most you will need a libc with debug symbols # We do not need this as we watch the SYS_execve syscall, this is just an # example. [cmd] -tokens = [ - "'\"FUZZ\"'", - "\";FUZZ;\"", - "';FUZZ;'", - "$(FUZZ)", -] +tokens = ["'\"FUZZ\"'", "\";FUZZ;\"", "';FUZZ;'", "$(FUZZ)"] matches = ["'\"FUZZ\"'"] [cmd.functions] -popen = {param = 0} -system = {param = 0} +popen = { param = 0 } +system = { param = 0 } # LDAP injection tests [ldap] @@ -51,8 +46,8 @@ tokens = ["*)(FUZZ=*))(|"] matches = ["*)(FUZZ=*))(|"] [ldap.functions] -ldap_search_ext = {param = 3} -ldap_search_ext_s = {param = 3} +ldap_search_ext = { param = 3 } +ldap_search_ext_s = { param = 3 } # XSS injection tests # This is a minimal example that only checks for libxml2 @@ -60,4 +55,4 @@ ldap_search_ext_s = {param = 3} tokens = ["'\">, StdRand, OnDiskCorpus>; + +pub struct Client<'a> { + options: &'a FuzzerOptions, +} + +impl Client<'_> { + pub fn new(options: &FuzzerOptions) -> Client { + Client { options } + } + + pub fn args(&self) -> Result, Error> { + let program = env::args() + .next() + .ok_or_else(|| Error::empty_optional("Failed to read program name"))?; + + let mut args = self.options.args.clone(); + args.insert(0, program); + Ok(args) + } + + #[allow(clippy::unused_self)] // Api should look the same as args above + pub fn env(&self) -> Vec<(String, String)> { + env::vars() + .filter(|(k, _v)| k != "LD_LIBRARY_PATH") + .collect::>() + } + + #[allow(clippy::too_many_lines)] + pub fn run( + &self, + state: Option, + mgr: ClientMgr, + client_description: ClientDescription, + ) -> Result<(), Error> { + let core_id = client_description.core_id(); + let mut args = self.args()?; + Harness::edit_args(&mut args); + log::debug!("ARGS: {:#?}", args); + + let mut env = self.env(); + Harness::edit_env(&mut env); + log::debug!("ENV: {:#?}", env); + + let is_asan = self.options.is_asan_core(core_id); + let is_asan_guest = self.options.is_asan_guest_core(core_id); + + if is_asan && is_asan_guest { + Err(Error::empty_optional("Multiple ASAN modes configured"))?; + } + + let (qemu, mut asan, mut asan_lib) = { + if is_asan { + let (emu, asan) = init_qemu_with_asan(&mut args, &mut env)?; + (emu, Some(asan), None) + } else if is_asan_guest { + let (emu, asan_lib) = init_qemu_with_asan_guest(&mut args, &mut env)?; + (emu, None, Some(asan_lib)) + } else { + (Qemu::init(&args)?, None, None) + } + }; + + #[cfg(not(feature = "injections"))] + let injection_module = None; + + #[cfg(feature = "injections")] + let injection_module = self + .options + .injections + .as_ref() + .and_then(|injections_file| { + let lower = injections_file.to_lowercase(); + if lower.ends_with("yaml") || 
lower.ends_with("yml") { + Some(InjectionModule::from_yaml(injections_file).unwrap()) + } else if lower.ends_with("toml") { + Some(InjectionModule::from_toml(injections_file).unwrap()) + } else { + None + } + }); + + let harness = Harness::init(qemu).expect("Error setting up harness."); + + let is_cmplog = self.options.is_cmplog_core(core_id); + + let extra_tokens = injection_module + .as_ref() + .map(|h| h.tokens.clone()) + .unwrap_or_default(); + + let instance_builder = Instance::builder() + .options(self.options) + .qemu(qemu) + .harness(harness) + .mgr(mgr) + .client_description(client_description) + .extra_tokens(extra_tokens); + + if self.options.rerun_input.is_some() && self.options.drcov.is_some() { + // Special code path for re-running inputs with DrCov. + // TODO: Add ASan support, injection support + let drcov = self.options.drcov.as_ref().unwrap(); + let drcov = DrCovModule::builder() + .filename(drcov.clone()) + .full_trace(true) + .build(); + instance_builder.build().run(tuple_list!(drcov), state) + } else if is_asan && is_cmplog { + if let Some(injection_module) = injection_module { + instance_builder.build().run( + tuple_list!( + CmpLogModule::default(), + AsanModule::default(asan.take().unwrap()), + injection_module, + ), + state, + ) + } else { + instance_builder.build().run( + tuple_list!( + CmpLogModule::default(), + AsanModule::default(asan.take().unwrap()), + ), + state, + ) + } + } else if is_asan_guest && is_cmplog { + if let Some(injection_module) = injection_module { + instance_builder.build().run( + tuple_list!( + CmpLogModule::default(), + AsanGuestModule::default(qemu, &asan_lib.take().unwrap()), + injection_module + ), + state, + ) + } else { + instance_builder.build().run( + tuple_list!( + CmpLogModule::default(), + AsanGuestModule::default(qemu, &asan_lib.take().unwrap()), + ), + state, + ) + } + } else if is_asan { + if let Some(injection_module) = injection_module { + instance_builder.build().run( + tuple_list!(AsanModule::default(asan.take().unwrap()), injection_module), + state, + ) + } else { + instance_builder.build().run( + tuple_list!(AsanModule::default(asan.take().unwrap()),), + state, + ) + } + } else if is_asan_guest { + let modules = tuple_list!(AsanGuestModule::default(qemu, &asan_lib.take().unwrap())); + instance_builder.build().run(modules, state) + } else if is_cmplog { + if let Some(injection_module) = injection_module { + instance_builder.build().run( + tuple_list!(CmpLogModule::default(), injection_module), + state, + ) + } else { + instance_builder + .build() + .run(tuple_list!(CmpLogModule::default()), state) + } + } else if let Some(injection_module) = injection_module { + instance_builder + .build() + .run(tuple_list!(injection_module), state) + } else { + instance_builder.build().run(tuple_list!(), state) + } + } +} diff --git a/fuzzers/qemu_launcher/src/fuzzer.rs b/fuzzers/binary_only/qemu_launcher/src/fuzzer.rs similarity index 60% rename from fuzzers/qemu_launcher/src/fuzzer.rs rename to fuzzers/binary_only/qemu_launcher/src/fuzzer.rs index fe088f9f9e..5c737f0c6a 100644 --- a/fuzzers/qemu_launcher/src/fuzzer.rs +++ b/fuzzers/binary_only/qemu_launcher/src/fuzzer.rs @@ -10,17 +10,16 @@ use libafl::events::SimpleEventManager; #[cfg(not(feature = "simplemgr"))] use libafl::events::{EventConfig, Launcher, MonitorTypedEventManager}; use libafl::{ - monitors::{ - tui::{ui::TuiUI, TuiMonitor}, - Monitor, MultiMonitor, - }, + events::{ClientDescription, LlmpEventManager, LlmpRestartingEventManager}, + monitors::{tui::TuiMonitor, Monitor, 
MultiMonitor}, Error, }; -#[cfg(feature = "simplemgr")] -use libafl_bolts::core_affinity::CoreId; -use libafl_bolts::current_time; +use libafl_bolts::{core_affinity::CoreId, current_time, llmp::LlmpBroker, tuples::tuple_list}; #[cfg(not(feature = "simplemgr"))] -use libafl_bolts::shmem::{ShMemProvider, StdShMemProvider}; +use libafl_bolts::{ + shmem::{ShMemProvider, StdShMemProvider}, + staterestore::StateRestorer, +}; #[cfg(unix)] use { nix::unistd::dup, @@ -42,9 +41,11 @@ impl Fuzzer { pub fn fuzz(&self) -> Result<(), Error> { if self.options.tui { - let ui = - TuiUI::with_version(String::from("QEMU Launcher"), String::from("0.10.1"), true); - let monitor = TuiMonitor::new(ui); + let monitor = TuiMonitor::builder() + .title("QEMU Launcher") + .version("0.13.1") + .enhanced_graphics(true) + .build(); self.launch(monitor) } else { let log = self.options.log.as_ref().and_then(|l| { @@ -83,7 +84,7 @@ impl Fuzzer { { // The shared memory allocator #[cfg(not(feature = "simplemgr"))] - let shmem_provider = StdShMemProvider::new()?; + let mut shmem_provider = StdShMemProvider::new()?; /* If we are running in verbose, don't provide a replacement stdout, otherwise, use /dev/null */ #[cfg(not(feature = "simplemgr"))] @@ -95,10 +96,42 @@ impl Fuzzer { let client = Client::new(&self.options); + #[cfg(not(feature = "simplemgr"))] + if self.options.rerun_input.is_some() { + // If we want to rerun a single input but we use a restarting mgr, we'll have to create a fake restarting mgr that doesn't actually restart. + // It's not pretty but better than recompiling with simplemgr. + + // Just a random number, let's hope it's free :) + let broker_port = 13120; + let _fake_broker = LlmpBroker::create_attach_to_tcp( + shmem_provider.clone(), + tuple_list!(), + broker_port, + ) + .unwrap(); + + // To rerun an input, instead of using a launcher, we create dummy parameters and run the client directly. + return client.run( + None, + MonitorTypedEventManager::<_, M>::new(LlmpRestartingEventManager::new( + LlmpEventManager::builder() + .build_on_port( + shmem_provider.clone(), + broker_port, + EventConfig::AlwaysUnique, + None, + ) + .unwrap(), + StateRestorer::new(shmem_provider.new_shmem(0x1000).unwrap()), + )), + ClientDescription::new(0, 0, CoreId(0)), + ); + } + #[cfg(feature = "simplemgr")] return client.run(None, SimpleEventManager::new(monitor), CoreId(0)); - // Build and run a Launcher + // Build and run the Launcher / fuzzer. 
#[cfg(not(feature = "simplemgr"))] match Launcher::builder() .shmem_provider(shmem_provider) diff --git a/fuzzers/qemu_launcher/src/harness.rs b/fuzzers/binary_only/qemu_launcher/src/harness.rs similarity index 57% rename from fuzzers/qemu_launcher/src/harness.rs rename to fuzzers/binary_only/qemu_launcher/src/harness.rs index 946796be78..6376348d0c 100644 --- a/fuzzers/qemu_launcher/src/harness.rs +++ b/fuzzers/binary_only/qemu_launcher/src/harness.rs @@ -4,10 +4,12 @@ use libafl::{ Error, }; use libafl_bolts::AsSlice; -use libafl_qemu::{ArchExtras, CallingConvention, GuestAddr, GuestReg, MmapPerms, Qemu, Regs}; +use libafl_qemu::{ + elf::EasyElf, ArchExtras, CallingConvention, GuestAddr, GuestReg, MmapPerms, Qemu, Regs, +}; -pub struct Harness<'a> { - qemu: &'a Qemu, +pub struct Harness { + qemu: Qemu, input_addr: GuestAddr, pc: GuestAddr, stack_ptr: GuestAddr, @@ -16,8 +18,41 @@ pub struct Harness { pub const MAX_INPUT_SIZE: usize = 1_048_576; // 1MB -impl<'a> Harness<'a> { - pub fn new(qemu: &Qemu) -> Result<Harness, Error> { +impl Harness { + /// Change environment + #[inline] + #[allow(clippy::ptr_arg)] + pub fn edit_env(_env: &mut Vec<(String, String)>) {} + + /// Change arguments + #[inline] + #[allow(clippy::ptr_arg)] + pub fn edit_args(_args: &mut Vec<String>) {} + + /// Helper function to find the function we want to fuzz. + fn start_pc(qemu: Qemu) -> Result<GuestAddr, Error> { + let mut elf_buffer = Vec::new(); + let elf = EasyElf::from_file(qemu.binary_path(), &mut elf_buffer)?; + + let start_pc = elf + .resolve_symbol("LLVMFuzzerTestOneInput", qemu.load_addr()) + .ok_or_else(|| Error::empty_optional("Symbol LLVMFuzzerTestOneInput not found"))?; + Ok(start_pc) + } + + /// Initialize the emulator, run to the entrypoint (or jump there) and return the [`Harness`] struct + pub fn init(qemu: Qemu) -> Result<Harness, Error> { + let start_pc = Self::start_pc(qemu)?; + log::debug!("start_pc @ {start_pc:#x}"); + + qemu.entry_break(start_pc); + + let ret_addr: GuestAddr = qemu + .read_return_address() + .map_err(|e| Error::unknown(format!("Failed to read return address: {e:?}")))?; + log::debug!("ret_addr = {ret_addr:#x}"); + qemu.set_breakpoint(ret_addr); + let input_addr = qemu .map_private(0, MAX_INPUT_SIZE, MmapPerms::ReadWrite) .map_err(|e| Error::unknown(format!("Failed to map input buffer: {e:}")))?; @@ -43,6 +78,11 @@ impl<'a> Harness<'a> { }) } + /// If we need to do extra work after forking, we can do that here. 
+ #[inline] + #[allow(clippy::unused_self)] + pub fn post_fork(&self) {} + pub fn run(&self, input: &BytesInput) -> ExitKind { self.reset(input).unwrap(); ExitKind::Ok @@ -58,7 +98,12 @@ impl<'a> Harness<'a> { } let len = len as GuestReg; - unsafe { self.qemu.write_mem(self.input_addr, buf) }; + self.qemu.write_mem(self.input_addr, buf).map_err(|e| { + Error::unknown(format!( + "Failed to write to memory@{:#x}: {e:?}", + self.input_addr + )) + })?; self.qemu .write_reg(Regs::Pc, self.pc) diff --git a/fuzzers/qemu_launcher/src/instance.rs b/fuzzers/binary_only/qemu_launcher/src/instance.rs similarity index 58% rename from fuzzers/qemu_launcher/src/instance.rs rename to fuzzers/binary_only/qemu_launcher/src/instance.rs index 519eaa6b89..268aa75f88 100644 --- a/fuzzers/qemu_launcher/src/instance.rs +++ b/fuzzers/binary_only/qemu_launcher/src/instance.rs @@ -1,5 +1,5 @@ -use core::{fmt::Debug, ptr::addr_of_mut}; -use std::{marker::PhantomData, process}; +use core::fmt::Debug; +use std::{fs, marker::PhantomData, ops::Range, process, time::Duration}; #[cfg(feature = "simplemgr")] use libafl::events::SimpleEventManager; @@ -7,42 +7,43 @@ use libafl::events::SimpleEventManager; use libafl::events::{LlmpRestartingEventManager, MonitorTypedEventManager}; use libafl::{ corpus::{Corpus, InMemoryOnDiskCorpus, OnDiskCorpus}, - events::EventRestarter, - executors::ShadowExecutor, + events::{ClientDescription, EventRestarter, NopEventManager}, + executors::{Executor, ShadowExecutor}, feedback_or, feedback_or_fast, feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, fuzzer::{Evaluator, Fuzzer, StdFuzzer}, inputs::BytesInput, monitors::Monitor, mutators::{ - scheduled::havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, - StdMOptMutator, StdScheduledMutator, Tokens, + havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, StdMOptMutator, + StdScheduledMutator, Tokens, }, observers::{CanTrack, HitcountsMapObserver, TimeObserver, VariableMapObserver}, schedulers::{ powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, PowerQueueScheduler, }, stages::{ - calibrate::CalibrationStage, power::StdPowerMutationalStage, ShadowTracingStage, - StagesTuple, StdMutationalStage, + calibrate::CalibrationStage, power::StdPowerMutationalStage, IfStage, ShadowTracingStage, + StagesTuple, StatsStage, StdMutationalStage, }, state::{HasCorpus, StdState, UsesState}, - Error, HasMetadata, + Error, HasMetadata, NopFuzzer, }; #[cfg(not(feature = "simplemgr"))] use libafl_bolts::shmem::StdShMemProvider; use libafl_bolts::{ - core_affinity::CoreId, ownedref::OwnedMutSlice, rands::StdRand, - tuples::{tuple_list, Merge}, + tuples::{tuple_list, Merge, Prepend}, }; use libafl_qemu::{ - cmplog::CmpLogObserver, - edges::{edges_map_mut_ptr, EDGES_MAP_SIZE_IN_USE, MAX_EDGES_FOUND}, - helpers::QemuHelperTuple, - Qemu, QemuExecutor, QemuHooks, + elf::EasyElf, + modules::{ + cmplog::CmpLogObserver, EmulatorModuleTuple, StdAddressFilter, StdEdgeCoverageModule, + }, + Emulator, GuestAddr, Qemu, QemuExecutor, }; +use libafl_targets::{edges_map_mut_ptr, EDGES_MAP_DEFAULT_SIZE, MAX_EDGES_FOUND}; use typed_builder::TypedBuilder; use crate::{harness::Harness, options::FuzzerOptions}; @@ -59,31 +60,74 @@ pub type ClientMgr = #[derive(TypedBuilder)] pub struct Instance<'a, M: Monitor> { options: &'a FuzzerOptions, - qemu: &'a Qemu, + /// The harness. We create it before forking, then `take()` it inside the client. 
+ #[builder(setter(strip_option))] + harness: Option, + qemu: Qemu, mgr: ClientMgr, - core_id: CoreId, - extra_tokens: Option>, + client_description: ClientDescription, + #[builder(default)] + extra_tokens: Vec, #[builder(default=PhantomData)] phantom: PhantomData, } -impl<'a, M: Monitor> Instance<'a, M> { - pub fn run(&mut self, helpers: QT, state: Option) -> Result<(), Error> - where - QT: QemuHelperTuple + Debug, - { - let mut hooks = QemuHooks::new(*self.qemu, helpers); +impl Instance<'_, M> { + #[allow(clippy::similar_names)] // elf != self + fn coverage_filter(&self, qemu: Qemu) -> Result { + /* Conversion is required on 32-bit targets, but not on 64-bit ones */ + if let Some(includes) = &self.options.include { + #[cfg_attr(target_pointer_width = "64", allow(clippy::useless_conversion))] + let rules = includes + .iter() + .map(|x| Range { + start: x.start.into(), + end: x.end.into(), + }) + .collect::>>(); + Ok(StdAddressFilter::allow_list(rules)) + } else if let Some(excludes) = &self.options.exclude { + #[cfg_attr(target_pointer_width = "64", allow(clippy::useless_conversion))] + let rules = excludes + .iter() + .map(|x| Range { + start: x.start.into(), + end: x.end.into(), + }) + .collect::>>(); + Ok(StdAddressFilter::deny_list(rules)) + } else { + let mut elf_buffer = Vec::new(); + let elf = EasyElf::from_file(qemu.binary_path(), &mut elf_buffer)?; + let range = elf + .get_section(".text", qemu.load_addr()) + .ok_or_else(|| Error::key_not_found("Failed to find .text section"))?; + Ok(StdAddressFilter::allow_list(vec![range])) + } + } + #[allow(clippy::too_many_lines)] + pub fn run(&mut self, modules: ET, state: Option) -> Result<(), Error> + where + ET: EmulatorModuleTuple + Debug, + { // Create an observation channel using the coverage map - let edges_observer = unsafe { + let mut edges_observer = unsafe { HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( "edges", - OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_SIZE_IN_USE), - addr_of_mut!(MAX_EDGES_FOUND), + OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_DEFAULT_SIZE), + &raw mut MAX_EDGES_FOUND, )) .track_indices() }; + let edge_coverage_module = StdEdgeCoverageModule::builder() + .map_observer(edges_observer.as_mut()) + .address_filter(self.coverage_filter(self.qemu)?) + .build()?; + + let modules = modules.prepend(edge_coverage_module); + // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); @@ -91,6 +135,11 @@ impl<'a, M: Monitor> Instance<'a, M> { let calibration = CalibrationStage::new(&map_feedback); + let stats_stage = IfStage::new( + |_, _, _, _| Ok(self.options.tui), + tuple_list!(StatsStage::new(Duration::from_secs(5))), + ); + // Feedback to rate the interestingness of an input // This one is composed by two Feedbacks in OR let mut feedback = feedback_or!( @@ -111,10 +160,12 @@ impl<'a, M: Monitor> Instance<'a, M> { // RNG StdRand::new(), // Corpus that will be evolved, we keep it in memory for performance - InMemoryOnDiskCorpus::no_meta(self.options.queue_dir(self.core_id))?, + InMemoryOnDiskCorpus::no_meta( + self.options.queue_dir(self.client_description.clone()), + )?, // Corpus in which we store solutions (crashes in this example), // on disk so the user can get them after stopping the fuzzer - OnDiskCorpus::new(self.options.crashes_dir(self.core_id))?, + OnDiskCorpus::new(self.options.crashes_dir(self.client_description.clone()))?, // States of the feedbacks. 
// The feedbacks can report the data that should persist in the State. &mut feedback, @@ -127,18 +178,16 @@ impl<'a, M: Monitor> Instance<'a, M> { // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( &edges_observer, - PowerQueueScheduler::new(&mut state, &edges_observer, PowerSchedule::FAST), + PowerQueueScheduler::new(&mut state, &edges_observer, PowerSchedule::fast()), ); let observers = tuple_list!(edges_observer, time_observer); let mut tokens = Tokens::new(); - if let Some(extra_tokens) = &self.extra_tokens { - for token in extra_tokens { - let bytes = token.as_bytes().to_vec(); - let _ = tokens.add_token(&bytes); - } + for token in &self.extra_tokens { + let bytes = token.as_bytes().to_vec(); + let _ = tokens.add_token(&bytes); } if let Some(tokenfile) = &self.options.tokens { @@ -147,16 +196,56 @@ impl<'a, M: Monitor> Instance<'a, M> { state.add_metadata(tokens); - let harness = Harness::new(self.qemu)?; - let mut harness = |input: &BytesInput| harness.run(input); + let harness = self + .harness + .take() + .expect("The harness can never be None here!"); + harness.post_fork(); + + let mut harness = |_emulator: &mut Emulator<_, _, _, _, _>, + _state: &mut _, + input: &BytesInput| harness.run(input); // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - if self.options.is_cmplog_core(self.core_id) { + let emulator = Emulator::empty().qemu(self.qemu).modules(modules).build()?; + + if let Some(rerun_input) = &self.options.rerun_input { + // TODO: We might want to support non-bytes inputs at some point? + let bytes = fs::read(rerun_input) + .unwrap_or_else(|_| panic!("Could not load file {rerun_input:?}")); + let input = BytesInput::new(bytes); + + let mut executor = QemuExecutor::new( + emulator, + &mut harness, + observers, + &mut fuzzer, + &mut state, + &mut self.mgr, + self.options.timeout, + )?; + + executor + .run_target( + &mut NopFuzzer::new(), + &mut state, + &mut NopEventManager::new(), + &input, + ) + .expect("Error running target"); + // We're done :) + process::exit(0); + } + + if self + .options + .is_cmplog_core(self.client_description.core_id()) + { // Create a QEMU in-process executor let executor = QemuExecutor::new( - &mut hooks, + emulator, &mut harness, observers, &mut fuzzer, @@ -185,16 +274,17 @@ impl<'a, M: Monitor> Instance<'a, M> { 5, )?; - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); // The order of the stages matter! 
- let mut stages = tuple_list!(calibration, tracing, i2s, power); + let mut stages = tuple_list!(calibration, tracing, i2s, power, stats_stage); self.fuzz(&mut state, &mut fuzzer, &mut executor, &mut stages) } else { // Create a QEMU in-process executor let mut executor = QemuExecutor::new( - &mut hooks, + emulator, &mut harness, observers, &mut fuzzer, diff --git a/fuzzers/qemu_launcher/src/main.rs b/fuzzers/binary_only/qemu_launcher/src/main.rs similarity index 100% rename from fuzzers/qemu_launcher/src/main.rs rename to fuzzers/binary_only/qemu_launcher/src/main.rs diff --git a/fuzzers/qemu_launcher/src/options.rs b/fuzzers/binary_only/qemu_launcher/src/options.rs similarity index 77% rename from fuzzers/qemu_launcher/src/options.rs rename to fuzzers/binary_only/qemu_launcher/src/options.rs index 20e9f598de..2465196e31 100644 --- a/fuzzers/qemu_launcher/src/options.rs +++ b/fuzzers/binary_only/qemu_launcher/src/options.rs @@ -2,7 +2,7 @@ use core::time::Duration; use std::{env, ops::Range, path::PathBuf}; use clap::{error::ErrorKind, CommandFactory, Parser}; -use libafl::Error; +use libafl::{events::ClientDescription, Error}; use libafl_bolts::core_affinity::{CoreId, Cores}; use libafl_qemu::GuestAddr; @@ -39,7 +39,7 @@ pub struct FuzzerOptions { #[arg(long, help = "Log file")] pub log: Option, - #[arg(long, help = "Timeout in milli-seconds", default_value = "1000", value_parser = FuzzerOptions::parse_timeout)] + #[arg(long, help = "Timeout in milliseconds", default_value = "1000", value_parser = FuzzerOptions::parse_timeout)] pub timeout: Duration, #[arg(long = "port", help = "Broker port", default_value_t = 1337_u16)] @@ -48,10 +48,10 @@ pub struct FuzzerOptions { #[arg(long, help = "Cpu cores to use", default_value = "all", value_parser = Cores::from_cmdline)] pub cores: Cores, - #[arg(long, help = "Cpu cores to use for ASAN", value_parser = Cores::from_cmdline)] + #[arg(long, help = "Cpu cores to use for ASan", value_parser = Cores::from_cmdline)] pub asan_cores: Option, - #[arg(long, help = "Cpu cores to use for ASAN", value_parser = Cores::from_cmdline)] + #[arg(long, help = "Cpu cores to use for ASan", value_parser = Cores::from_cmdline)] pub asan_guest_cores: Option, #[arg(long, help = "Cpu cores to use for CmpLog", value_parser = Cores::from_cmdline)] @@ -63,7 +63,7 @@ pub struct FuzzerOptions { #[clap(long, help = "Enable AFL++ style output", conflicts_with = "verbose")] pub tui: bool, - #[arg(long = "iterations", help = "Maximum numer of iterations")] + #[arg(long = "iterations", help = "Maximum number of iterations")] pub iterations: Option, #[arg(long = "include", help="Include address ranges", value_parser = FuzzerOptions::parse_ranges)] @@ -72,6 +72,18 @@ pub struct FuzzerOptions { #[arg(long = "exclude", help="Exclude address ranges", value_parser = FuzzerOptions::parse_ranges, conflicts_with="include")] pub exclude: Option>>, + #[arg( + short = 'd', + help = "Write a DrCov Trace for the current input. Requires -r." + )] + pub drcov: Option, + + #[arg( + short = 'r', + help = "An input to rerun, instead of starting to fuzz. Will ignore all other settings apart from -d." 
+ )] + pub rerun_input: Option, + #[arg(last = true, help = "Arguments passed to the target")] pub args: Vec, } @@ -113,39 +125,39 @@ impl FuzzerOptions { pub fn is_asan_core(&self, core_id: CoreId) -> bool { self.asan_cores .as_ref() - .map_or(false, |c| c.contains(core_id)) + .is_some_and(|c| c.contains(core_id)) } pub fn is_asan_guest_core(&self, core_id: CoreId) -> bool { self.asan_guest_cores .as_ref() - .map_or(false, |c| c.contains(core_id)) + .is_some_and(|c| c.contains(core_id)) } pub fn is_cmplog_core(&self, core_id: CoreId) -> bool { self.cmplog_cores .as_ref() - .map_or(false, |c| c.contains(core_id)) + .is_some_and(|c| c.contains(core_id)) } pub fn input_dir(&self) -> PathBuf { PathBuf::from(&self.input) } - pub fn output_dir(&self, core_id: CoreId) -> PathBuf { + pub fn output_dir(&self, client_description: ClientDescription) -> PathBuf { let mut dir = PathBuf::from(&self.output); - dir.push(format!("cpu_{:03}", core_id.0)); + dir.push(format!("client_{:03}", client_description.id())); dir } - pub fn queue_dir(&self, core_id: CoreId) -> PathBuf { - let mut dir = self.output_dir(core_id).clone(); + pub fn queue_dir(&self, client_description: ClientDescription) -> PathBuf { + let mut dir = self.output_dir(client_description).clone(); dir.push("queue"); dir } - pub fn crashes_dir(&self, core_id: CoreId) -> PathBuf { - let mut dir = self.output_dir(core_id).clone(); + pub fn crashes_dir(&self, client_description: ClientDescription) -> PathBuf { + let mut dir = self.output_dir(client_description).clone(); dir.push("crashes"); dir } @@ -182,5 +194,14 @@ impl FuzzerOptions { } } } + + if self.drcov.is_some() && self.rerun_input.is_none() { + let mut cmd = FuzzerOptions::command(); + cmd.error( + ErrorKind::ValueValidation, + "The `drcov` option is only supported with `rerun_input`.".to_string(), + ) + .exit(); + } } } diff --git a/fuzzers/qemu_launcher/src/version.rs b/fuzzers/binary_only/qemu_launcher/src/version.rs similarity index 100% rename from fuzzers/qemu_launcher/src/version.rs rename to fuzzers/binary_only/qemu_launcher/src/version.rs diff --git a/fuzzers/binary_only/tinyinst_simple/Cargo.toml b/fuzzers/binary_only/tinyinst_simple/Cargo.toml new file mode 100644 index 0000000000..a4a7f16ccf --- /dev/null +++ b/fuzzers/binary_only/tinyinst_simple/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "tinyinst_simple" +version = "0.14.1" +edition = "2021" + +[dependencies] +libafl = { path = "../../../libafl", features = ["introspection"] } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_tinyinst = { path = "../../../libafl_tinyinst" } +log = { version = "0.4.22", features = ["release_max_level_info"] } + +[profile.release] +codegen-units = 1 +opt-level = 3 diff --git a/fuzzers/tinyinst_simple/Makefile.toml b/fuzzers/binary_only/tinyinst_simple/Makefile.toml similarity index 80% rename from fuzzers/tinyinst_simple/Makefile.toml rename to fuzzers/binary_only/tinyinst_simple/Makefile.toml index 7b6aefc1a0..68fa085a01 100644 --- a/fuzzers/tinyinst_simple/Makefile.toml +++ b/fuzzers/binary_only/tinyinst_simple/Makefile.toml @@ -1,11 +1,15 @@ [env] -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = { value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} -CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = 
"debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } +CARGO_TARGET_DIR = { value = "target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -16,12 +20,12 @@ mac_alias = "unsupported" windows_alias = "harness_windows" [tasks.harness_linux] -script=''' +script = ''' clang test/test.cpp -o test.exe ''' [tasks.harness_windows] -script=''' +script = ''' cl test\test.cpp -o test.exe ''' @@ -65,8 +69,8 @@ mac_alias = "unsupported" windows_alias = "test_windows" [tasks.test_linux] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cp ${CARGO_TARGET_DIR}/${PROFILE_DIR}/tinyinst_simple . echo running tests timeout 5s ./tinyinst_simple || true @@ -81,11 +85,11 @@ dependencies = ["harness", "fuzzer"] [tasks.test_windows] script_runner = "@shell" -script=''' +script = ''' copy .\target\${PROFILE_DIR}\tinyinst_simple.exe . start "" "tinyinst_simple.exe" #ping is for timeout ping -n 10 127.0.0.1>NUL && taskkill /im tinyinst_simple.exe /F >nul 2>nul dir /a-d "corpus_discovered\*" && (echo Files exist) || (exit /b 1337) ''' -dependencies = ["harness", "fuzzer"] \ No newline at end of file +dependencies = ["harness", "fuzzer"] diff --git a/fuzzers/tinyinst_simple/README.md b/fuzzers/binary_only/tinyinst_simple/README.md similarity index 100% rename from fuzzers/tinyinst_simple/README.md rename to fuzzers/binary_only/tinyinst_simple/README.md diff --git a/fuzzers/tinyinst_simple/src/main.rs b/fuzzers/binary_only/tinyinst_simple/src/main.rs similarity index 80% rename from fuzzers/tinyinst_simple/src/main.rs rename to fuzzers/binary_only/tinyinst_simple/src/main.rs index bfc96b4e2b..aa32678f58 100644 --- a/fuzzers/tinyinst_simple/src/main.rs +++ b/fuzzers/binary_only/tinyinst_simple/src/main.rs @@ -1,4 +1,4 @@ -use std::{path::PathBuf, ptr::addr_of_mut, time::Duration}; +use std::{path::PathBuf, time::Duration}; use libafl::{ corpus::{CachedOnDiskCorpus, Corpus, OnDiskCorpus, Testcase}, @@ -37,7 +37,7 @@ fn main() { // use file to pass testcases // let args = vec!["test.exe".to_string(), "-f".to_string(), "@@".to_string()]; - let coverage = unsafe { OwnedMutPtr::Ptr(addr_of_mut!(COVERAGE)) }; + let coverage = OwnedMutPtr::Ptr(&raw mut COVERAGE); let observer = ListObserver::new("cov", coverage); let mut feedback = ListFeedback::new(&observer); #[cfg(windows)] @@ -62,18 +62,17 @@ fn main() { let monitor = SimpleMonitor::new(|x| println!("{x}")); let mut mgr = SimpleEventManager::new(monitor); - let mut executor = unsafe { - TinyInstExecutor::builder() - .tinyinst_args(tinyinst_args) - .program_args(args) - .use_shmem() - .persistent("test.exe".to_string(), "fuzz".to_string(), 1, 10000) - .timeout(Duration::new(5, 0)) - .shmem_provider(&mut shmem_provider) - .coverage_ptr(addr_of_mut!(COVERAGE)) - .build(tuple_list!(observer)) - .unwrap() - }; + let mut executor = TinyInstExecutor::builder() + .tinyinst_args(tinyinst_args) + .program_args(args) + .use_shmem() + .persistent("test.exe".to_string(), "fuzz".to_string(), 1, 10000) + .timeout(Duration::new(5, 0)) + .shmem_provider(&mut shmem_provider) + .coverage_ptr(&raw mut COVERAGE) + .build(tuple_list!(observer)) + .unwrap(); + let mutator = StdScheduledMutator::new(havoc_mutations()); let mut stages = tuple_list!(StdMutationalStage::new(mutator)); fuzzer diff --git a/fuzzers/tinyinst_simple/test/crash_input.txt b/fuzzers/binary_only/tinyinst_simple/test/crash_input.txt 
similarity index 100% rename from fuzzers/tinyinst_simple/test/crash_input.txt rename to fuzzers/binary_only/tinyinst_simple/test/crash_input.txt diff --git a/fuzzers/tinyinst_simple/test/ok_input.txt b/fuzzers/binary_only/tinyinst_simple/test/ok_input.txt similarity index 100% rename from fuzzers/tinyinst_simple/test/ok_input.txt rename to fuzzers/binary_only/tinyinst_simple/test/ok_input.txt diff --git a/fuzzers/tinyinst_simple/test/test.cpp b/fuzzers/binary_only/tinyinst_simple/test/test.cpp similarity index 100% rename from fuzzers/tinyinst_simple/test/test.cpp rename to fuzzers/binary_only/tinyinst_simple/test/test.cpp diff --git a/fuzzers/cargo_fuzz/README.md b/fuzzers/cargo_fuzz/README.md deleted file mode 100644 index 4d30ba27ef..0000000000 --- a/fuzzers/cargo_fuzz/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# cargo-fuzz - -This is a minimalistic example how to use LibAFL with cargo-fuzz. It uses the `libafl_libfuzzer` comatability layer to be libFuzzer compatiable. diff --git a/fuzzers/dynamic_analysis/Cargo.toml b/fuzzers/dynamic_analysis/Cargo.toml deleted file mode 100644 index 5b35c0894f..0000000000 --- a/fuzzers/dynamic_analysis/Cargo.toml +++ /dev/null @@ -1,41 +0,0 @@ -[package] -name = "dynamic_analysis" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] -no_link_main = ["libafl_targets/libfuzzer_no_link_main"] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[profile.release-fuzzbench] -inherits = "release" -debug = false -strip = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "6.0" - -[dependencies] -env_logger = "0.11" -once_cell = "1.19" -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "sancov_cmplog", "libfuzzer", "function-logging"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -clap = { version = "4.0", features = ["default"] } -nix = { version = "0.29", features = ["fs"] } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "fuzzbench" -crate-type = ["staticlib"] diff --git a/fuzzers/dynamic_analysis/README.md b/fuzzers/dynamic_analysis/README.md deleted file mode 100644 index 9da06f3f4d..0000000000 --- a/fuzzers/dynamic_analysis/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Dynamic Analysis Fuzzer -This fuzzer is to show how you can collect runtime analysis information during fuzzing using LibAFL. We use the Little-CMS project for the example. -First, this fuzzer requires `nlohmann-json3-dev` to work. - -To run the fuzzer, -0. Compile the fuzzer with `cargo build --release` -1. `mkdir analysis` and run `build.sh`. This will compile Little-CMS to extract the analysis information and generate a json file for each module. -2. run `python3 concatenator.py analysis`. This will concatenate all the json into one single file. This json file maps a function id to its analysis information. -3. Compile the fuzzer with `cargo make fuzzer`. This will instrument the fuzzer at every function entry point. Therefore, whenever we reach the entry of any function, we -can log its id and logs what functions we executed. -4. Run the fuzzer `RUST_LOG=info ./fuzzer --input ./corpus --output ./out`. 
You'll see a stream of analysis data \ No newline at end of file diff --git a/fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/.gitignore b/fuzzers/forkserver/baby_fuzzer_with_forkexecutor/.gitignore similarity index 100% rename from fuzzers/backtrace_baby_fuzzers/rust_code_with_inprocess_executor/.gitignore rename to fuzzers/forkserver/baby_fuzzer_with_forkexecutor/.gitignore diff --git a/fuzzers/forkserver/baby_fuzzer_with_forkexecutor/Cargo.toml b/fuzzers/forkserver/baby_fuzzer_with_forkexecutor/Cargo.toml new file mode 100644 index 0000000000..c4451befd8 --- /dev/null +++ b/fuzzers/forkserver/baby_fuzzer_with_forkexecutor/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "baby_fuzzer_with_forkexecutor" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/baby_fuzzer_with_forkexecutor/README.md b/fuzzers/forkserver/baby_fuzzer_with_forkexecutor/README.md similarity index 100% rename from fuzzers/baby_fuzzer_with_forkexecutor/README.md rename to fuzzers/forkserver/baby_fuzzer_with_forkexecutor/README.md diff --git a/fuzzers/baby_fuzzer_with_forkexecutor/src/main.rs b/fuzzers/forkserver/baby_fuzzer_with_forkexecutor/src/main.rs similarity index 96% rename from fuzzers/baby_fuzzer_with_forkexecutor/src/main.rs rename to fuzzers/forkserver/baby_fuzzer_with_forkexecutor/src/main.rs index 7ab274bdcf..ca36d11c97 100644 --- a/fuzzers/baby_fuzzer_with_forkexecutor/src/main.rs +++ b/fuzzers/forkserver/baby_fuzzer_with_forkexecutor/src/main.rs @@ -11,13 +11,14 @@ use libafl::{ generators::RandPrintablesGenerator, inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::StdMapObserver, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, state::StdState, }; use libafl_bolts::{ + nonzero, rands::StdRand, shmem::{unix_shmem, ShMemProvider}, tuples::tuple_list, @@ -115,7 +116,7 @@ pub fn main() { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/forkserver/forkserver_libafl_cc/Cargo.toml b/fuzzers/forkserver/forkserver_libafl_cc/Cargo.toml new file mode 100644 index 0000000000..2081a1e305 --- /dev/null +++ b/fuzzers/forkserver/forkserver_libafl_cc/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "forkserver_libafl_cc" +version = "0.14.1" +authors = ["ergrelet "] +edition = "2021" + +[features] +default = ["std"] +std = [] +# Forces a crash +crash = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = { version = "6.0.3" } + +[dependencies] +clap = { version = "4.5.18", features = ["derive"] } +nix = { version = "0.29.0", features = ["signal"] } +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_cc = { path = "../../../libafl_cc" } 
+libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", + "pointer_maps", +] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +env_logger = "0.11.5" + +[lib] +name = "libforkserver_libafl_cc" +crate-type = ["staticlib"] diff --git a/fuzzers/forkserver_libafl_cc/Makefile.toml b/fuzzers/forkserver/forkserver_libafl_cc/Makefile.toml similarity index 76% rename from fuzzers/forkserver_libafl_cc/Makefile.toml rename to fuzzers/forkserver/forkserver_libafl_cc/Makefile.toml index 94bf9daf19..bb04c5338e 100644 --- a/fuzzers/forkserver_libafl_cc/Makefile.toml +++ b/fuzzers/forkserver/forkserver_libafl_cc/Makefile.toml @@ -1,17 +1,21 @@ # Variables [env] -FUZZER_NAME='fuzzer_libafl_cc' -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +FUZZER_NAME = 'fuzzer_libafl_cc' +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx' FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' PROJECT_DIR = { script = ["pwd"] } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -23,7 +27,7 @@ windows_alias = "unsupported" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile","${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -32,7 +36,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.crash_cxx] linux_alias = "crash_cxx_unix" @@ -41,7 +45,7 @@ windows_alias = "unsupported" [tasks.crash_cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--features=crash"] +args = ["build", "--profile", "${PROFILE}", "--features=crash"] [tasks.crash_cc] linux_alias = "crash_cc_unix" @@ -50,7 +54,7 @@ windows_alias = "unsupported" [tasks.crash_cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--features=crash"] +args = ["build", "--profile", "${PROFILE}", "--features=crash"] # Harness [tasks.fuzzer] @@ -61,7 +65,7 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" args = ["${PROJECT_DIR}/src/program.c", "-o", "${FUZZER_NAME}", "-lm"] -dependencies = [ "cxx", "cc" ] +dependencies = ["cxx", "cc"] # Crashing Harness [tasks.fuzzer_crash] @@ -72,7 +76,7 @@ windows_alias = "unsupported" [tasks.fuzzer_crash_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" args = ["${PROJECT_DIR}/src/program.c", "-o", "${FUZZER_NAME}_crash", "-lm"] -dependencies = [ "crash_cxx", "crash_cc" ] +dependencies = ["crash_cxx", "crash_cc"] # Run the fuzzer [tasks.run] @@ -82,10 +86,10 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' taskset -c 1 ${CARGO_TARGET_DIR}/${PROFILE_DIR}/${CARGO_MAKE_PROJECT_NAME} 
./${FUZZER_NAME} ./corpus/ -t 1000 ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Run the fuzzer with a crash @@ -96,10 +100,10 @@ windows_alias = "unsupported" [tasks.crash_unix] script_runner = "@shell" -script=''' +script = ''' taskset -c 1 ${CARGO_TARGET_DIR}/${PROFILE_DIR}/${CARGO_MAKE_PROJECT_NAME} ./${FUZZER_NAME}_crash ./corpus/ -t 1000 ''' -dependencies = [ "fuzzer_crash" ] +dependencies = ["fuzzer_crash"] # Test [tasks.test] @@ -109,7 +113,7 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' timeout 30s ${CARGO_TARGET_DIR}/${PROFILE_DIR}/${CARGO_MAKE_PROJECT_NAME} ./${FUZZER_NAME} ./corpus/ -t 1000 | tee fuzz_stdout.log || true if grep -qa "objectives: 1" fuzz_stdout.log; then echo "Fuzzer is working" @@ -119,7 +123,7 @@ else fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] @@ -130,8 +134,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} cargo clean ''' diff --git a/fuzzers/forkserver_libafl_cc/README.md b/fuzzers/forkserver/forkserver_libafl_cc/README.md similarity index 100% rename from fuzzers/forkserver_libafl_cc/README.md rename to fuzzers/forkserver/forkserver_libafl_cc/README.md diff --git a/fuzzers/forkserver_libafl_cc/corpus/testfile b/fuzzers/forkserver/forkserver_libafl_cc/corpus/testfile similarity index 100% rename from fuzzers/forkserver_libafl_cc/corpus/testfile rename to fuzzers/forkserver/forkserver_libafl_cc/corpus/testfile diff --git a/fuzzers/forkserver_libafl_cc/src/bin/libafl_cc.rs b/fuzzers/forkserver/forkserver_libafl_cc/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/forkserver_libafl_cc/src/bin/libafl_cc.rs rename to fuzzers/forkserver/forkserver_libafl_cc/src/bin/libafl_cc.rs diff --git a/fuzzers/forkserver_libafl_cc/src/bin/libafl_cxx.rs b/fuzzers/forkserver/forkserver_libafl_cc/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/forkserver_libafl_cc/src/bin/libafl_cxx.rs rename to fuzzers/forkserver/forkserver_libafl_cc/src/bin/libafl_cxx.rs diff --git a/fuzzers/forkserver_libafl_cc/src/lib.rs b/fuzzers/forkserver/forkserver_libafl_cc/src/lib.rs similarity index 100% rename from fuzzers/forkserver_libafl_cc/src/lib.rs rename to fuzzers/forkserver/forkserver_libafl_cc/src/lib.rs diff --git a/fuzzers/forkserver_libafl_cc/src/main.rs b/fuzzers/forkserver/forkserver_libafl_cc/src/main.rs similarity index 97% rename from fuzzers/forkserver_libafl_cc/src/main.rs rename to fuzzers/forkserver/forkserver_libafl_cc/src/main.rs index 5b131120ca..ddcc7229d0 100644 --- a/fuzzers/forkserver_libafl_cc/src/main.rs +++ b/fuzzers/forkserver/forkserver_libafl_cc/src/main.rs @@ -11,7 +11,7 @@ use libafl::{ fuzzer::{Fuzzer, StdFuzzer}, inputs::BytesInput, monitors::SimpleMonitor, - mutators::{scheduled::havoc_mutations, tokens_mutations, StdScheduledMutator, Tokens}, + mutators::{havoc_mutations, tokens_mutations, StdScheduledMutator, Tokens}, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, stages::mutational::StdMutationalStage, @@ -24,7 +24,7 @@ use libafl_bolts::{ tuples::{tuple_list, Handled, MatchNameRef, Merge}, AsSliceMut, Truncate, }; -use libafl_targets::EDGES_MAP_SIZE_IN_USE; +use libafl_targets::EDGES_MAP_DEFAULT_SIZE; use nix::sys::signal::Signal; /// The commandline args this fuzzer accepts 
@@ -87,7 +87,7 @@ struct Opt { pub fn main() { env_logger::init(); - const MAP_SIZE: usize = EDGES_MAP_SIZE_IN_USE; //65536; + const MAP_SIZE: usize = EDGES_MAP_DEFAULT_SIZE; //65536; let opt = Opt::parse(); let corpus_dirs: Vec = [opt.in_dir].to_vec(); diff --git a/fuzzers/forkserver_libafl_cc/src/program.c b/fuzzers/forkserver/forkserver_libafl_cc/src/program.c similarity index 100% rename from fuzzers/forkserver_libafl_cc/src/program.c rename to fuzzers/forkserver/forkserver_libafl_cc/src/program.c diff --git a/fuzzers/forkserver_simple/.gitignore b/fuzzers/forkserver/forkserver_simple/.gitignore similarity index 100% rename from fuzzers/forkserver_simple/.gitignore rename to fuzzers/forkserver/forkserver_simple/.gitignore diff --git a/fuzzers/forkserver/forkserver_simple/Cargo.toml b/fuzzers/forkserver/forkserver_simple/Cargo.toml new file mode 100644 index 0000000000..073744a200 --- /dev/null +++ b/fuzzers/forkserver/forkserver_simple/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "forkserver_simple" +version = "0.14.1" +authors = ["tokatoka "] +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 + +[dependencies] +clap = { version = "4.5.18", features = ["derive"] } +env_logger = "0.11.5" +libafl = { path = "../../../libafl", features = ["std", "derive"] } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +nix = { version = "0.29.0", features = ["signal"] } diff --git a/fuzzers/forkserver_simple/README.md b/fuzzers/forkserver/forkserver_simple/README.md similarity index 100% rename from fuzzers/forkserver_simple/README.md rename to fuzzers/forkserver/forkserver_simple/README.md diff --git a/fuzzers/forkserver_simple/build.rs b/fuzzers/forkserver/forkserver_simple/build.rs similarity index 96% rename from fuzzers/forkserver_simple/build.rs rename to fuzzers/forkserver/forkserver_simple/build.rs index 22f71a6a29..dc8c8dd146 100644 --- a/fuzzers/forkserver_simple/build.rs +++ b/fuzzers/forkserver/forkserver_simple/build.rs @@ -44,7 +44,7 @@ fn main() { let mut compile_command = Command::new(afl_cc_path); compile_command .args(["src/program.c", "-o"]) - .arg(&format!("{}/target/release/program", &cwd)); + .arg(format!("{cwd}/target/release/program")); if let Ok(llvm_config) = env::var("LLVM_CONFIG") { if !llvm_config.is_empty() { diff --git a/fuzzers/forkserver_simple/corpus/testfile b/fuzzers/forkserver/forkserver_simple/corpus/testfile similarity index 100% rename from fuzzers/forkserver_simple/corpus/testfile rename to fuzzers/forkserver/forkserver_simple/corpus/testfile diff --git a/fuzzers/forkserver_simple/src/main.rs b/fuzzers/forkserver/forkserver_simple/src/main.rs similarity index 98% rename from fuzzers/forkserver_simple/src/main.rs rename to fuzzers/forkserver/forkserver_simple/src/main.rs index 84a34f5402..c026197738 100644 --- a/fuzzers/forkserver_simple/src/main.rs +++ b/fuzzers/forkserver/forkserver_simple/src/main.rs @@ -11,7 +11,7 @@ use libafl::{ fuzzer::{Fuzzer, StdFuzzer}, inputs::BytesInput, monitors::SimpleMonitor, - mutators::{scheduled::havoc_mutations, tokens_mutations, StdScheduledMutator, Tokens}, + mutators::{havoc_mutations, tokens_mutations, StdScheduledMutator, Tokens}, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, 
stages::mutational::StdMutationalStage, diff --git a/fuzzers/forkserver_simple/src/program.c b/fuzzers/forkserver/forkserver_simple/src/program.c similarity index 100% rename from fuzzers/forkserver_simple/src/program.c rename to fuzzers/forkserver/forkserver_simple/src/program.c diff --git a/fuzzers/fuzzbench/.gitignore b/fuzzers/forkserver/fuzzbench_forkserver/.gitignore similarity index 100% rename from fuzzers/fuzzbench/.gitignore rename to fuzzers/forkserver/fuzzbench_forkserver/.gitignore diff --git a/fuzzers/forkserver/fuzzbench_forkserver/Cargo.toml b/fuzzers/forkserver/fuzzbench_forkserver/Cargo.toml new file mode 100644 index 0000000000..7b8126cc3d --- /dev/null +++ b/fuzzers/forkserver/fuzzbench_forkserver/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "fuzzbench_forkserver" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[profile.release-fuzzbench] +inherits = "release" +debug = false +strip = true + +[build-dependencies] +cc = { version = "1.1.22", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets" } + +log = { version = "0.4.22", features = ["release_max_level_info"] } +clap = { version = "4.5.18", features = ["default"] } +nix = { version = "0.29.0", features = ["signal"] } diff --git a/fuzzers/fuzzbench_forkserver/src/main.rs b/fuzzers/forkserver/fuzzbench_forkserver/src/main.rs similarity index 96% rename from fuzzers/fuzzbench_forkserver/src/main.rs rename to fuzzers/forkserver/fuzzbench_forkserver/src/main.rs index ffa2b903f2..3690978ede 100644 --- a/fuzzers/fuzzbench_forkserver/src/main.rs +++ b/fuzzers/forkserver/fuzzbench_forkserver/src/main.rs @@ -18,12 +18,10 @@ use libafl::{ inputs::BytesInput, monitors::SimpleMonitor, mutators::{ - scheduled::havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, - StdMOptMutator, StdScheduledMutator, Tokens, - }, - observers::{ - CanTrack, HitcountsMapObserver, StdCmpValuesObserver, StdMapObserver, TimeObserver, + havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, StdMOptMutator, + StdScheduledMutator, Tokens, }, + observers::{CanTrack, HitcountsMapObserver, StdCmpObserver, StdMapObserver, TimeObserver}, schedulers::{ powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, StdWeightedScheduler, }, @@ -300,7 +298,8 @@ fn fuzz( 5, )?; - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( @@ -308,7 +307,7 @@ fn fuzz( StdWeightedScheduler::with_schedule( &mut state, &edges_observer, - Some(PowerSchedule::EXPLORE), + Some(PowerSchedule::explore()), ), ); @@ -352,7 +351,7 @@ fn fuzz( cmplog_shmem.write_to_env("__AFL_CMPLOG_SHM_ID").unwrap(); let cmpmap = unsafe { OwnedRefMut::::from_shmem(&mut cmplog_shmem) }; - let cmplog_observer = StdCmpValuesObserver::new("cmplog", cmpmap, true); + let cmplog_observer = StdCmpObserver::new("cmplog", cmpmap, true); let cmplog_executor = ForkserverExecutor::builder() .program(exec) diff --git a/fuzzers/forkserver/fuzzbench_forkserver_cmplog/Cargo.toml b/fuzzers/forkserver/fuzzbench_forkserver_cmplog/Cargo.toml new file mode 100644 index 0000000000..4ce57af8d8 --- 
/dev/null +++ b/fuzzers/forkserver/fuzzbench_forkserver_cmplog/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "fuzzbench_forkserver_cmplog" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[profile.release-fuzzbench] +inherits = "release" +debug = false +strip = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +clap = { version = "4.5.18", features = ["default"] } +nix = { version = "0.29.0", features = ["signal"] } diff --git a/fuzzers/fuzzbench_forkserver_cmplog/src/main.rs b/fuzzers/forkserver/fuzzbench_forkserver_cmplog/src/main.rs similarity index 97% rename from fuzzers/fuzzbench_forkserver_cmplog/src/main.rs rename to fuzzers/forkserver/fuzzbench_forkserver_cmplog/src/main.rs index 403296744d..9fab692177 100644 --- a/fuzzers/fuzzbench_forkserver_cmplog/src/main.rs +++ b/fuzzers/forkserver/fuzzbench_forkserver_cmplog/src/main.rs @@ -18,8 +18,7 @@ use libafl::{ inputs::BytesInput, monitors::SimpleMonitor, mutators::{ - scheduled::havoc_mutations, token_mutations::AFLppRedQueen, tokens_mutations, - StdMOptMutator, Tokens, + havoc_mutations, token_mutations::AFLppRedQueen, tokens_mutations, StdMOptMutator, Tokens, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, schedulers::{ @@ -301,7 +300,8 @@ fn fuzz( 5, )?; - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( @@ -309,7 +309,7 @@ fn fuzz( StdWeightedScheduler::with_schedule( &mut state, &edges_observer, - Some(PowerSchedule::EXPLORE), + Some(PowerSchedule::explore()), ), ); @@ -368,7 +368,7 @@ fn fuzz( .build(tuple_list!(cmplog_observer)) .unwrap(); - let tracing = AFLppCmplogTracingStage::with_cmplog_observer(cmplog_executor, cmplog_ref); + let tracing = AFLppCmplogTracingStage::new(cmplog_executor, cmplog_ref); // Setup a randomic Input2State stage let rq = MultiMutationalStage::new(AFLppRedQueen::with_cmplog_options(true, true)); diff --git a/fuzzers/fuzzbench_forkserver_cmplog/test/compile.sh b/fuzzers/forkserver/fuzzbench_forkserver_cmplog/test/compile.sh similarity index 100% rename from fuzzers/fuzzbench_forkserver_cmplog/test/compile.sh rename to fuzzers/forkserver/fuzzbench_forkserver_cmplog/test/compile.sh diff --git a/fuzzers/fuzzbench_forkserver_cmplog/test/test-cmplog.c b/fuzzers/forkserver/fuzzbench_forkserver_cmplog/test/test-cmplog.c similarity index 100% rename from fuzzers/fuzzbench_forkserver_cmplog/test/test-cmplog.c rename to fuzzers/forkserver/fuzzbench_forkserver_cmplog/test/test-cmplog.c diff --git a/fuzzers/forkserver/libafl-fuzz/.gitignore b/fuzzers/forkserver/libafl-fuzz/.gitignore new file mode 100644 index 0000000000..0a452b5af1 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/.gitignore @@ -0,0 +1,4 @@ +test/out-cmplog +test/out-instr +test/output-cmplog/ +test/output/ diff --git a/fuzzers/forkserver/libafl-fuzz/Cargo.toml b/fuzzers/forkserver/libafl-fuzz/Cargo.toml new file mode 100644 index 0000000000..ca4010f576 --- /dev/null +++ 
b/fuzzers/forkserver/libafl-fuzz/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "libafl-fuzz" +version = "0.14.1" +description = "Reimplementation of afl-fuzz on top of LibAFL" +documentation = "https://docs.rs/libafl" +authors = ["Aarnav Bos "] +readme = "./README.md" +license = "MIT OR Apache-2.0" +keywords = ["fuzzing", "testing", "security"] +repository = "https://github.com/AFLplusplus/LibAFL/" +categories = ["development-tools::testing"] +edition = "2021" + +[dependencies] +clap = { version = "4.5.18", features = ["derive", "env"] } +env_logger = "0.11.3" +libafl = { path = "../../../libafl", features = [ + "std", + "derive", + "track_hit_feedbacks", + "clap", + "errors_backtrace", +] } +libafl_bolts = { path = "../../../libafl_bolts", features = [ + "std", + "errors_backtrace", +] } +libafl_targets = { path = "../../../libafl_targets" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +memmap2 = "0.9.4" +nix = { version = "0.29.0", features = ["fs"] } +regex = "1.10.5" +serde = { version = "1.0.117", features = ["derive"] } + +[target.'cfg(target_os = "linux")'.dependencies] +libafl_nyx = { path = "../../../libafl_nyx", optional = true } + +[features] +default = ["track_hit_feedbacks"] +track_hit_feedbacks = ["libafl/track_hit_feedbacks"] +fuzzbench = [] +nyx = ["dep:libafl_nyx"] diff --git a/fuzzers/forkserver/libafl-fuzz/Makefile.toml b/fuzzers/forkserver/libafl-fuzz/Makefile.toml new file mode 100644 index 0000000000..23b74925b7 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/Makefile.toml @@ -0,0 +1,322 @@ +[env] +PROJECT_DIR = { script = ["pwd"] } +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } +FUZZER_NAME = 'libafl-fuzz' +FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' +LLVM_CONFIG = { value = "llvm-config-18", condition = { env_not_set = [ + "LLVM_CONFIG", +] } } +AFL_VERSION = "5777ceaf23f48ae4ceae60e4f3a79263802633c6" +AFL_DIR = { value = "${PROJECT_DIR}/AFLplusplus" } +AFL_CC_PATH = { value = "${AFL_DIR}/afl-clang-fast" } +CC = { value = "clang" } + +[tasks.build_afl] +script_runner = "@shell" +script = ''' +if [ ! -d "$AFL_DIR" ]; then + git clone https://github.com/AFLplusplus/AFLplusplus.git + cd ${AFL_DIR} + git checkout ${AFL_VERSION} + LLVM_CONFIG=${LLVM_CONFIG} make +fi +''' +[tasks.build_frida_mode] +script_runner = '@shell' +script = ''' + cd ${AFL_DIR} + cd frida_mode + LLVM_CONFIG=${LLVM_CONFIG} make + cd ../.. +''' +[tasks.build_qemuafl] +script_runner = "@shell" +script = ''' +cd ${AFL_DIR}/qemu_mode +./build_qemu_support.sh +cd ../.. +''' +dependencies = ["build_afl"] + +[tasks.build_unicorn_mode] +script_runner = "@shell" +script = ''' +cd ${AFL_DIR}/unicorn_mode +./build_unicorn_support.sh +cd ../.. 
+''' +dependencies = ["build_afl"] + +# Test +[tasks.test] +linux_alias = "test_unix" +mac_alias = "test_unix" +windows_alias = "unsupported" + +[tasks.test_unix] +script_runner = "@shell" +script = "echo done" +dependencies = [ + "build_afl", + "test_instr", + "test_cmplog", + "test_frida", + "test_qemu", + "test_unicorn_mode", + # nyx + # since we cannot test nyx mode on CI, let's build it + # "build_nyx_mode", + # fuzzbench + "test_instr_fuzzbench", +] + +[tasks.build_libafl_fuzz] +script_runner = "@shell" +script = "cargo build --profile ${PROFILE}" + +[tasks.build_libafl_fuzz_fuzzbench] +script_runner = "@shell" +script = "cargo build --profile ${PROFILE} --features fuzzbench" + +[tasks.test_instr] +script_runner = "@shell" +script = ''' +AFL_PATH=${AFL_DIR} ${AFL_CC_PATH} ./test/test-instr.c -o ./test/out-instr + +export LIBAFL_DEBUG_OUTPUT=1 +export AFL_CORES=0 +export AFL_STATS_INTERVAL=1 + +timeout 5 ${FUZZER} -i ./test/seeds -o ./test/output ./test/out-instr || true +test -n "$( ls ./test/output/fuzzer_main/queue/id:000002* 2>/dev/null )" || { + echo "No new corpus entries found" + exit 1 +} +test -n "$( ls ./test/output/fuzzer_main/fuzzer_stats 2>/dev/null )" || { + echo "No fuzzer_stats file found" + exit 1 +} +test -n "$( ls ./test/output/fuzzer_main/plot_data 2>/dev/null )" || { + echo "No plot_data found" + exit 1 +} +test -d "./test/output/fuzzer_main/hangs" || { + echo "No hangs directory found" + exit 1 +} +test -d "./test/output/fuzzer_main/crashes" || { + echo "No crashes directory found" + exit 1 +} +''' +dependencies = ["build_afl", "build_libafl_fuzz"] + +[tasks.test_instr_fuzzbench] +script_runner = "@shell" +script = ''' +AFL_PATH=${AFL_DIR} ${AFL_CC_PATH} ./test/test-instr.c -o ./test/out-instr + +export LIBAFL_DEBUG_OUTPUT=1 +export AFL_CORES=0 +export AFL_STATS_INTERVAL=1 + +timeout 5 ${FUZZER} -i ./test/seeds -o ./test/output-fuzzbench ./test/out-instr || true +test -n "$( ls ./test/output-fuzzbench/fuzzer_main/queue/id:000002* 2>/dev/null )" || { + echo "No new corpus entries found" + exit 1 +} +test -n "$( ls ./test/output-fuzzbench/fuzzer_main/fuzzer_stats 2>/dev/null )" || { + echo "No fuzzer_stats file found" + exit 1 +} +test -n "$( ls ./test/output-fuzzbench/fuzzer_main/plot_data 2>/dev/null )" || { + echo "No plot_data found" + exit 1 +} +test -d "./test/output-fuzzbench/fuzzer_main/hangs" || { + echo "No hangs directory found" + exit 1 +} +test -d "./test/output-fuzzbench/fuzzer_main/crashes" || { + echo "No crashes directory found" + exit 1 +} +''' +dependencies = ["build_afl", "build_libafl_fuzz_fuzzbench"] + +[tasks.test_cmplog] +script_runner = "@shell" +script = ''' +# cmplog TODO: AFL_BENCH_UNTIL_CRASH=1 instead of timeout 15s +AFL_LLVM_CMPLOG=1 AFL_PATH=${AFL_DIR} ${AFL_CC_PATH} ./test/test-cmplog.c -o ./test/out-cmplog +LIBAFL_DEBUG_OUTPUT=1 AFL_CORES=0 timeout 15 ${FUZZER} -Z -l 3 -m 0 -V30 -i ./test/seeds_cmplog -o ./test/output-cmplog -c 0 ./test/out-cmplog || true +test -n "$( ls ${PROJECT_DIR}/test/output-cmplog/fuzzer_main/hangs/id:0000* ${PROJECT_DIR}/test/output-cmplog/fuzzer_main/crashes/id:0000*)" || { + echo "No crashes found" + exit 1 +} +''' +dependencies = ["build_afl", "build_libafl_fuzz"] + +[tasks.test_frida] +script_runner = "@shell" +script = ''' +${CC} -no-pie ./test/test-instr.c -o ./test/out-frida + +export AFL_PATH=${AFL_DIR} +export AFL_CORES=0 +export AFL_STATS_INTERVAL=1 + +timeout 15 ${FUZZER} -m 0 -O -i ./test/seeds_frida -o ./test/output-frida -- ./test/out-frida || true +test -n "$( ls 
./test/output-frida/fuzzer_main/queue/id:000002* 2>/dev/null )" || { + echo "No new corpus entries found for FRIDA mode" + exit 1 +} + +${CC} ./test/test-cmpcov.c -o ./test/out-frida-cmpcov +AFL_FRIDA_VERBOSE=1 timeout 15 ${FUZZER} -m 0 -O -c 0 -l 3 -i ./test/seeds_frida -o ./test/output-frida-cmpcov -- ./test/out-frida-cmpcov || true +test -n "$( ls ./test/output-frida-cmpcov/fuzzer_main/queue/id:000003* 2>/dev/null )" || { + echo "No new corpus entries found for FRIDA cmplog mode" + exit 1 +} +export AFL_FRIDA_PERSISTENT_ADDR=0x`nm ./test/out-frida | grep -Ei "T _main|T main" | awk '{print $1}'` +timeout 15 ${FUZZER} -m 0 -O -i ./test/seeds_frida -o ./test/output-frida-persistent -- ./test/out-frida || true + +test -n "$( ls ./test/output-frida-persistent/fuzzer_main/queue/id:000002* 2>/dev/null )" || { + echo "No new corpus entries found for FRIDA persistent mode" + exit 1 +} + +RUNTIME_PERSISTENT=`grep execs_done ./test/output-frida-persistent/fuzzer_main/fuzzer_stats | awk '{print$3}'` +RUNTIME=`grep execs_done ./test/output-frida/fuzzer_main/fuzzer_stats | awk '{print$3}'` +test -n "$RUNTIME" -a -n "$RUNTIME_PERSISTENT" && { + DIFF=`expr $RUNTIME_PERSISTENT / $RUNTIME` + test "$DIFF" -gt 1 && { # must be at least twice as fast + echo "persistent frida_mode was noticeably faster than standard frida_mode" + } || { + echo "persistent frida_mode" $RUNTIME_PERSISTENT "was not noticeably faster than standard frida_mode" $RUNTIME + exit 1 + } +} || { + echo "we got no data on executions performed? weird!" +} + +unset AFL_FRIDA_PERSISTENT_ADDR +''' +dependencies = ["build_afl", "build_frida_mode", "build_libafl_fuzz"] + +[tasks.test_qemu] +script_runner = "@shell" +script = ''' +${CC} -pie -fPIE ./test/test-instr.c -o ./test/out-qemu +${CC} -o ./test/out-qemu-cmpcov ./test/test-cmpcov.c + +export AFL_PATH=${AFL_DIR} +export AFL_CORES=0 +export AFL_STATS_INTERVAL=1 + +timeout 15 ${FUZZER} -m 0 -Q -i ./test/seeds_qemu -o ./test/output-qemu -- ./test/out-qemu || true +test -n "$( ls ./test/output-qemu/fuzzer_main/queue/id:000002* 2>/dev/null )" || { + echo "No new corpus entries found for QEMU mode" + exit 1 +} + +export AFL_ENTRYPOINT=`printf 1 | AFL_DEBUG=1 ${AFL_DIR}/afl-qemu-trace ./test/out-qemu 2>&1 >/dev/null | awk '/forkserver/{print $4; exit}'` +timeout 15 ${FUZZER} -m 0 -Q -i ./test/seeds_qemu -o ./test/output-qemu-entrypoint -- ./test/out-qemu || true +test -n "$( ls ./test/output-qemu-entrypoint/fuzzer_main/queue/id:000002* 2>/dev/null )" || { + echo "No new corpus entries found for QEMU mode with AFL_ENTRYPOINT" + exit 1 +} +unset AFL_ENTRYPOINT + +export AFL_PRELOAD=${AFL_DIR}/libcompcov.so +export AFL_COMPCOV_LEVEL=2 +timeout 15 ${FUZZER} -Q -i ./test/seeds_qemu -o ./test/output-qemu-cmpcov -- ./test/out-qemu-cmpcov || true +test -n "$( ls ./test/output-qemu-cmpcov/fuzzer_main/queue/id:000002* 2>/dev/null )" || { + echo "No new corpus entries found for QEMU mode" + exit 1 +} +''' +dependencies = ["build_afl", "build_qemuafl", "build_libafl_fuzz"] + +[tasks.test_unicorn_mode] +script_runner = "@shell" +script = ''' +export AFL_PATH=${AFL_DIR} +export AFL_CORES=0 +export AFL_STATS_INTERVAL=1 + +# TODO: test unicorn persistent mode once it's fixed on AFL++ + +LIBAFL_DEBUG_OUTPUT=1 AFL_DEBUG=1 AFL_DEBUG_CHILD=1 timeout 15s ${FUZZER} -m 0 -U -i ./test/seeds_unicorn -o ./test/output-unicorn-python -- python3 ${AFL_DIR}/unicorn_mode/samples/python_simple/simple_test_harness.py @@ || true +test -n "$( ls ./test/output-unicorn-python/fuzzer_main/queue/id:000003* 2>/dev/null )" || { + echo 
"No new corpus entries found for Unicorn python3 mode" + exit 1 +} + +export AFL_COMPCOV_LEVEL=2 +LIBAFL_DEBUG_OUTPUT=1 AFL_DEBUG=1 AFL_DEBUG_CHILD=1 timeout 15s ${FUZZER} -m 0 -U -i ./test/seeds_unicorn_cmpcov -o ./test/output-unicorn-cmpcov -- python3 ${AFL_DIR}/unicorn_mode/samples/compcov_x64/compcov_test_harness.py @@ || true +test -n "$( ls ./test/output-unicorn-cmpcov/fuzzer_main/queue/id:000002* 2>/dev/null )" || { + echo "No new corpus entries found for Unicorn cmpcov mode" + exit 1 +} +''' +dependencies = ["build_libafl_fuzz", "build_afl", "build_unicorn_mode"] + +[tasks.test_nyx_mode] +script_runner = "@shell" +script = ''' +export AFL_PATH=${AFL_DIR} +export AFL_CORES=0 +export AFL_STATS_INTERVAL=1 +export AFL_DEBUG=1 +export LIBAFL_DEBUG_OUTPUT=1 +AFL_PATH=${AFL_DIR} ${AFL_CC_PATH} ./test/test-instr.c -o ./test/out-instr +rm -rf ./test/nyx-test +cd ../../../libafl_nyx +rm -rf packer +git clone https://github.com/nyx-fuzz/packer.git +python3 packer/packer/nyx_packer.py \ + ../fuzzers/forkserver/libafl-fuzz/test/out-instr \ + ../fuzzers/forkserver/libafl-fuzz/test/out-nyx \ + afl \ + instrumentation \ + --fast_reload_mode \ + --purge +python3 packer/packer/nyx_config_gen.py ../fuzzers/forkserver/libafl-fuzz/test/out-nyx Kernel +cd ../fuzzers/forkserver/libafl-fuzz/ +timeout 15s ${FUZZER} -i ./test/seeds_nyx -o ./test/output-nyx -X -- ./test/out-nyx +test -n "$( ls ./test/output-nyx/fuzzer_main/queue/id:000003* 2>/dev/null )" || { + echo "No new corpus entries found for Nyx mode!" + exit 1 +} +''' +dependencies = ["build_afl", "build_libafl_fuzz"] + +# since we cannot test nyx mode on CI, let's build it +[tasks.build_nyx_mode] +script_runner = "@shell" +script = "cargo build --profile ${PROFILE} --features nyx" + +[tasks.clean] +linux_alias = "clean_unix" +mac_alias = "clean_unix" +windows_alias = "unsupported" + +[tasks.clean_unix] +script_runner = "@shell" +script = ''' +rm -rf AFLplusplus +rm -rf ./test/output +rm -rf ./test/cmplog-output +rm -rf ./test/output-frida* +rm -rf ./test/output-cmplog +rm -rf ./test/output-qemu* +rm -rf ./test/output-unicorn* +rm ./test/out-* +''' diff --git a/fuzzers/forkserver/libafl-fuzz/README.md b/fuzzers/forkserver/libafl-fuzz/README.md new file mode 100644 index 0000000000..535ed35ece --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/README.md @@ -0,0 +1,70 @@ +Rewrite of afl-fuzz in Rust. + +# TODO +- [x] AFL_HANG_TMOUT +- [x] AFL_NO_AUTODICT +- [x] AFL_MAP_SIZE +- [x] AFL_KILL_SIGNAL +- [x] AFL_BENCH_JUST_ONE +- [x] AFL_DEBUG_CHILD +- [x] AFL_PERSISTENT +- [x] AFL_IGNORE_TIMEOUTS +- [x] AFL_EXIT_ON_SEED_ISSUES +- [x] AFL_BENCH_UNTIL_CRASH +- [x] AFL_TMPDIR +- [x] AFL_CRASH_EXITCODE +- [x] AFL_TARGET_ENV +- [x] AFL_IGNORE_SEED_PROBLEMS (renamed to AFL_IGNORE_SEED_ISSUES) +- [x] AFL_CRASH_EXITCODE +- [x] AFL_INPUT_LEN_MIN +- [x] AFL_INPUT_LEN_MAX +- [x] AFL_CYCLE_SCHEDULES +- [x] AFL_CMPLOG_ONLY_NEW +- [x] AFL_PRELOAD +- [x] AFL_SKIP_BIN_CHECK +- [x] AFL_NO_STARTUP_CALIBRATION (this is default in libafl, not sure if this needs to be changed?) +- [x] AFL_FUZZER_STATS_UPDATE_INTERVAL +- [x] AFL_DEFER_FORKSRV +- [x] AFL_NO_WARN_INSTABILITY (we don't warn anyways, we should maybe?) 
+- [x] AFL_IMPORT_FIRST (implicit) +- [x] AFL_SYNC_TIME +- [x] AFL_AUTORESUME +- [x] AFL_PERSISTENT_RECORD +- [ ] AFL_FINAL_SYNC +- [ ] AFL_CRASHING_SEEDS_AS_NEW_CRASH +- [ ] AFL_IGNORE_UNKNOWN_ENVS +- [ ] AFL_NO_UI +- [ ] AFL_PIZZA_MODE :) +- [ ] AFL_EXIT_WHEN_DONE +- [ ] AFL_EXIT_ON_TIME +- [ ] AFL_NO_AFFINITY +- [ ] AFL_FORKSERVER_KILL_SIGNAL +- [ ] AFL_EXPAND_HAVOC_NOW +- [ ] AFL_NO_FORKSRV +- [ ] AFL_FORKSRV_INIT_TMOUT +- [ ] AFL_TRY_AFFINITY +- [ ] AFL_FAST_CAL +- [ ] AFL_NO_CRASH_README +- [ ] AFL_KEEP_TIMEOUTS +- [ ] AFL_TESTCACHE_SIZE +- [ ] AFL_NO_ARITH +- [ ] AFL_DISABLE_TRIM +- [ ] AFL_MAX_DET_EXTRAS +- [ ] AFL_IGNORE_PROBLEMS +- [ ] AFL_IGNORE_PROBLEMS_COVERAGE +- [ ] AFL_STATSD_TAGS_FLAVOR +- [ ] AFL_STATSD +- [ ] AFL_STATSD_PORT +- [ ] AFL_STATSD_HOST +- [ ] AFL_IMPORT +- [ ] AFL_SHUFFLE_QUEUE +- [ ] AFL_CUSTOM_QEMU_BIN +- [ ] AFL_PATH +- [ ] AFL_CUSTOM_MUTATOR_LIBRARY +- [ ] AFL_CUSTOM_MUTATOR_ONLY +- [ ] AFL_PYTHON_MODULE +- [ ] AFL_DEBUG +- [ ] AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES +- [ ] AFL_DUMB_FORKSRV +- [ ] AFL_EARLY_FORKSERVER +- [ ] AFL_NO_SNAPSHOT diff --git a/fuzzers/forkserver/libafl-fuzz/rustfmt.toml b/fuzzers/forkserver/libafl-fuzz/rustfmt.toml new file mode 100644 index 0000000000..44b6aab556 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/rustfmt.toml @@ -0,0 +1,2 @@ +group_imports = "StdExternalCrate" +imports_granularity = "Crate" diff --git a/fuzzers/forkserver/libafl-fuzz/src/corpus.rs b/fuzzers/forkserver/libafl-fuzz/src/corpus.rs new file mode 100644 index 0000000000..ccf7d7ba88 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/corpus.rs @@ -0,0 +1,178 @@ +use std::{ + borrow::Cow, + fs::File, + io, + io::{BufRead, BufReader}, + path::Path, +}; + +use libafl::{ + corpus::{Corpus, CorpusId, Testcase}, + inputs::BytesInput, + state::{HasCorpus, HasExecutions, HasSolutions, HasStartTime}, + Error, +}; +use libafl_bolts::current_time; +use nix::{ + errno::Errno, + fcntl::{Flock, FlockArg}, +}; + +use crate::{fuzzer::LibaflFuzzState, OUTPUT_GRACE}; + +pub fn generate_base_filename(state: &mut LibaflFuzzState, id: CorpusId) -> String { + let id = id.0; + let is_seed = state.must_load_initial_inputs(); + let name = if is_seed { + // TODO set orig filename + format!("id:{id:0>6},time:0,execs:0,orig:TODO",) + } else { + // TODO: change hardcoded values of op (operation aka stage_name) & rep (amount of stacked mutations applied) + let src = if let Some(parent_id) = state.corpus().current() { + parent_id.0 + } else { + 0 + }; + let execs = *state.executions(); + let time = (current_time() - *state.start_time()).as_secs(); + format!("id:{id:0>6},src:{src:0>6},time:{time},execs:{execs},op:havoc,rep:0",) + }; + name +} + +// The function needs to be compatible with CustomFilepathToTestcaseFeedback +#[allow(clippy::unnecessary_wraps)] +pub fn set_corpus_filepath( + state: &mut LibaflFuzzState, + testcase: &mut Testcase, + _fuzzer_dir: &Path, +) -> Result<(), Error> { + let id = state.corpus().peek_free_id(); + let mut name = generate_base_filename(state, id); + if testcase.hit_feedbacks().contains(&Cow::Borrowed("edges")) { + name = format!("{name},+cov"); + } + *testcase.filename_mut() = Some(name); + // We don't need to set the path since everything goes into one dir unlike with Objectives + Ok(()) +} + +// The function needs to be compatible with CustomFilepathToTestcaseFeedback +#[allow(clippy::unnecessary_wraps)] +pub fn set_solution_filepath( + state: &mut LibaflFuzzState, + testcase: &mut Testcase, + output_dir: &Path, +) -> Result<(), Error> { + // 
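For readers skimming the patch: the format strings in `generate_base_filename` and `set_corpus_filepath` above produce AFL++-style queue entry names. A rough illustration, with hypothetical id/time/exec values rather than output from a real run:

```rust
// Hypothetical names produced by generate_base_filename / set_corpus_filepath:
//   while loading seeds:  "id:000003,time:0,execs:0,orig:TODO"
//   while fuzzing:        "id:000003,src:000001,time:42,execs:1337,op:havoc,rep:0"
// set_corpus_filepath additionally appends ",+cov" when the testcase's
// hit_feedbacks() contains "edges", i.e. the coverage feedback kept it.
```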
sig:0SIGNAL + // TODO: verify if 0 time if objective found during seed loading + let id = state.solutions().peek_free_id(); + let mut filename = generate_base_filename(state, id); + let mut dir = "crashes"; + if testcase + .hit_objectives() + .contains(&Cow::Borrowed("TimeoutFeedback")) + { + filename = format!("{filename},+tout"); + dir = "hangs"; + } + *testcase.file_path_mut() = Some(output_dir.join(dir).join(&filename)); + *testcase.filename_mut() = Some(filename); + Ok(()) +} + +fn parse_time_line(line: &str) -> Result { + line.split(": ") + .last() + .ok_or(Error::illegal_state("invalid stats file"))? + .parse() + .map_err(|_| Error::illegal_state("invalid stats file")) +} + +pub fn check_autoresume(fuzzer_dir: &Path, auto_resume: bool) -> Result, Error> { + if !fuzzer_dir.exists() { + std::fs::create_dir(fuzzer_dir)?; + } + // lock the fuzzer dir + let fuzzer_dir_fd = File::open(fuzzer_dir)?; + let file = match Flock::lock(fuzzer_dir_fd, FlockArg::LockExclusiveNonblock) { + Ok(l) => l, + Err(err) => match err.1 { + Errno::EWOULDBLOCK => return Err(Error::illegal_state( + "Looks like the job output directory is being actively used by another instance", + )), + _ => { + return Err(Error::last_os_error( + format!("Error creating lock for output dir: exit code {}", err.1).as_str(), + )) + } + }, + }; + // Check if we have an existing fuzzed fuzzer_dir + let stats_file = fuzzer_dir.join("fuzzer_stats"); + if stats_file.exists() { + let file = File::open(&stats_file).unwrap(); + let reader = BufReader::new(file); + let mut start_time: u64 = 0; + let mut last_update: u64 = 0; + for (index, line) in reader.lines().enumerate() { + match index { + // first line is start_time + 0 => { + start_time = parse_time_line(&line?).unwrap(); + } + // second_line is last_update + 1 => { + last_update = parse_time_line(&line?).unwrap(); + } + _ => break, + } + } + if !auto_resume && last_update.saturating_sub(start_time) > OUTPUT_GRACE * 60 { + return Err(Error::illegal_state("The job output directory already exists and contains results! 
use AFL_AUTORESUME=1 or provide \"-\" for -i ")); + } + } + if !auto_resume { + let queue_dir = fuzzer_dir.join("queue"); + let hangs_dir = fuzzer_dir.join("hangs"); + let crashes_dir = fuzzer_dir.join("crashes"); + // Create our (sub) directories for Objectives & Corpus + create_dir_if_not_exists(&crashes_dir).expect("should be able to create crashes dir"); + create_dir_if_not_exists(&hangs_dir).expect("should be able to create hangs dir"); + create_dir_if_not_exists(&queue_dir).expect("should be able to create queue dir"); + } + Ok(file) +} + +pub fn create_dir_if_not_exists(path: &Path) -> io::Result<()> { + if path.is_file() { + return Err(io::Error::new( + // TODO: change this to ErrorKind::NotADirectory + // when stabilitzed https://github.com/rust-lang/rust/issues/86442 + io::ErrorKind::Other, + format!("{} expected a directory; got a file", path.display()), + )); + } + match std::fs::create_dir(path) { + Ok(()) => Ok(()), + Err(err) => { + if matches!(err.kind(), io::ErrorKind::AlreadyExists) { + Ok(()) + } else { + Err(err) + } + } + } +} + +#[cfg(not(feature = "fuzzbench"))] +pub fn remove_main_node_file(output_dir: &Path) -> Result<(), Error> { + for entry in std::fs::read_dir(output_dir)?.filter_map(Result::ok) { + let path = entry.path(); + if path.is_dir() && path.join("is_main_node").exists() { + std::fs::remove_file(path.join("is_main_node"))?; + return Ok(()); + } + } + Err(Error::illegal_state("main node's directory not found!")) +} diff --git a/fuzzers/forkserver/libafl-fuzz/src/env_parser.rs b/fuzzers/forkserver/libafl-fuzz/src/env_parser.rs new file mode 100644 index 0000000000..d909bfbf81 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/env_parser.rs @@ -0,0 +1,162 @@ +use std::{collections::HashMap, path::PathBuf, time::Duration}; + +use libafl::{stages::afl_stats::AFL_FUZZER_STATS_UPDATE_INTERVAL_SECS, Error}; +use libafl_bolts::core_affinity::Cores; + +use crate::Opt; + +pub fn parse_envs(opt: &mut Opt) -> Result<(), Error> { + if let Ok(res) = std::env::var("AFL_CORES") { + opt.cores = Some(Cores::from_cmdline(&res)?); + } else { + return Err(Error::illegal_argument("Missing AFL_CORES")); + } + if let Ok(res) = std::env::var("AFL_INPUT_LEN_MAX") { + opt.max_input_len = Some(res.parse()?); + } + if let Ok(res) = std::env::var("AFL_INPUT_LEN_MIN") { + opt.min_input_len = Some(res.parse()?); + } + if let Ok(res) = std::env::var("AFL_BENCH_JUST_ONE") { + opt.bench_just_one = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_BENCH_UNTIL_CRASH") { + opt.bench_until_crash = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_HANG_TMOUT") { + opt.hang_timeout = res.parse()?; + } else { + opt.hang_timeout = 100; + } + if let Ok(res) = std::env::var("AFL_DEBUG_CHILD") { + opt.debug_child = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_PERSISTENT") { + opt.is_persistent = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_NO_AUTODICT") { + opt.no_autodict = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_MAP_SIZE") { + let map_size = validate_map_size(res.parse()?)?; + opt.map_size = Some(map_size); + }; + if let Ok(res) = std::env::var("AFL_IGNORE_TIMEOUT") { + opt.ignore_timeouts = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_TMPDIR") { + opt.cur_input_dir = Some(PathBuf::from(res)); + } + if let Ok(res) = std::env::var("AFL_CRASH_EXITCODE") { + opt.crash_exitcode = Some(res.parse()?); + } + if let Ok(res) = std::env::var("AFL_TARGET_ENV") { + opt.target_env = parse_target_env(&res)?; + } + if 
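To make the resume check above concrete, this is the kind of `fuzzer_stats` prefix `check_autoresume` expects to find; the timestamps are made up, and only the first two lines are ever read:

```rust
// Illustrative fuzzer_stats prefix parsed by check_autoresume:
//   start_time        : 1700000000
//   last_update       : 1700003600
// parse_time_line() splits each line on ": " and parses the trailing integer.
// If last_update - start_time exceeds OUTPUT_GRACE minutes and AFL_AUTORESUME
// is not set, the directory is treated as holding previous results and the run
// is refused rather than silently overwriting them.
```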
let Ok(res) = std::env::var("AFL_CYCLE_SCHEDULES") { + opt.cycle_schedules = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_CMPLOG_ONLY_NEW") { + opt.cmplog_only_new = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_PRELOAD") { + opt.afl_preload = Some(res); + } + if let Ok(res) = std::env::var("AFL_SKIP_BIN_CHECK") { + opt.skip_bin_check = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_AUTORESUME") { + opt.auto_resume = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_DEFER_FORKSRV") { + opt.defer_forkserver = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_FUZZER_STATS_UPDATE_INTERVAL") { + opt.stats_interval = res.parse()?; + } else { + opt.stats_interval = AFL_FUZZER_STATS_UPDATE_INTERVAL_SECS; + } + if let Ok(res) = std::env::var("AFL_BROKER_PORT") { + opt.broker_port = Some(res.parse()?); + } + if let Ok(res) = std::env::var("AFL_EXIT_ON_SEED_ISSUES") { + opt.exit_on_seed_issues = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_IGNORE_SEED_ISSUES") { + opt.ignore_seed_issues = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_CRASHING_SEED_AS_NEW_CRASH") { + opt.crash_seed_as_new_crash = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_FRIDA_PERSISTENT_ADDR") { + opt.frida_persistent_addr = Some(res); + } + if let Ok(res) = std::env::var("AFL_QEMU_CUSTOM_BIN") { + opt.qemu_custom_bin = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_CS_CUSTOM_BIN") { + opt.cs_custom_bin = parse_bool(&res)?; + } + if let Ok(res) = std::env::var("AFL_KILL_SIGNAL") { + opt.kill_signal = Some(res.parse()?); + } + if let Ok(res) = std::env::var("AFL_KILL_SIGNAL") { + opt.kill_signal = Some(res.parse()?); + } + if let Ok(res) = std::env::var("AFL_PERSISTENT_RECORD") { + opt.persistent_record = res.parse()?; + } + if let Ok(res) = std::env::var("AFL_SYNC_TIME") { + opt.foreign_sync_interval = Duration::from_secs(res.parse::()? * 60); + } else { + opt.foreign_sync_interval = Duration::from_secs(AFL_DEFAULT_FOREIGN_SYNC_INTERVAL); + } + if let Ok(res) = std::env::var("AFL_USE_FASAN") { + opt.frida_asan = parse_bool(&res)?; + } + Ok(()) +} + +fn parse_bool(val: &str) -> Result { + match val { + "1" => Ok(true), + "0" => Ok(false), + _ => Err(Error::illegal_argument( + "boolean values must be either 1 for true or 0 for false", + )), + } +} + +/// parse `AFL_TARGET_ENV`; expects: FOO=BAR TEST=ASD +fn parse_target_env(s: &str) -> Result>, Error> { + let env_regex = regex::Regex::new(r"([^\s=]+)\s*=\s*([^\s]+)").unwrap(); + let mut target_env = HashMap::new(); + for vars in env_regex.captures_iter(s) { + _ = target_env.insert( + vars.get(1) + .ok_or(Error::illegal_argument("invalid AFL_TARGET_ENV format"))? + .as_str() + .to_string(), + vars.get(2) + .ok_or(Error::illegal_argument("invalid AFL_TARGET_ENV format"))? 
+ .as_str() + .to_string(), + ); + } + Ok(Some(target_env)) +} + +fn validate_map_size(map_size: usize) -> Result { + if map_size > AFL_MAP_SIZE_MIN && map_size < AFL_MAP_SIZE_MAX { + Ok(map_size) + } else { + Err(Error::illegal_argument(format!( + "AFL_MAP_SIZE not in range {AFL_MAP_SIZE_MIN} (2 ^ 3) - {AFL_MAP_SIZE_MAX} (2 ^ 30)", + ))) + } +} + +const AFL_MAP_SIZE_MIN: usize = usize::pow(2, 3); +const AFL_MAP_SIZE_MAX: usize = usize::pow(2, 30); +const AFL_DEFAULT_FOREIGN_SYNC_INTERVAL: u64 = 20 * 60; +pub const AFL_DEFAULT_MAP_SIZE: usize = 65536; diff --git a/fuzzers/forkserver/libafl-fuzz/src/executor.rs b/fuzzers/forkserver/libafl-fuzz/src/executor.rs new file mode 100644 index 0000000000..42db166408 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/executor.rs @@ -0,0 +1,415 @@ +use std::{ + fs::File, + marker::PhantomData, + os::unix::fs::PermissionsExt, + path::{Path, PathBuf}, +}; + +use libafl::{ + executors::{Executor, ExitKind, HasObservers, HasTimeout}, + observers::ObserversTuple, + state::{State, UsesState}, + Error, +}; +use libafl_bolts::tuples::RefIndexable; +use memmap2::{Mmap, MmapOptions}; +use nix::libc::{S_IRUSR, S_IXUSR}; + +use crate::{Opt, DEFER_SIG, PERSIST_SIG}; + +const AFL_PATH: &str = "/usr/local/lib/afl/"; +const BIN_PATH: &str = "/usr/local/bin/"; + +// TODO better error messages and logging +pub fn check_binary(opt: &mut Opt, shmem_env_var: &str) -> Result<(), Error> { + println!("Validating target binary..."); + + let bin_path; + // check if it is a file path + if opt.executable.components().count() == 1 { + // check $PATH for the binary. + if let Some(full_bin_path) = find_executable_in_path(&opt.executable) { + opt.executable = full_bin_path; + bin_path = &opt.executable; + } else { + return Err(Error::illegal_argument(format!( + "Program '{}' not found or not executable", + opt.executable.display() + ))); + } + } else { + bin_path = &opt.executable; + #[cfg(feature = "nyx")] + { + if opt.nyx_mode { + if !bin_path.is_symlink() && bin_path.is_dir() { + let config_file = bin_path.join("config.ron"); + if !config_file.is_symlink() && config_file.is_file() { + return Ok(()); + } + } + return Err(Error::illegal_argument( + format!( + "Directory '{}' not found, or is a symlink or is not a nyx share directory", + bin_path.display() + ) + .as_str(), + )); + } + } + } + let metadata = bin_path.metadata()?; + // AFL++ does not follow symlinks, BUT we do. + let is_reg = bin_path.is_file(); + let bin_size = metadata.len(); + let is_executable = metadata.permissions().mode() & 0o111 != 0; + if !is_reg || !is_executable || bin_size < 4 { + return Err(Error::illegal_argument(format!( + "Program '{}' not found or not executable", + bin_path.display() + ))); + } + if opt.skip_bin_check + || opt.wine_mode + || opt.unicorn_mode + || (opt.qemu_mode && opt.qemu_custom_bin) + || (opt.forkserver_cs && opt.cs_custom_bin) + || opt.non_instrumented_mode + { + return Ok(()); + } + + let file = File::open(bin_path)?; + let mmap = unsafe { MmapOptions::new().map(&file)? 
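A small usage sketch for the helpers defined above (illustrative only; the functions are private to this module, so think of it as a unit test sitting next to them):

```rust
#[test]
fn env_parsing_examples() {
    // AFL-style booleans: only "1" and "0" are accepted.
    assert!(parse_bool("1").unwrap());
    assert!(!parse_bool("0").unwrap());
    assert!(parse_bool("true").is_err());

    // AFL_TARGET_ENV="FOO=BAR TEST=ASD" becomes a map of key/value pairs.
    let env = parse_target_env("FOO=BAR TEST=ASD").unwrap().unwrap();
    assert_eq!(env.get("FOO").map(String::as_str), Some("BAR"));
    assert_eq!(env.get("TEST").map(String::as_str), Some("ASD"));

    // Map sizes must lie strictly between 2^3 and 2^30.
    assert!(validate_map_size(65536).is_ok());
    assert!(validate_map_size(4).is_err());
}
```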
}; + + // check if it's a shell script + if mmap[0..1] == [0x43, 0x41] { + // TODO: finish error message + return Err(Error::illegal_argument( + "Oops, the target binary looks like a shell script.", + )); + } + + // check if the binary is an ELF file + #[cfg(feature = "nyx")] + if mmap[0..4] != [0x7f, 0x45, 0x4c, 0x46] { + return Err(Error::illegal_argument(format!( + "Program '{}' is not an ELF binary", + bin_path.display() + ))); + } + + #[cfg(target_vendor = "apple")] + { + if (mmap[0] != 0xCF || mmap[1] != 0xFA || mmap[2] != 0xED) + && (mmap[0] != 0xCA || mmap[1] != 0xFE || mmap[2] != 0xBA) + { + return Err(Error::illegal_argument(format!( + "Program '{}' is not a 64-bit or universal Mach-O binary", + bin_path.display() + ))); + } + } + + let check_instrumentation = !opt.qemu_mode + && !opt.frida_mode + && !opt.unicorn_mode + && !opt.forkserver_cs + && !opt.non_instrumented_mode; + + #[cfg(feature = "nyx")] + let check_instrumentation = check_instrumentation && !opt.nyx_mode; + + if check_instrumentation && !is_instrumented(&mmap, shmem_env_var) { + return Err(Error::illegal_argument( + "target binary is not instrumented correctly", + )); + } + + if (opt.forkserver_cs || opt.qemu_mode || opt.frida_mode) + && is_instrumented(&mmap, shmem_env_var) + { + return Err(Error::illegal_argument( + "Instrumentation found in -Q/-O mode", + )); + } + + if mmap_has_substr(&mmap, "__asan_init") + || mmap_has_substr(&mmap, "__lsan_init") + || mmap_has_substr(&mmap, "__lsan_init") + { + opt.uses_asan = true; + } + + if mmap_has_substr(&mmap, PERSIST_SIG) { + opt.is_persistent = true; + } else if opt.is_persistent { + println!("persistent mode enforced"); + } else if opt.frida_persistent_addr.is_some() { + opt.is_persistent = true; + opt.defer_forkserver = true; + println!("FRIDA persistent mode configuration options detected"); + } + + if opt.frida_mode || mmap_has_substr(&mmap, DEFER_SIG) { + println!("deferred forkserver binary detected"); + opt.defer_forkserver = true; + } else if opt.defer_forkserver { + println!("defer forkserver enforced"); + } + + Ok(()) + // Safety: unmap() is called on Mmap object Drop +} + +fn mmap_has_substr(mmap: &Mmap, sub_str: &str) -> bool { + let mmap_len = mmap.len(); + let substr_len = sub_str.len(); + if mmap_len < substr_len { + return false; + } + for i in 0..(mmap_len - substr_len) { + if &mmap[i..i + substr_len] == sub_str.as_bytes() { + return true; + } + } + false +} + +fn is_instrumented(mmap: &Mmap, shmem_env_var: &str) -> bool { + mmap_has_substr(mmap, shmem_env_var) +} + +fn find_executable_in_path>(executable: &P) -> Option { + std::env::var_os("PATH").and_then(|paths| { + std::env::split_paths(&paths).find_map(|dir| { + let full_path = dir.join(executable); + if full_path.is_file() { + Some(full_path) + } else { + None + } + }) + }) +} + +pub fn find_afl_binary(filename: &str, same_dir_as: Option) -> Result { + let extension = Path::new(filename).extension(); + let is_library = if let Some(extension) = extension { + extension.eq_ignore_ascii_case("so") || extension.eq_ignore_ascii_case("dylib") + } else { + false + }; + + #[allow(clippy::useless_conversion)] // u16 on MacOS, u32 on Linux + let permission = if is_library { + u32::from(S_IRUSR) // user can read + } else { + u32::from(S_IXUSR) // user can exec + }; + + // First we check if it is present in AFL_PATH + if let Ok(afl_path) = std::env::var("AFL_PATH") { + let file = PathBuf::from(afl_path).join(filename); + if check_file_found(&file, permission) { + return Ok(file); + } + } + + // next we check 
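Summarizing the validation performed by `check_binary` above (a restatement of the checks as written, not additional behavior):

```rust
// check_binary() in short:
//   * resolve a bare program name via $PATH, otherwise use the given path
//   * require a regular, executable file of at least 4 bytes
//   * skip the remaining checks for wine/unicorn/custom-QEMU/custom-coresight
//     and non-instrumented targets (or when AFL_SKIP_BIN_CHECK is set)
//   * reject shell-script wrappers and, on Apple targets, non-Mach-O images
//   * require forkserver instrumentation (the shmem env var marker) unless
//     running under QEMU/FRIDA/unicorn/coresight, and conversely reject
//     instrumented binaries when one of those modes is selected
//   * auto-detect ASAN, persistent mode (PERSIST_SIG) and deferred
//     forkserver (DEFER_SIG) from strings embedded in the binary
```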
the same directory as the provided parameter + if let Some(same_dir_as) = same_dir_as { + if let Some(parent_dir) = same_dir_as.parent() { + let file = parent_dir.join(filename); + if check_file_found(&file, permission) { + return Ok(file); + } + } + } + + // check sensible defaults + let file = PathBuf::from(if is_library { AFL_PATH } else { BIN_PATH }).join(filename); + let found = check_file_found(&file, permission); + if found { + return Ok(file); + } + + if !is_library { + // finally, check the path for the binary + return find_executable_in_path(&filename) + .ok_or(Error::unknown(format!("cannot find {filename}"))); + } + + Err(Error::unknown(format!("cannot find {filename}"))) +} + +fn check_file_found(file: &Path, perm: u32) -> bool { + if !file.exists() { + return false; + } + if let Ok(metadata) = file.metadata() { + return metadata.permissions().mode() & perm != 0; + } + false +} + +#[cfg(feature = "nyx")] +pub enum SupportedExecutors { + Forkserver(FSV, PhantomData<(S, OT, NYX)>), + Nyx(NYX), +} + +#[cfg(feature = "nyx")] +impl UsesState for SupportedExecutors +where + S: State, +{ + type State = S; +} + +#[cfg(feature = "nyx")] +impl Executor for SupportedExecutors +where + S: State, + Z: UsesState, + EM: UsesState, + FSV: Executor, + NYX: Executor, +{ + fn run_target( + &mut self, + fuzzer: &mut Z, + state: &mut S, + mgr: &mut EM, + input: &S::Input, + ) -> Result { + match self { + Self::Forkserver(fsrv, _) => fsrv.run_target(fuzzer, state, mgr, input), + #[cfg(feature = "nyx")] + Self::Nyx(nyx) => nyx.run_target(fuzzer, state, mgr, input), + } + } +} + +#[cfg(feature = "nyx")] +impl HasObservers for SupportedExecutors +where + OT: ObserversTuple, + S: State, + FSV: HasObservers, + NYX: HasObservers, +{ + type Observers = OT; + #[inline] + fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { + match self { + Self::Forkserver(fsrv, _) => fsrv.observers(), + #[cfg(feature = "nyx")] + Self::Nyx(nyx) => nyx.observers(), + } + } + + #[inline] + fn observers_mut(&mut self) -> RefIndexable<&mut Self::Observers, Self::Observers> { + match self { + Self::Forkserver(fsrv, _) => fsrv.observers_mut(), + #[cfg(feature = "nyx")] + Self::Nyx(nyx) => nyx.observers_mut(), + } + } +} + +#[cfg(feature = "nyx")] +impl HasTimeout for SupportedExecutors +where + FSV: HasTimeout, + NYX: HasTimeout, +{ + fn set_timeout(&mut self, timeout: std::time::Duration) { + match self { + Self::Forkserver(fsrv, _) => fsrv.set_timeout(timeout), + #[cfg(feature = "nyx")] + Self::Nyx(nyx) => nyx.set_timeout(timeout), + } + } + fn timeout(&self) -> std::time::Duration { + match self { + Self::Forkserver(fsrv, _) => fsrv.timeout(), + #[cfg(feature = "nyx")] + Self::Nyx(nyx) => nyx.timeout(), + } + } +} + +#[cfg(not(feature = "nyx"))] +impl UsesState for SupportedExecutors +where + S: State, +{ + type State = S; +} + +#[cfg(not(feature = "nyx"))] +pub enum SupportedExecutors { + Forkserver(FSV, PhantomData<(S, OT)>), +} + +#[cfg(not(feature = "nyx"))] +impl Executor for SupportedExecutors +where + S: State, + Z: UsesState, + EM: UsesState, + FSV: Executor, +{ + fn run_target( + &mut self, + fuzzer: &mut Z, + state: &mut S, + mgr: &mut EM, + input: &S::Input, + ) -> Result { + match self { + Self::Forkserver(fsrv, _) => fsrv.run_target(fuzzer, state, mgr, input), + } + } +} + +#[cfg(not(feature = "nyx"))] +impl HasObservers for SupportedExecutors +where + OT: ObserversTuple, + S: State, + FSV: HasObservers, +{ + type Observers = OT; + #[inline] + fn observers(&self) -> 
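For orientation, `find_afl_binary` above resolves AFL++ components in a fixed order; the calls below mirror how the rest of this patch uses it (for QEMU and FRIDA modes), assuming an `Opt` value named `opt` is in scope. The default locations come from the `AFL_PATH`/`BIN_PATH` constants at the top of this file.

```rust
// find_afl_binary() lookup order:
//   1. $AFL_PATH/<filename>, if the AFL_PATH env var is set
//   2. the directory of `same_dir_as` (typically next to the target binary)
//   3. /usr/local/lib/afl/ for libraries (.so/.dylib), /usr/local/bin/ otherwise
//   4. a $PATH search, for executables only
let qemu_trace = find_afl_binary("afl-qemu-trace", Some(opt.executable.clone()))?;
let frida_lib = find_afl_binary("afl-frida-trace.so", Some(opt.executable.clone()))?;
```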
RefIndexable<&Self::Observers, Self::Observers> { + match self { + Self::Forkserver(fsrv, _) => fsrv.observers(), + } + } + + #[inline] + fn observers_mut(&mut self) -> RefIndexable<&mut Self::Observers, Self::Observers> { + match self { + Self::Forkserver(fsrv, _) => fsrv.observers_mut(), + } + } +} + +#[cfg(not(feature = "nyx"))] +impl HasTimeout for SupportedExecutors +where + FSV: HasTimeout, +{ + fn set_timeout(&mut self, timeout: std::time::Duration) { + match self { + Self::Forkserver(fsrv, _) => fsrv.set_timeout(timeout), + } + } + fn timeout(&self) -> std::time::Duration { + match self { + Self::Forkserver(fsrv, _) => fsrv.timeout(), + } + } +} diff --git a/fuzzers/forkserver/libafl-fuzz/src/feedback/filepath.rs b/fuzzers/forkserver/libafl-fuzz/src/feedback/filepath.rs new file mode 100644 index 0000000000..cebb5b9d98 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/feedback/filepath.rs @@ -0,0 +1,91 @@ +use std::{ + borrow::Cow, + path::{Path, PathBuf}, +}; + +use libafl::{ + corpus::{Corpus, Testcase}, + executors::ExitKind, + feedbacks::{Feedback, FeedbackFactory, StateInitializer}, + state::HasCorpus, +}; +use libafl_bolts::{Error, Named}; +use serde::{Deserialize, Serialize}; + +/// A [`CustomFilepathToTestcaseFeedback`] takes a closure which can set the file name and path for the testcase. +/// Is never interesting (use with an OR). +/// Note: If used as part of the `Objective` chain, then it will only apply to testcases which are +/// `Objectives`, vice versa for `Feedback`. +#[derive(Serialize, Deserialize)] +pub struct CustomFilepathToTestcaseFeedback { + /// Closure that returns the filename. + func: F, + /// The root output directory + out_dir: PathBuf, +} + +impl CustomFilepathToTestcaseFeedback { + /// Create a new [`CustomFilepathToTestcaseFeedback`]. 
+ pub fn new(func: F, out_dir: PathBuf) -> Self { + Self { func, out_dir } + } +} + +impl FeedbackFactory, T> + for CustomFilepathToTestcaseFeedback +where + F: Clone, +{ + fn create_feedback(&self, _ctx: &T) -> CustomFilepathToTestcaseFeedback { + Self { + func: self.func.clone(), + out_dir: self.out_dir.clone(), + } + } +} + +impl Named for CustomFilepathToTestcaseFeedback { + fn name(&self) -> &Cow<'static, str> { + static NAME: Cow<'static, str> = Cow::Borrowed("CustomFilepathToTestcaseFeedback"); + &NAME + } +} + +impl StateInitializer for CustomFilepathToTestcaseFeedback {} + +impl Feedback::Input, OT, S> + for CustomFilepathToTestcaseFeedback +where + S: HasCorpus, + F: FnMut(&mut S, &mut Testcase<::Input>, &Path) -> Result<(), Error>, +{ + #[allow(clippy::wrong_self_convention)] + #[inline] + fn is_interesting( + &mut self, + _state: &mut S, + _manager: &mut EM, + _input: &::Input, + _observers: &OT, + _exit_kind: &ExitKind, + ) -> Result { + Ok(false) + } + + fn append_metadata( + &mut self, + state: &mut S, + _manager: &mut EM, + _observers: &OT, + testcase: &mut Testcase<::Input>, + ) -> Result<(), Error> { + (self.func)(state, testcase, &self.out_dir)?; + Ok(()) + } + + #[cfg(feature = "track_hit_feedbacks")] + #[inline] + fn last_result(&self) -> Result { + Ok(false) + } +} diff --git a/fuzzers/forkserver/libafl-fuzz/src/feedback/mod.rs b/fuzzers/forkserver/libafl-fuzz/src/feedback/mod.rs new file mode 100644 index 0000000000..7b824a9269 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/feedback/mod.rs @@ -0,0 +1,3 @@ +pub mod filepath; +pub mod persistent_record; +pub mod seed; diff --git a/fuzzers/forkserver/libafl-fuzz/src/feedback/persistent_record.rs b/fuzzers/forkserver/libafl-fuzz/src/feedback/persistent_record.rs new file mode 100644 index 0000000000..c8db9e440c --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/feedback/persistent_record.rs @@ -0,0 +1,120 @@ +use std::{borrow::Cow, collections::VecDeque}; + +use libafl::{ + corpus::{Corpus, Testcase}, + executors::ExitKind, + feedbacks::{Feedback, FeedbackFactory, StateInitializer}, + inputs::Input, + state::HasCorpus, +}; +use libafl_bolts::{Error, Named}; +use serde::{Deserialize, Serialize}; + +/// A [`PersitentRecordFeedback`] tracks the last N inputs that the fuzzer has run. +/// TODO: Kept in memory for now but should write to disk. +#[derive(Serialize, Deserialize)] +pub struct PersitentRecordFeedback { + /// Vec that tracks the last `record_size` [`Input`] + record: VecDeque, + record_size: usize, +} + +impl PersitentRecordFeedback { + /// Create a new [`PersitentRecordFeedback`]. 
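As a usage sketch: the feedback above never marks anything interesting on its own, so it is OR-ed with the real feedbacks and only does its work in `append_metadata`. This mirrors the wiring in `fuzzer.rs` later in this patch, slightly simplified (there the whole expression is additionally wrapped in `SeedFeedback`); `map_feedback`, `time_observer`, and `fuzzer_dir` are assumed to be in scope:

```rust
// is_interesting() always returns false; append_metadata() calls the supplied
// closure to set the file name/path of every testcase that gets saved.
let mut feedback = feedback_or!(
    map_feedback,
    TimeFeedback::new(&time_observer),
    CustomFilepathToTestcaseFeedback::new(set_corpus_filepath, fuzzer_dir.to_path_buf())
);
```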
+ pub fn new(record_size: usize) -> Self { + Self { + record_size, + record: VecDeque::default(), + } + } +} + +impl FeedbackFactory, T> for PersitentRecordFeedback +where + I: Clone, +{ + fn create_feedback(&self, _ctx: &T) -> PersitentRecordFeedback { + Self { + record_size: self.record_size, + record: self.record.clone(), + } + } +} + +impl Named for PersitentRecordFeedback { + fn name(&self) -> &Cow<'static, str> { + static NAME: Cow<'static, str> = Cow::Borrowed("PersitentRecordFeedback"); + &NAME + } +} + +impl StateInitializer for PersitentRecordFeedback {} + +impl Feedback for PersitentRecordFeedback +where + S: HasCorpus, + S::Corpus: Corpus, + I: Input, +{ + #[allow(clippy::wrong_self_convention)] + #[inline] + fn is_interesting( + &mut self, + _state: &mut S, + _manager: &mut EM, + input: &I, + _observers: &OT, + _exit_kind: &ExitKind, + ) -> Result { + if self.should_run() { + self.record.push_back(input.clone()); + if self.record.len() == self.record_size { + drop(self.record.pop_front()); + } + } + Ok(false) + } + + fn append_metadata( + &mut self, + state: &mut S, + _manager: &mut EM, + _observers: &OT, + testcase: &mut Testcase, + ) -> Result<(), Error> { + if self.should_run() { + let file_path = testcase + .file_path() + .as_ref() + .expect("file path for the testcase must be set!"); + let file_dir = file_path + .parent() + .expect("testcase must have a parent directory!"); + // fetch the ID for this testcase + let id = state.corpus().peek_free_id().0; + let record = format!("RECORD:{id:0>6}"); + // save all inputs in our persistent record + for (i, input) in self.record.iter().enumerate() { + let filename = file_dir.join(format!("{record},cnt{i:0>6}")); + input.to_file(file_dir.join(filename))?; + } + // rewrite this current testcase's filepath + let filename = format!("RECORD:{id:0>6},cnt:{0:0>6}", self.record.len()); + *testcase.file_path_mut() = Some(file_dir.join(&filename)); + *testcase.filename_mut() = Some(filename); + } + Ok(()) + } + + #[cfg(feature = "track_hit_feedbacks")] + #[inline] + fn last_result(&self) -> Result { + Ok(false) + } +} + +impl PersitentRecordFeedback { + fn should_run(&self) -> bool { + self.record_size > 0 + } +} diff --git a/fuzzers/forkserver/libafl-fuzz/src/feedback/seed.rs b/fuzzers/forkserver/libafl-fuzz/src/feedback/seed.rs new file mode 100644 index 0000000000..f1eb674feb --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/feedback/seed.rs @@ -0,0 +1,134 @@ +use std::{borrow::Cow, marker::PhantomData}; + +use libafl::{ + corpus::Testcase, + executors::ExitKind, + feedbacks::{Feedback, StateInitializer}, + Error, +}; +use libafl_bolts::Named; + +use crate::Opt; + +/// A wrapper feedback used to determine actions for initial seeds. 
+/// Handles `AFL_EXIT_ON_SEED_ISSUES`, `AFL_IGNORE_SEED_ISSUES` & default afl-fuzz behavior +/// then, essentially becomes benign +#[allow(clippy::module_name_repetitions, clippy::struct_excessive_bools)] +#[derive(Debug)] +pub struct SeedFeedback { + /// Inner [`Feedback`] + pub inner: A, + ignore_timeouts: bool, + ignore_seed_issues: bool, + exit_on_seed_issues: bool, + done_loading_seeds: bool, + phantom: PhantomData, +} +impl SeedFeedback { + pub fn new(inner: A, opt: &Opt) -> Self { + Self { + inner, + ignore_timeouts: opt.ignore_timeouts, + ignore_seed_issues: opt.ignore_seed_issues, + exit_on_seed_issues: opt.exit_on_seed_issues, + done_loading_seeds: false, + phantom: PhantomData, + } + } +} + +impl StateInitializer for SeedFeedback +where + A: StateInitializer, +{ + fn init_state(&mut self, state: &mut S) -> Result<(), Error> { + self.inner.init_state(state)?; + Ok(()) + } +} + +impl Feedback for SeedFeedback +where + A: Feedback, +{ + fn is_interesting( + &mut self, + state: &mut S, + manager: &mut EM, + input: &I, + observers: &OT, + exit_kind: &ExitKind, + ) -> Result { + if !self.done_loading_seeds { + match exit_kind { + ExitKind::Timeout => { + if !self.ignore_timeouts { + if !self.ignore_seed_issues || self.exit_on_seed_issues { + return Err(Error::invalid_corpus( + "input led to a timeout; use AFL_IGNORE_SEED_ISSUES=1", + )); + } + return Ok(false); + } + } + ExitKind::Crash => { + if self.exit_on_seed_issues { + return Err(Error::invalid_corpus("input let to a crash; either omit AFL_EXIT_ON_SEED_ISSUES or set it to false.")); + } + // We regard all crashes as uninteresting during seed loading + return Ok(false); + } + _ => {} + } + } + let is_interesting = self + .inner + .is_interesting(state, manager, input, observers, exit_kind)?; + Ok(is_interesting) + } + /// Append to the testcase the generated metadata in case of a new corpus item + #[inline] + fn append_metadata( + &mut self, + state: &mut S, + manager: &mut EM, + observers: &OT, + testcase: &mut Testcase, + ) -> Result<(), Error> { + self.inner + .append_metadata(state, manager, observers, testcase)?; + Ok(()) + } + + /// Discard the stored metadata in case that the testcase is not added to the corpus + #[inline] + fn discard_metadata(&mut self, state: &mut S, input: &I) -> Result<(), Error> { + self.inner.discard_metadata(state, input)?; + Ok(()) + } + #[cfg(feature = "track_hit_feedbacks")] + fn last_result(&self) -> Result { + self.inner.last_result() + } + #[cfg(feature = "track_hit_feedbacks")] + fn append_hit_feedbacks(&self, list: &mut Vec>) -> Result<(), Error> { + if self.inner.last_result()? 
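Condensing the branches above, the seed-loading policy works out as follows (a summary of the code as written):

```rust
// While done_loading_seeds is false:
//   Timeout, ignore_timeouts set                      -> forwarded to the inner feedback
//   Timeout, !ignore_timeouts, and
//     (exit_on_seed_issues || !ignore_seed_issues)    -> Err: seed loading is aborted
//   Timeout, !ignore_timeouts, otherwise              -> Ok(false): the seed is skipped
//   Crash,   exit_on_seed_issues                      -> Err: seed loading is aborted
//   Crash,   otherwise                                -> Ok(false): the seed is skipped
// Once done_loading_seeds() flips the flag, SeedFeedback becomes a transparent
// wrapper around the inner feedback.
```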
{ + self.inner.append_hit_feedbacks(list)?; + } + Ok(()) + } +} + +impl Named for SeedFeedback { + #[inline] + fn name(&self) -> &Cow<'static, str> { + static NAME: Cow<'static, str> = Cow::Borrowed("SeedFeedback"); + &NAME + } +} + +impl SeedFeedback { + pub fn done_loading_seeds(&mut self) { + self.done_loading_seeds = true; + } +} diff --git a/fuzzers/forkserver/libafl-fuzz/src/fuzzer.rs b/fuzzers/forkserver/libafl-fuzz/src/fuzzer.rs new file mode 100644 index 0000000000..c985a81b0f --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/fuzzer.rs @@ -0,0 +1,671 @@ +use std::{ + borrow::Cow, + cell::RefCell, + marker::PhantomData, + path::{Path, PathBuf}, + rc::Rc, + time::Duration, +}; + +#[cfg(feature = "fuzzbench")] +use libafl::events::SimpleEventManager; +#[cfg(not(feature = "fuzzbench"))] +use libafl::events::{CentralizedEventManager, LlmpRestartingEventManager}; +#[cfg(feature = "fuzzbench")] +use libafl::monitors::SimpleMonitor; +use libafl::{ + corpus::{CachedOnDiskCorpus, Corpus, OnDiskCorpus}, + events::ProgressReporter, + executors::forkserver::{ForkserverExecutor, ForkserverExecutorBuilder}, + feedback_and, feedback_or, feedback_or_fast, + feedbacks::{ + CaptureTimeoutFeedback, ConstFeedback, CrashFeedback, MaxMapFeedback, TimeFeedback, + }, + fuzzer::StdFuzzer, + inputs::{BytesInput, NopTargetBytesConverter}, + mutators::{havoc_mutations, tokens_mutations, AFLppRedQueen, StdScheduledMutator, Tokens}, + observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, + schedulers::{ + powersched::{BaseSchedule, PowerSchedule}, + IndexesLenTimeMinimizerScheduler, QueueScheduler, StdWeightedScheduler, + }, + stages::{ + afl_stats::{AflStatsStage, CalibrationTime, FuzzTime, SyncTime}, + mutational::MultiMutationalStage, + time_tracker::TimeTrackingStageWrapper, + CalibrationStage, ColorizationStage, IfStage, StagesTuple, StdMutationalStage, + StdPowerMutationalStage, SyncFromDiskStage, VerifyTimeoutsStage, + }, + state::{ + HasCorpus, HasCurrentTestcase, HasExecutions, HasLastReportTime, HasStartTime, StdState, + UsesState, + }, + Error, Fuzzer, HasFeedback, HasMetadata, SerdeAny, +}; +#[cfg(not(feature = "fuzzbench"))] +use libafl_bolts::shmem::StdShMemProvider; +use libafl_bolts::{ + core_affinity::CoreId, + current_nanos, current_time, + fs::get_unique_std_input_file, + ownedref::OwnedRefMut, + rands::StdRand, + shmem::{ShMem, ShMemProvider, UnixShMemProvider}, + tuples::{tuple_list, Handled, Merge}, + AsSliceMut, +}; +#[cfg(feature = "nyx")] +use libafl_nyx::{executor::NyxExecutor, helper::NyxHelper, settings::NyxSettings}; +use libafl_targets::{cmps::AFLppCmpLogMap, AFLppCmpLogObserver, AFLppCmplogTracingStage}; +use serde::{Deserialize, Serialize}; + +use crate::{ + corpus::{set_corpus_filepath, set_solution_filepath}, + env_parser::AFL_DEFAULT_MAP_SIZE, + executor::{find_afl_binary, SupportedExecutors}, + feedback::{ + filepath::CustomFilepathToTestcaseFeedback, persistent_record::PersitentRecordFeedback, + seed::SeedFeedback, + }, + scheduler::SupportedSchedulers, + stages::mutational_stage::SupportedMutationalStages, + Opt, AFL_DEFAULT_INPUT_LEN_MAX, AFL_DEFAULT_INPUT_LEN_MIN, AFL_HARNESS_FILE_INPUT, + SHMEM_ENV_VAR, +}; + +pub type LibaflFuzzState = + StdState, StdRand, OnDiskCorpus>; + +#[cfg(not(feature = "fuzzbench"))] +type LibaflFuzzManager = CentralizedEventManager< + LlmpRestartingEventManager<(), LibaflFuzzState, StdShMemProvider>, + (), + LibaflFuzzState, + StdShMemProvider, +>; +#[cfg(feature = "fuzzbench")] +type LibaflFuzzManager = 
SimpleEventManager, LibaflFuzzState>; + +macro_rules! define_run_client { + ($state: ident, $mgr: ident, $fuzzer_dir: ident, $core_id: ident, $opt:ident, $is_main_node: ident, $body:block) => { + #[cfg(not(feature = "fuzzbench"))] + pub fn run_client( + $state: Option, + mut $mgr: LibaflFuzzManager, + $fuzzer_dir: &Path, + $core_id: CoreId, + $opt: &Opt, + $is_main_node: bool, + ) -> Result<(), Error> { + $body + } + #[cfg(feature = "fuzzbench")] + pub fn run_client( + $state: Option, + mut $mgr: LibaflFuzzManager, + $fuzzer_dir: &Path, + $core_id: CoreId, + $opt: &Opt, + $is_main_node: bool, + ) -> Result<(), Error> + where + F: FnMut(&str), + { + $body + } + }; +} + +define_run_client!(state, mgr, fuzzer_dir, core_id, opt, is_main_node, { + // Create the shared memory map for comms with the forkserver + let mut shmem_provider = UnixShMemProvider::new().unwrap(); + let mut shmem = shmem_provider + .new_shmem(opt.map_size.unwrap_or(AFL_DEFAULT_MAP_SIZE)) + .unwrap(); + shmem.write_to_env(SHMEM_ENV_VAR).unwrap(); + let shmem_buf = shmem.as_slice_mut(); + + // If we are in Nyx Mode, we need to use a different map observer. + #[cfg(feature = "nyx")] + let (nyx_helper, edges_observer) = { + if opt.nyx_mode { + // main node is the first core id in CentralizedLauncher + let cores = opt.cores.clone().expect("invariant; should never occur"); + let main_node_core_id = match cores.ids.len() { + 1 => None, + _ => Some(cores.ids.first().expect("invariant; should never occur").0), + }; + let nyx_settings = NyxSettings::builder() + .cpu_id(core_id.0) + .parent_cpu_id(main_node_core_id) + .build(); + let nyx_helper = NyxHelper::new(opt.executable.clone(), nyx_settings).unwrap(); + let observer = unsafe { + StdMapObserver::from_mut_ptr( + "edges", + nyx_helper.bitmap_buffer, + nyx_helper.bitmap_size, + ) + }; + (Some(nyx_helper), observer) + } else { + let observer = unsafe { StdMapObserver::new("edges", shmem_buf) }; + (None, observer) + } + }; + #[cfg(not(feature = "nyx"))] + let edges_observer = { unsafe { StdMapObserver::new("edges", shmem_buf) } }; + + let edges_observer = HitcountsMapObserver::new(edges_observer).track_indices(); + + // Create a MapFeedback for coverage guided fuzzin' + let map_feedback = MaxMapFeedback::new(&edges_observer); + + // Create the CalibrationStage; used to measure the stability of an input. + // We run the stage only if we are NOT doing sequential scheduling. + let calibration = IfStage::new( + |_, _, _, _| Ok(!opt.sequential_queue), + tuple_list!(TimeTrackingStageWrapper::::new( + CalibrationStage::new(&map_feedback) + )), + ); + + // Add user supplied dictionaries + let mut tokens = Tokens::new(); + tokens = tokens.add_from_files(&opt.dicts)?; + + // Create a AFLStatsStage; + let afl_stats_stage = AflStatsStage::builder() + .stats_file(fuzzer_dir.join("fuzzer_stats")) + .plot_file(fuzzer_dir.join("plot_data")) + .core_id(core_id) + .report_interval(Duration::from_secs(opt.stats_interval)) + .map_observer(&edges_observer) + .uses_autotokens(!opt.no_autodict) + .tokens(&tokens) + .banner(opt.executable.display().to_string()) + .version("0.13.2".to_string()) + .exec_timeout(opt.hang_timeout) + .target_mode(fuzzer_target_mode(opt).to_string()) + .build() + .expect("invariant; should never occur"); + + // Create an observation channel to keep track of the execution time. + let time_observer = TimeObserver::new("time"); + + /* + * Feedback to decide if the Input is "corpus worthy" + * We only check if it gives new coverage. 
+ * The `TimeFeedback` is used to annotate the testcase with it's exec time. + * The `CustomFilepathToTestcaseFeedback is used to adhere to AFL++'s corpus format. + * The `Seedfeedback` is used during seed loading to adhere to AFL++'s handling of seeds + */ + let mut feedback = SeedFeedback::new( + feedback_or!( + map_feedback, + TimeFeedback::new(&time_observer), + CustomFilepathToTestcaseFeedback::new(set_corpus_filepath, fuzzer_dir.to_path_buf()) + ), + opt, + ); + + // We need to share this reference as [`VerifyTimeoutsStage`] will toggle this + // value before re-running the alleged timeouts so we don't keep capturing timeouts infinitely. + let enable_capture_timeouts = Rc::new(RefCell::new(false)); + let capture_timeout_feedback = CaptureTimeoutFeedback::new(Rc::clone(&enable_capture_timeouts)); + + // Like AFL++ we re-run all timeouts with double the timeout to assert that they are not false positives + let timeout_verify_stage = IfStage::new( + |_, _, _, _| Ok(!opt.ignore_timeouts), + tuple_list!(VerifyTimeoutsStage::new( + enable_capture_timeouts, + Duration::from_millis(opt.hang_timeout), + )), + ); + + /* + * Feedback to decide if the Input is "solution worthy". + * We check if it's a crash or a timeout (if we are configured to consider timeouts) + * The `CustomFilepathToTestcaseFeedback is used to adhere to AFL++'s corpus format. + * The `MaxMapFeedback` saves objectives only if they hit new edges + * Note: The order of the feedbacks matter! + * */ + let mut objective = feedback_or!( + feedback_and!( + feedback_or_fast!( + CrashFeedback::new(), + feedback_and!( + ConstFeedback::new(!opt.ignore_timeouts), + capture_timeout_feedback, + ) + ), + MaxMapFeedback::with_name("edges_objective", &edges_observer) + ), + CustomFilepathToTestcaseFeedback::new(set_solution_filepath, fuzzer_dir.to_path_buf()), + PersitentRecordFeedback::new(opt.persistent_record), + ); + + // Initialize our State if necessary + let mut state = state.unwrap_or_else(|| { + StdState::new( + StdRand::with_seed(opt.rng_seed.unwrap_or(current_nanos())), + // TODO: configure testcache size + CachedOnDiskCorpus::::new(fuzzer_dir.join("queue"), 1000).unwrap(), + OnDiskCorpus::::new(fuzzer_dir).unwrap(), + &mut feedback, + &mut objective, + ) + .unwrap() + }); + + // Create our Mutational Stage. 
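To keep the two decision trees visible at a glance, this is the shape of the feedback and objective assembled above (a condensed restatement, nothing new):

```rust
// feedback  = SeedFeedback(
//               map_feedback OR TimeFeedback
//                            OR CustomFilepathToTestcaseFeedback(set_corpus_filepath))
// objective = ( (CrashFeedback OR_FAST (!ignore_timeouts AND capture_timeout_feedback))
//                 AND MaxMapFeedback("edges_objective") )
//             OR CustomFilepathToTestcaseFeedback(set_solution_filepath)
//             OR PersitentRecordFeedback(persistent_record)
```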
+ // We can either have a simple MutationalStage (for Queue scheduling) + // Or one that utilizes scheduling metadadata (Weighted Random scheduling) + let mutation = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); + let inner_mutational_stage = if opt.sequential_queue { + SupportedMutationalStages::StdMutational(StdMutationalStage::new(mutation), PhantomData) + } else { + SupportedMutationalStages::PowerMutational( + StdPowerMutationalStage::new(mutation), + PhantomData, + ) + }; + let mutational_stage = TimeTrackingStageWrapper::::new(inner_mutational_stage); + let strategy = opt.power_schedule.unwrap_or(BaseSchedule::EXPLORE); + + // Create our ColorizationStage + let colorization = ColorizationStage::new(&edges_observer); + + // Create our Scheduler + // Our scheduler can either be a Queue + // Or a "Weighted Random" which prioritizes entries that take less time and hit more edges + let scheduler; + if opt.sequential_queue { + scheduler = SupportedSchedulers::Queue(QueueScheduler::new(), PhantomData); + } else { + let ps = PowerSchedule::new(strategy); + let mut weighted_scheduler = + StdWeightedScheduler::with_schedule(&mut state, &edges_observer, Some(ps)); + if opt.cycle_schedules { + weighted_scheduler = weighted_scheduler.cycling_scheduler(); + } + scheduler = SupportedSchedulers::Weighted( + IndexesLenTimeMinimizerScheduler::new(&edges_observer, weighted_scheduler), + PhantomData, + ); + } + + // Create our Fuzzer + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + // Set LD_PRELOAD (Linux) && DYLD_INSERT_LIBRARIES (OSX) for target. + if let Some(preload_env) = &opt.afl_preload { + std::env::set_var("LD_PRELOAD", preload_env); + std::env::set_var("DYLD_INSERT_LIBRARIES", preload_env); + } + + // Insert appropriate shared libraries if frida_mode + if opt.frida_mode { + if opt.frida_asan { + std::env::set_var("ASAN_OPTIONS", "detect_leaks=false"); + } + let frida_bin = find_afl_binary("afl-frida-trace.so", Some(opt.executable.clone()))? + .display() + .to_string(); + let preload = if let Some(preload_env) = &opt.afl_preload { + format!("{preload_env}:{frida_bin}") + } else { + frida_bin + }; + std::env::set_var("LD_PRELOAD", &preload); + std::env::set_var("DYLD_INSERT_LIBRARIES", &preload); + } + #[cfg(feature = "nyx")] + let mut executor = { + if opt.nyx_mode { + SupportedExecutors::Nyx(NyxExecutor::builder().build( + nyx_helper.unwrap(), + (edges_observer, tuple_list!(time_observer)), + )) + } else { + // Create the base Executor + let mut executor_builder = + base_forkserver_builder(opt, &mut shmem_provider, fuzzer_dir); + // Set a custom exit code to be interpreted as a Crash if configured. + if let Some(crash_exitcode) = opt.crash_exitcode { + executor_builder = executor_builder.crash_exitcode(crash_exitcode); + } + + // Enable autodict if configured + if !opt.no_autodict { + executor_builder = executor_builder.autotokens(&mut tokens); + }; + + // Finalize and build our Executor + SupportedExecutors::Forkserver( + executor_builder + .build_dynamic_map(edges_observer, tuple_list!(time_observer)) + .unwrap(), + PhantomData, + ) + } + }; + #[cfg(not(feature = "nyx"))] + let mut executor = { + // Create the base Executor + let mut executor_builder = base_forkserver_builder(opt, &mut shmem_provider, fuzzer_dir); + // Set a custom exit code to be interpreted as a Crash if configured. 
+ if let Some(crash_exitcode) = opt.crash_exitcode { + executor_builder = executor_builder.crash_exitcode(crash_exitcode); + } + + // Enable autodict if configured + if !opt.no_autodict { + executor_builder = executor_builder.autotokens(&mut tokens); + }; + + // Finalize and build our Executor + SupportedExecutors::Forkserver( + executor_builder + .build_dynamic_map(edges_observer, tuple_list!(time_observer)) + .unwrap(), + PhantomData, + ) + }; + + let queue_dir = fuzzer_dir.join("queue"); + if opt.auto_resume { + // TODO - see afl-fuzz-init.c line 1898 onwards + } else { + // If we aren't auto resuming, copy all the files to our queue directory. + let mut id = 0; + state.walk_initial_inputs(&[opt.input_dir.clone()], |path: &PathBuf| { + let mut filename = path + .file_name() + .ok_or(Error::illegal_state(format!( + "file {} in input directory does not have a filename", + path.display() + )))? + .to_str() + .ok_or(Error::illegal_state(format!( + "file {} in input directory does not have a legal filename", + path.display() + )))? + .to_string(); + filename = format!("id:{id:0>6},time:0,execs:0,orig:{filename}"); + let cpy_res = std::fs::copy(path, queue_dir.join(filename)); + match cpy_res { + Err(e) if e.kind() == std::io::ErrorKind::InvalidInput => { + println!("skipping {} since it is not a regular file", path.display()); + } + Err(e) => return Err(e.into()), + Ok(_) => { + id += 1; + } + } + Ok(()) + })?; + } + // Load our seeds. + if state.must_load_initial_inputs() { + state + .load_initial_inputs_multicore( + &mut fuzzer, + &mut executor, + &mut mgr, + &[queue_dir], + &core_id, + opt.cores.as_ref().expect("invariant; should never occur"), + ) + .unwrap_or_else(|err| panic!("Failed to load initial corpus! {err:?}")); + println!("We imported {} inputs from disk.", state.corpus().count()); + } + + // We set IsInitialCorpusEntry as metadata for all initial testcases. + // Used in Cmplog stage if AFL_CMPLOG_ONLY_NEW. + if opt.cmplog_only_new { + for id in state.corpus().ids() { + let testcase = state.corpus().get(id).expect("should be present in Corpus"); + testcase + .borrow_mut() + .add_metadata(IsInitialCorpusEntryMetadata {}); + } + } + + // Add the tokens to State + state.add_metadata(tokens); + + // Set the start time of our Fuzzer + *state.start_time_mut() = current_time(); + + // Tell [`SeedFeedback`] that we're done loading seeds; rendering it benign. + fuzzer.feedback_mut().done_loading_seeds(); + + // Create a Sync stage to sync from foreign fuzzers + let sync_stage = IfStage::new( + |_, _, _, _| Ok(is_main_node && !opt.foreign_sync_dirs.is_empty()), + tuple_list!(TimeTrackingStageWrapper::::new( + SyncFromDiskStage::with_from_file( + opt.foreign_sync_dirs.clone(), + opt.foreign_sync_interval + ) + )), + ); + + // Create a CmpLog executor if configured. + // We only run cmplog on the main node + let cmplog_executable_path = match &opt.cmplog { + None => "-", + Some(ref p) => match p.as_str() { + "0" => opt.executable.to_str().unwrap(), + _ => p, + }, + }; + let run_cmplog = cmplog_executable_path != "-" && is_main_node; + + if run_cmplog { + // The CmpLog map shared between the CmpLog observer and CmpLog executor + let mut cmplog_shmem = shmem_provider.uninit_on_shmem::().unwrap(); + + // Let the Forkserver know the CmpLog shared memory map ID. + cmplog_shmem.write_to_env("__AFL_CMPLOG_SHM_ID").unwrap(); + let cmpmap = unsafe { OwnedRefMut::from_shmem(&mut cmplog_shmem) }; + + // Create the CmpLog observer. 
+        let cmplog_observer = AFLppCmpLogObserver::new("cmplog", cmpmap, true);
+        let cmplog_ref = cmplog_observer.handle();
+
+        // Create the CmpLog executor.
+        // CmpLog has about 25% execution overhead, so we give it double the timeout.
+        let cmplog_executor = base_forkserver_builder(opt, &mut shmem_provider, fuzzer_dir)
+            .timeout(Duration::from_millis(opt.hang_timeout * 2))
+            .program(cmplog_executable_path)
+            .build(tuple_list!(cmplog_observer))
+            .unwrap();
+
+        // Create the CmpLog tracing stage.
+        let tracing = AFLppCmplogTracingStage::new(cmplog_executor, cmplog_ref);
+
+        // Create a randomic Input2State stage
+        let rq = MultiMutationalStage::new(AFLppRedQueen::with_cmplog_options(true, true));
+
+        // Create an IfStage and wrap the CmpLog stages in it.
+        // We only run cmplog on the second fuzz run of a testcase;
+        // if it has already been fuzzed more often than that, cmplog is skipped.
+        // In addition, when running with AFL_CMPLOG_ONLY_NEW,
+        // testcases from the initial corpus never go through cmplog.
+        let cb = |_fuzzer: &mut _,
+                  _executor: &mut _,
+                  state: &mut LibaflFuzzState,
+                  _event_manager: &mut _|
+         -> Result<bool, Error> {
+            let testcase = state.current_testcase()?;
+            if testcase.scheduled_count() == 1
+                && !(opt.cmplog_only_new && testcase.has_metadata::<IsInitialCorpusEntryMetadata>())
+            {
+                return Ok(true);
+            }
+            Ok(false)
+        };
+        let cmplog = IfStage::new(cb, tuple_list!(colorization, tracing, rq));
+
+        // The order of the stages matters!
+        let mut stages = tuple_list!(
+            calibration,
+            cmplog,
+            mutational_stage,
+            timeout_verify_stage,
+            afl_stats_stage,
+            sync_stage
+        );
+
+        // Run our fuzzer; WITH CmpLog
+        run_fuzzer_with_stages(
+            opt,
+            &mut fuzzer,
+            &mut stages,
+            &mut executor,
+            &mut state,
+            &mut mgr,
+        )?;
+    } else {
+        // The order of the stages matters!
+        let mut stages = tuple_list!(
+            calibration,
+            mutational_stage,
+            timeout_verify_stage,
+            afl_stats_stage,
+            sync_stage
+        );
+
+        // Run our fuzzer; NO CmpLog
+        run_fuzzer_with_stages(
+            opt,
+            &mut fuzzer,
+            &mut stages,
+            &mut executor,
+            &mut state,
+            &mut mgr,
+        )?;
+    }
+    Ok(())
+    // TODO: serialize state when exiting.
+}); + +fn base_forkserver_builder<'a>( + opt: &'a Opt, + shmem_provider: &'a mut UnixShMemProvider, + fuzzer_dir: &Path, +) -> ForkserverExecutorBuilder<'a, NopTargetBytesConverter, UnixShMemProvider> { + let mut executor = ForkserverExecutor::builder() + .program(opt.executable.clone()) + .coverage_map_size(opt.map_size.unwrap_or(AFL_DEFAULT_MAP_SIZE)) + .debug_child(opt.debug_child) + .is_persistent(opt.is_persistent) + .is_deferred_frksrv(opt.defer_forkserver) + .min_input_size(opt.min_input_len.unwrap_or(AFL_DEFAULT_INPUT_LEN_MIN)) + .max_input_size(opt.max_input_len.unwrap_or(AFL_DEFAULT_INPUT_LEN_MAX)) + .timeout(Duration::from_millis(opt.hang_timeout)); + if let Some(target_env) = &opt.target_env { + executor = executor.envs(target_env); + } + if opt.frida_mode || opt.unicorn_mode || opt.qemu_mode { + executor = executor.kill_signal(nix::sys::signal::Signal::SIGKILL); + } + if let Some(kill_signal) = opt.kill_signal { + executor = executor.kill_signal(kill_signal); + } + if opt.is_persistent || opt.qemu_mode || opt.unicorn_mode { + executor = executor.shmem_provider(shmem_provider); + } + // Set arguments for the target if necessary + for arg in &opt.target_args { + if arg == AFL_HARNESS_FILE_INPUT { + let mut file = get_unique_std_input_file(); + if let Some(ext) = &opt.input_ext { + file = format!("{file}.{ext}"); + } + if let Some(cur_input_dir) = &opt.cur_input_dir { + executor = executor.arg_input_file(cur_input_dir.join(file)); + } else { + executor = executor.arg_input_file(fuzzer_dir.join(file)); + } + } else { + executor = executor.arg(arg); + } + } + if opt.qemu_mode { + // We need to give the harness as the first argument to afl-qemu-trace. + executor = executor.arg(opt.executable.clone()); + executor = executor.program( + find_afl_binary("afl-qemu-trace", Some(opt.executable.clone())) + .expect("to find afl-qemu-trace"), + ); + } + executor +} + +pub fn fuzzer_target_mode(opt: &Opt) -> Cow<'static, str> { + let mut res = String::new(); + if opt.unicorn_mode { + res = format!("{res}unicorn "); + } + if opt.qemu_mode { + res = format!("{res}qemu "); + } + if opt.forkserver_cs { + res = format!("{res}coresight "); + } + if opt.no_forkserver { + res = format!("{res}no_fsrv "); + } + if opt.crash_mode { + res = format!("{res}crash "); + } + if opt.is_persistent { + res = format!("{res}persistent "); + } + // TODO: not always shmem_testcase + res = format!("{res}shmem_testcase "); + if opt.defer_forkserver { + res = format!("{res}deferred "); + } + if !(opt.unicorn_mode + || opt.qemu_mode + || opt.forkserver_cs + || opt.non_instrumented_mode + || opt.no_forkserver + || opt.crash_mode + || opt.is_persistent + || opt.defer_forkserver) + { + res = format!("{res}default"); + } + Cow::Owned(res) +} + +#[derive(Debug, Serialize, Deserialize, SerdeAny)] +pub struct IsInitialCorpusEntryMetadata {} + +pub fn run_fuzzer_with_stages( + opt: &Opt, + fuzzer: &mut Z, + stages: &mut ST, + executor: &mut E, + state: &mut ::State, + mgr: &mut EM, +) -> Result<(), Error> +where + Z: Fuzzer, + E: UsesState, + EM: ProgressReporter, + ST: StagesTuple, + ::State: HasLastReportTime + HasExecutions + HasMetadata, +{ + if opt.bench_just_one { + fuzzer.fuzz_loop_for(stages, executor, state, mgr, 1)?; + } else { + fuzzer.fuzz_loop(stages, executor, state, mgr)?; + } + Ok(()) +} diff --git a/fuzzers/forkserver/libafl-fuzz/src/hooks.rs b/fuzzers/forkserver/libafl-fuzz/src/hooks.rs new file mode 100644 index 0000000000..454d9e9ac3 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/hooks.rs @@ -0,0 
+1,32 @@ +use libafl::{ + events::{Event, EventManagerHook}, + state::{State, Stoppable}, + Error, +}; +use libafl_bolts::ClientId; + +#[derive(Clone, Copy)] +pub struct LibAflFuzzEventHook { + exit_on_solution: bool, +} + +impl EventManagerHook for LibAflFuzzEventHook +where + S: State + Stoppable, +{ + fn pre_exec( + &mut self, + state: &mut S, + _client_id: ClientId, + event: &Event, + ) -> Result { + if self.exit_on_solution && matches!(event, Event::Objective { .. }) { + // TODO: dump state + state.request_stop(); + } + Ok(true) + } + fn post_exec(&mut self, _state: &mut S, _client_id: ClientId) -> Result { + Ok(true) + } +} diff --git a/fuzzers/forkserver/libafl-fuzz/src/main.rs b/fuzzers/forkserver/libafl-fuzz/src/main.rs new file mode 100644 index 0000000000..87dc0dd5b3 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/main.rs @@ -0,0 +1,399 @@ +#![forbid(unexpected_cfgs)] +#![allow(incomplete_features)] +#![warn(clippy::cargo)] +#![allow(ambiguous_glob_reexports)] +#![deny(clippy::cargo_common_metadata)] +#![deny(rustdoc::broken_intra_doc_links)] +#![deny(clippy::all)] +#![deny(clippy::pedantic)] +#![allow( + clippy::unreadable_literal, + clippy::type_repetition_in_bounds, + clippy::missing_errors_doc, + clippy::cast_possible_truncation, + clippy::used_underscore_binding, + clippy::ptr_as_ptr, + clippy::missing_panics_doc, + clippy::missing_docs_in_private_items, + clippy::module_name_repetitions, + clippy::ptr_cast_constness, + clippy::unsafe_derive_deserialize, + clippy::similar_names, + clippy::too_many_lines, + clippy::into_iter_without_iter, // broken +)] +#![cfg_attr(not(test), warn( + missing_debug_implementations, + //missing_docs, + trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + //unused_results +))] +#![cfg_attr( + test, + deny( + missing_debug_implementations, + trivial_casts, + trivial_numeric_casts, + unused_extern_crates, + unused_import_braces, + unused_qualifications, + unused_must_use, + //unused_results + ) +)] +#![cfg_attr( + test, + deny( + bad_style, + dead_code, + improper_ctypes, + non_shorthand_field_patterns, + no_mangle_generic_items, + overflowing_literals, + path_statements, + patterns_in_fns_without_body, + unconditional_recursion, + unused, + unused_allocation, + unused_comparisons, + unused_parens, + while_true + ) +)] + +use std::{collections::HashMap, path::PathBuf, time::Duration}; +mod env_parser; +mod feedback; +mod scheduler; +mod stages; +use clap::Parser; +use corpus::{check_autoresume, create_dir_if_not_exists}; +mod corpus; +mod executor; +mod fuzzer; +mod hooks; +use env_parser::parse_envs; +use fuzzer::run_client; +use libafl::{schedulers::powersched::BaseSchedule, Error}; +use libafl_bolts::core_affinity::Cores; +use nix::sys::signal::Signal; +#[cfg(not(feature = "fuzzbench"))] +use { + corpus::remove_main_node_file, + libafl::{ + events::{CentralizedLauncher, ClientDescription, EventConfig}, + monitors::MultiMonitor, + }, + libafl_bolts::shmem::{ShMemProvider, StdShMemProvider}, +}; +#[cfg(feature = "fuzzbench")] +use { + libafl::{events::SimpleEventManager, monitors::SimpleMonitor}, + libafl_bolts::core_affinity::CoreId, +}; + +const AFL_DEFAULT_INPUT_LEN_MAX: usize = 1_048_576; +const AFL_DEFAULT_INPUT_LEN_MIN: usize = 1; +const OUTPUT_GRACE: u64 = 25; +pub const AFL_DEFAULT_BROKER_PORT: u16 = 1337; +const PERSIST_SIG: &str = "##SIG_AFL_PERSISTENT##\0"; +const DEFER_SIG: &str = "##SIG_AFL_DEFER_FORKSRV##\0"; +const SHMEM_ENV_VAR: &str = "__AFL_SHM_ID"; +static 
AFL_HARNESS_FILE_INPUT: &str = "@@"; + +#[allow(clippy::too_many_lines)] +fn main() { + env_logger::init(); + let mut opt = Opt::parse(); + parse_envs(&mut opt).expect("invalid configuration"); + executor::check_binary(&mut opt, SHMEM_ENV_VAR).expect("binary to be valid"); + + // Create the shared memory map provider for LLMP + #[cfg(not(feature = "fuzzbench"))] + let shmem_provider = StdShMemProvider::new().unwrap(); + + // Create our Monitor + #[cfg(not(feature = "fuzzbench"))] + let monitor = MultiMonitor::new(|s| println!("{s}")); + #[cfg(feature = "fuzzbench")] + let monitor = SimpleMonitor::new(|s| println!("{}", s)); + + opt.auto_resume = if opt.auto_resume { + true + } else { + opt.input_dir.as_os_str() == "-" + }; + + create_dir_if_not_exists(&opt.output_dir).expect("could not create output directory"); + + // TODO: we need to think about the fuzzer naming scheme since they can be configured in + // different ways (ASAN/mutators) etc.... and how to autoresume appropriately. + // Currently we do AFL style resume with hardcoded names. + // Currently, we will error if we don't find our assigned dir. + // This will also not work if we use core 1-8 and then later, 16-24 + // since fuzzer names are using core_ids + #[cfg(not(feature = "fuzzbench"))] + let res = CentralizedLauncher::builder() + .shmem_provider(shmem_provider) + .configuration(EventConfig::from_name("default")) + .monitor(monitor) + .main_run_client( + |state: Option<_>, mgr: _, client_description: ClientDescription| { + println!( + "run primary client with id {} on core {}", + client_description.id(), + client_description.core_id().0 + ); + let fuzzer_dir = opt.output_dir.join("fuzzer_main"); + let _ = check_autoresume(&fuzzer_dir, opt.auto_resume).unwrap(); + let res = run_client( + state, + mgr, + &fuzzer_dir, + client_description.core_id(), + &opt, + true, + ); + let _ = remove_main_node_file(&fuzzer_dir); + res + }, + ) + .secondary_run_client( + |state: Option<_>, mgr: _, client_description: ClientDescription| { + println!( + "run secondary client with id {} on core {}", + client_description.id(), + client_description.core_id().0 + ); + let fuzzer_dir = opt + .output_dir + .join(format!("fuzzer_secondary_{}", client_description.id())); + let _ = check_autoresume(&fuzzer_dir, opt.auto_resume).unwrap(); + run_client( + state, + mgr, + &fuzzer_dir, + client_description.core_id(), + &opt, + false, + ) + }, + ) + .cores(&opt.cores.clone().expect("invariant; should never occur")) + .broker_port(opt.broker_port.unwrap_or(AFL_DEFAULT_BROKER_PORT)) + .build() + .launch(); + #[cfg(feature = "fuzzbench")] + let res = { + let fuzzer_dir = opt.output_dir.join("fuzzer_main"); + let _ = check_autoresume(&fuzzer_dir, opt.auto_resume).unwrap(); + let mgr = SimpleEventManager::new(monitor); + let res = run_client(None, mgr, &fuzzer_dir, CoreId(0), &opt, true); + res + }; + match res { + Ok(()) => unreachable!(), + Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. 
Good bye."), + Err(err) => panic!("Failed to run launcher: {err:?}"), + }; +} + +#[allow(clippy::struct_excessive_bools)] +#[derive(Debug, Parser, Clone)] +#[command( + name = "afl-fuzz", + about = "afl-fuzz, now with LibAFL!", + author = "aarnav " +)] +/// The Configuration +struct Opt { + executable: PathBuf, + target_args: Vec, + + // NOTE: afl-fuzz does not accept multiple input directories + #[arg(short = 'i')] + input_dir: PathBuf, + #[arg(short = 'o')] + output_dir: PathBuf, + /// file extension for the fuzz test input file (if needed) + #[arg(short = 'e')] + input_ext: Option, + /// use a fixed seed for the RNG + #[arg(short = 's')] + rng_seed: Option, + /// power schedules compute a seed's performance score: explore(default), fast, exploit, seek, rare, mmopt, coe, lin + #[arg(short = 'p')] + power_schedule: Option, + /// enable `CmpLog` by specifying a binary compiled for it. + #[arg(short = 'c')] + cmplog: Option, + /// sync to a foreign fuzzer queue directory (requires -M, can be specified up to 32 times) + #[arg(short = 'F')] + foreign_sync_dirs: Vec, + /// fuzzer dictionary (see README.md) + #[arg(short = 'x')] + dicts: Vec, + // Environment + CLI variables + #[arg(short = 'G')] + max_input_len: Option, + #[arg(short = 'g')] + min_input_len: Option, + /// sequential queue selection instead of weighted random + #[arg(short = 'Z')] + sequential_queue: bool, + // TODO: enforce + #[arg(short = 'm')] + memory_limit: Option, + // TODO: enforce + #[arg(short = 'V')] + fuzz_for_seconds: Option, + + /// timeout for each run + #[arg(short = 't', default_value_t = 1000)] + hang_timeout: u64, + + // Environment Variables + #[clap(skip)] + bench_just_one: bool, + #[clap(skip)] + bench_until_crash: bool, + + #[clap(skip)] + debug_child: bool, + #[clap(skip)] + is_persistent: bool, + #[clap(skip)] + no_autodict: bool, + #[clap(skip)] + kill_signal: Option, + #[clap(skip)] + map_size: Option, + #[clap(skip)] + ignore_timeouts: bool, + #[clap(skip)] + cur_input_dir: Option, + #[clap(skip)] + crash_exitcode: Option, + #[clap(skip)] + target_env: Option>, + #[clap(skip)] + cycle_schedules: bool, + #[clap(skip)] + cmplog_only_new: bool, + #[clap(skip)] + afl_preload: Option, + #[clap(skip)] + auto_resume: bool, + #[clap(skip)] + skip_bin_check: bool, + #[clap(skip)] + defer_forkserver: bool, + /// in seconds + #[clap(skip)] + stats_interval: u64, + + // New Environment Variables + #[clap(skip)] + cores: Option, + #[clap(skip)] + broker_port: Option, + + // Seed config + #[clap(skip)] + exit_on_seed_issues: bool, + // renamed from IGNORE_SEED_PROBLEMS + #[clap(skip)] + ignore_seed_issues: bool, + #[clap(skip)] + crash_seed_as_new_crash: bool, + + // Cmplog config + // TODO: actually use this config + #[arg(short='l', value_parser=parse_cmplog_args)] + cmplog_opts: Option, + + #[clap(skip)] + foreign_sync_interval: Duration, + #[clap(skip)] + persistent_record: usize, + + // TODO: + #[clap(skip)] + frida_persistent_addr: Option, + #[clap(skip)] + qemu_custom_bin: bool, + #[clap(skip)] + cs_custom_bin: bool, + /// use qemu-based instrumentation with Wine (Wine mode) + #[arg(short = 'W')] + wine_mode: bool, + #[clap(skip)] + uses_asan: bool, + /// use binary-only instrumentation (FRIDA mode) + #[arg(short = 'O')] + frida_mode: bool, + #[clap(skip)] + frida_asan: bool, + /// use binary-only instrumentation (QEMU mode) + #[arg(short = 'Q')] + qemu_mode: bool, + /// Nyx mode (Note: unlike AFL++, you do not need to specify -Y for parallel nyx fuzzing) + #[cfg(feature = "nyx")] + #[arg(short = 'X')] + 
nyx_mode: bool, + /// use unicorn-based instrumentation (Unicorn mode) + #[arg(short = 'U')] + unicorn_mode: bool, + #[clap(skip)] + forkserver_cs: bool, + #[clap(skip)] + no_forkserver: bool, + #[clap(skip)] + crash_mode: bool, + #[clap(skip)] + non_instrumented_mode: bool, +} + +#[allow(dead_code, clippy::struct_excessive_bools)] +#[derive(Debug, Clone)] +pub struct CmplogOpts { + file_size: CmplogFileSize, + arith_solving: bool, + transform_solving: bool, + exterme_transform_solving: bool, + random_colorization: bool, +} + +#[derive(Debug, Clone)] +pub enum CmplogFileSize { + Small, + Larger, + All, +} + +impl From<&str> for CmplogFileSize { + fn from(value: &str) -> Self { + if value.contains('1') { + Self::Small + } else if value.contains('3') { + Self::All + } else { + Self::Larger + } + } +} + +#[allow(clippy::unnecessary_wraps)] // we need to be compatible with Clap's value_parser +fn parse_cmplog_args(s: &str) -> Result { + Ok(CmplogOpts { + file_size: s.into(), + arith_solving: s.contains('A'), + transform_solving: s.contains('T'), + exterme_transform_solving: s.contains('X'), + random_colorization: s.contains('R'), + }) +} diff --git a/fuzzers/forkserver/libafl-fuzz/src/scheduler.rs b/fuzzers/forkserver/libafl-fuzz/src/scheduler.rs new file mode 100644 index 0000000000..fa50cdf338 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/scheduler.rs @@ -0,0 +1,123 @@ +use std::marker::PhantomData; + +use libafl::{ + corpus::{Corpus, CorpusId, HasTestcase, SchedulerTestcaseMetadata, Testcase}, + schedulers::{HasQueueCycles, RemovableScheduler, Scheduler}, + state::HasCorpus, + Error, HasMetadata, +}; +use libafl_bolts::tuples::MatchName; + +pub enum SupportedSchedulers { + Queue(Q, PhantomData), + Weighted(W, PhantomData), +} + +impl RemovableScheduler<::Input, S> for SupportedSchedulers +where + Q: Scheduler<::Input, S> + + RemovableScheduler<::Input, S>, + W: Scheduler<::Input, S> + + RemovableScheduler<::Input, S>, + S: HasCorpus + HasTestcase, +{ + fn on_remove( + &mut self, + state: &mut S, + id: CorpusId, + testcase: &Option::Input>>, + ) -> Result<(), Error> { + match self { + Self::Queue(queue, _) => queue.on_remove(state, id, testcase), + Self::Weighted(weighted, _) => weighted.on_remove(state, id, testcase), + } + } + + fn on_replace( + &mut self, + state: &mut S, + id: CorpusId, + prev: &Testcase<::Input>, + ) -> Result<(), Error> { + match self { + Self::Queue(queue, _) => queue.on_replace(state, id, prev), + Self::Weighted(weighted, _) => weighted.on_replace(state, id, prev), + } + } +} + +impl Scheduler<::Input, S> for SupportedSchedulers +where + Q: Scheduler<::Input, S>, + W: Scheduler<::Input, S>, + S: HasCorpus + HasTestcase, +{ + fn on_add(&mut self, state: &mut S, id: CorpusId) -> Result<(), Error> { + match self { + // We need to manually set the depth + // since we want to avoid implementing `AflScheduler` for `QueueScheduler` + Self::Queue(queue, _) => { + queue.on_add(state, id)?; + let current_id = *state.corpus().current(); + let mut depth = match current_id { + Some(parent_idx) => state + .testcase(parent_idx)? + .metadata::()? 
+ .depth(), + None => 0, + }; + depth += 1; + let mut testcase = state.corpus().get(id)?.borrow_mut(); + testcase.add_metadata(SchedulerTestcaseMetadata::new(depth)); + Ok(()) + } + Self::Weighted(weighted, _) => weighted.on_add(state, id), + } + } + + /// Gets the next entry in the queue + fn next(&mut self, state: &mut S) -> Result { + match self { + Self::Queue(queue, _) => queue.next(state), + Self::Weighted(weighted, _) => weighted.next(state), + } + } + fn on_evaluation( + &mut self, + state: &mut S, + input: &::Input, + observers: &OTB, + ) -> Result<(), Error> + where + OTB: MatchName, + { + match self { + Self::Queue(queue, _) => queue.on_evaluation(state, input, observers), + Self::Weighted(weighted, _) => weighted.on_evaluation(state, input, observers), + } + } + + fn set_current_scheduled( + &mut self, + state: &mut S, + next_id: Option, + ) -> Result<(), Error> { + match self { + Self::Queue(queue, _) => queue.set_current_scheduled(state, next_id), + Self::Weighted(weighted, _) => weighted.set_current_scheduled(state, next_id), + } + } +} + +impl HasQueueCycles for SupportedSchedulers +where + Q: HasQueueCycles, + W: HasQueueCycles, +{ + fn queue_cycles(&self) -> u64 { + match self { + Self::Queue(queue, _) => queue.queue_cycles(), + Self::Weighted(weighted, _) => weighted.queue_cycles(), + } + } +} diff --git a/fuzzers/forkserver/libafl-fuzz/src/stages/mod.rs b/fuzzers/forkserver/libafl-fuzz/src/stages/mod.rs new file mode 100644 index 0000000000..ee139c48c8 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/stages/mod.rs @@ -0,0 +1 @@ +pub mod mutational_stage; diff --git a/fuzzers/forkserver/libafl-fuzz/src/stages/mutational_stage.rs b/fuzzers/forkserver/libafl-fuzz/src/stages/mutational_stage.rs new file mode 100644 index 0000000000..546d2de1a6 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/src/stages/mutational_stage.rs @@ -0,0 +1,120 @@ +use std::{borrow::Cow, marker::PhantomData}; + +use libafl::{ + corpus::Corpus, + inputs::Input, + mutators::Mutator, + stages::{mutational::MutatedTransform, MutationalStage, Stage}, + state::{HasCorpus, HasRand, State, UsesState}, + Error, Evaluator, HasNamedMetadata, +}; +use libafl_bolts::Named; + +#[derive(Debug)] +pub enum SupportedMutationalStages { + StdMutational(SM, PhantomData<(S, I, M, EM, Z, E)>), + PowerMutational(P, PhantomData<(S, I, M, EM, Z, E)>), +} + +impl MutationalStage + for SupportedMutationalStages +where + E: UsesState, + EM: UsesState, + M: Mutator, + Z: Evaluator, + I: MutatedTransform + Clone + Input, + SM: MutationalStage, + P: MutationalStage, + S: State + HasRand + HasCorpus + HasNamedMetadata, + <::State as HasCorpus>::Corpus: Corpus, //delete me +{ + /// The mutator, added to this stage + #[inline] + fn mutator(&self) -> &M { + match self { + Self::StdMutational(m, _) => m.mutator(), + Self::PowerMutational(p, _) => p.mutator(), + } + } + + /// The list of mutators, added to this stage (as mutable ref) + #[inline] + fn mutator_mut(&mut self) -> &mut M { + match self { + Self::StdMutational(m, _) => m.mutator_mut(), + Self::PowerMutational(p, _) => p.mutator_mut(), + } + } + + /// Gets the number of iterations as a random number + fn iterations(&self, state: &mut S) -> Result { + match self { + Self::StdMutational(m, _) => m.iterations(state), + Self::PowerMutational(p, _) => p.iterations(state), + } + } +} + +impl UsesState for SupportedMutationalStages +where + S: State + HasRand, +{ + type State = S; +} + +impl Named for SupportedMutationalStages +where + SM: Named, + P: Named, +{ + fn 
name(&self) -> &Cow<'static, str> { + match self { + Self::StdMutational(m, _) => m.name(), + Self::PowerMutational(p, _) => p.name(), + } + } +} + +impl Stage + for SupportedMutationalStages +where + E: UsesState, + EM: UsesState, + M: Mutator, + Z: Evaluator, + I: MutatedTransform + Clone + Input, + SM: MutationalStage, + P: MutationalStage, + S: State + HasRand + HasCorpus + HasNamedMetadata, + <::State as HasCorpus>::Corpus: Corpus, //delete me +{ + #[inline] + #[allow(clippy::let_and_return)] + fn perform( + &mut self, + fuzzer: &mut Z, + executor: &mut E, + state: &mut S, + manager: &mut EM, + ) -> Result<(), Error> { + match self { + Self::StdMutational(m, _) => m.perform(fuzzer, executor, state, manager), + Self::PowerMutational(p, _) => p.perform(fuzzer, executor, state, manager), + } + } + + fn should_restart(&mut self, state: &mut S) -> Result { + match self { + Self::StdMutational(m, _) => m.should_restart(state), + Self::PowerMutational(p, _) => p.should_restart(state), + } + } + + fn clear_progress(&mut self, state: &mut S) -> Result<(), Error> { + match self { + Self::StdMutational(m, _) => m.clear_progress(state), + Self::PowerMutational(p, _) => p.clear_progress(state), + } + } +} diff --git a/fuzzers/forkserver/libafl-fuzz/test/seeds-frida/init b/fuzzers/forkserver/libafl-fuzz/test/seeds-frida/init new file mode 100644 index 0000000000..5c0ac06f55 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/seeds-frida/init @@ -0,0 +1 @@ +00000 diff --git a/fuzzers/forkserver/libafl-fuzz/test/seeds/init b/fuzzers/forkserver/libafl-fuzz/test/seeds/init new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/seeds/init @@ -0,0 +1 @@ +0 diff --git a/fuzzers/forkserver/libafl-fuzz/test/seeds_cmplog/init b/fuzzers/forkserver/libafl-fuzz/test/seeds_cmplog/init new file mode 100644 index 0000000000..df08510a83 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/seeds_cmplog/init @@ -0,0 +1 @@ +uC \ No newline at end of file diff --git a/fuzzers/forkserver/libafl-fuzz/test/seeds_frida/init b/fuzzers/forkserver/libafl-fuzz/test/seeds_frida/init new file mode 100644 index 0000000000..5c0ac06f55 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/seeds_frida/init @@ -0,0 +1 @@ +00000 diff --git a/fuzzers/forkserver/libafl-fuzz/test/seeds_qemu/init b/fuzzers/forkserver/libafl-fuzz/test/seeds_qemu/init new file mode 100644 index 0000000000..5c0ac06f55 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/seeds_qemu/init @@ -0,0 +1 @@ +00000 diff --git a/fuzzers/forkserver/libafl-fuzz/test/seeds_unicorn/in b/fuzzers/forkserver/libafl-fuzz/test/seeds_unicorn/in new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/seeds_unicorn/in @@ -0,0 +1 @@ +0 diff --git a/fuzzers/forkserver/libafl-fuzz/test/seeds_unicorn_cmpcov/in b/fuzzers/forkserver/libafl-fuzz/test/seeds_unicorn_cmpcov/in new file mode 100644 index 0000000000..40fdece9d2 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/seeds_unicorn_cmpcov/in @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/fuzzers/forkserver/libafl-fuzz/test/test-cmpcov.c b/fuzzers/forkserver/libafl-fuzz/test/test-cmpcov.c new file mode 100644 index 0000000000..eb0eb4fbc1 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/test-cmpcov.c @@ -0,0 +1,56 @@ +#include +#include +#include +#include + +char global_cmpval[] = "GLOBALVARIABLE"; + +int main(int argc, char **argv) { + char *input = argv[1], *buf, buffer[20]; + char cmpval[] = 
"LOCALVARIABLE"; + char shortval[4] = "abc"; + + if (argc < 2) { + ssize_t ret = read(0, buffer, sizeof(buffer) - 1); + buffer[ret] = 0; + input = buffer; + } + + if (strcmp(input, "LIBTOKENCAP") == 0) + printf("your string was LIBTOKENCAP\n"); + else if (strcmp(input, "BUGMENOT") == 0) + printf("your string was BUGMENOT\n"); + else if (strncmp(input, "BANANA", 3) == 0) + printf("your string started with BAN\n"); + else if (strcmp(input, "APRI\0COT") == 0) + printf("your string was APRI\n"); + else if (strcasecmp(input, "Kiwi") == 0) + printf("your string was Kiwi\n"); + else if (strncasecmp(input, "avocado", 9) == 0) + printf("your string was avocado\n"); + else if (strncasecmp(input, "Grapes", argc > 2 ? atoi(argv[2]) : 3) == 0) + printf("your string was a prefix of Grapes\n"); + else if (strstr(input, "tsala") != NULL) + printf("your string is a fruit salad\n"); + else if (strcmp(input, "BUFFEROVERFLOW") == 0) { + buf = (char *)malloc(16); + strcpy(buf, "TEST"); + strcat(buf, input); + printf("This will only crash with libdislocator: %s\n", buf); + + } else if (*(unsigned int *)input == 0xabadcafe) + + printf("GG you eat cmp tokens for breakfast!\n"); + else if (memcmp(cmpval, input, 8) == 0) + printf("local var memcmp works!\n"); + else if (memcmp(shortval, input, 4) == 0) + printf("short local var memcmp works!\n"); + else if (memcmp(global_cmpval, input, sizeof(global_cmpval)) == 0) + printf("global var memcmp works!\n"); + else if (strncasecmp("-h", input, 2) == 0) + printf("this is not the help you are looking for\n"); + else + printf("I do not know your string\n"); + + return 0; +} diff --git a/fuzzers/forkserver/libafl-fuzz/test/test-cmplog.c b/fuzzers/forkserver/libafl-fuzz/test/test-cmplog.c new file mode 100644 index 0000000000..60e981a44c --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/test-cmplog.c @@ -0,0 +1,31 @@ +#include +#include +#include +#include +#include +#include +#include + +int LLVMFuzzerTestOneInput(const uint8_t *buf, size_t i) { + if (i < 15) return -1; + if (buf[0] != 'A') return 0; + int *icmp = (int *)(buf + 1); + if (*icmp != 0x69694141) return 0; + if (memcmp(buf + 5, "1234EF", 6) == 0) abort(); + return 0; +} + +#ifdef __AFL_COMPILER +int main(int argc, char *argv[]) { + unsigned char buf[1024]; + ssize_t i; + while (__AFL_LOOP(1000)) { + i = read(0, (char *)buf, sizeof(buf) - 1); + if (i > 0) buf[i] = 0; + LLVMFuzzerTestOneInput(buf, i); + } + + return 0; +} + +#endif diff --git a/fuzzers/forkserver/libafl-fuzz/test/test-instr.c b/fuzzers/forkserver/libafl-fuzz/test/test-instr.c new file mode 100644 index 0000000000..a71baf4c19 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/test-instr.c @@ -0,0 +1,69 @@ +/* + american fuzzy lop++ - a trivial program to test the build + -------------------------------------------------------- + Originally written by Michal Zalewski + Copyright 2014 Google Inc. All rights reserved. + Copyright 2019-2024 AFLplusplus Project. All rights reserved. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at: + https://www.apache.org/licenses/LICENSE-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef TEST_SHARED_OBJECT + #define main main_exported +#endif + +int main(int argc, char **argv) { + int fd = 0, cnt; + char buff[8]; + char *buf = buff; + + // we support command line parameter and stdin + if (argc == 2) { + buf = argv[1]; + + } else { + if (argc >= 3 && strcmp(argv[1], "-f") == 0) { + if ((fd = open(argv[2], O_RDONLY)) < 0) { + fprintf(stderr, "Error: unable to open %s\n", argv[2]); + exit(-1); + } + } + + if ((cnt = read(fd, buf, sizeof(buf) - 1)) < 1) { + printf("Hum?\n"); + return 1; + } + + buf[cnt] = 0; + } + + if (getenv("AFL_DEBUG")) fprintf(stderr, "test-instr: %s\n", buf); + + // we support three input cases (plus a 4th if stdin is used but there is no + // input) + switch (buf[0]) { + case '0': + printf("Looks like a zero to me!\n"); + break; + + case '1': + printf("Pretty sure that is a one!\n"); + break; + + default: + printf("Neither one or zero? How quaint!\n"); + break; + } + + return 0; +} diff --git a/fuzzers/forkserver/libafl-fuzz/test/test.sh b/fuzzers/forkserver/libafl-fuzz/test/test.sh new file mode 100755 index 0000000000..4ae8c497f5 --- /dev/null +++ b/fuzzers/forkserver/libafl-fuzz/test/test.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +export AFL_DIR_NAME="./AFLplusplus-stable" +export AFL_CC_PATH="$AFL_DIR_NAME/afl-clang-fast" +export LIBAFL_FUZZ_PATH="../target/release/libafl-fuzz" +export LLVM_CONFIG="llvm-config-18" +if [ ! -d "$AFL_DIR_NAME" ]; then + wget https://github.com/AFLplusplus/AFLplusplus/archive/refs/heads/stable.zip + unzip stable.zip + cd $AFL_DIR_NAME + LLVM_CONFIG=$LLVM_CONFIG make + cd .. +fi + +cargo build --release + + +AFL_PATH=$AFL_DIR_NAME $AFL_CC_PATH $AFL_DIR_NAME/test-instr.c -o out-instr + +AFL_CORES=1 LLVM_CONFIG=${LLVM_CONFIG} AFL_STATS_INTERVAL=1 AFL_NUM_CORES=1 timeout 5 $LIBAFL_FUZZ_PATH -i ./seeds -o ./output $(pwd)/out-instr +test -n "$( ls output/fuzzer_main/queue/id:000002* 2>/dev/null )" || exit 1 +test -n "$( ls output/fuzzer_main/fuzzer_stats 2>/dev/null )" || exit 1 +test -n "$( ls output/fuzzer_main/plot_data 2>/dev/null )" || exit 1 +test -n "$( ls output/fuzzer_main/crashe2s 2>/dev/null )" || exit 1 +test -n "$( ls output/fuzzer_main/hangs 2>/dev/null )" || exit 1 diff --git a/fuzzers/forkserver_libafl_cc/Cargo.toml b/fuzzers/forkserver_libafl_cc/Cargo.toml deleted file mode 100644 index 9c51faeeae..0000000000 --- a/fuzzers/forkserver_libafl_cc/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "forkserver_libafl_cc" -version = "0.8.2" -authors = ["ergrelet "] -edition = "2021" - -[features] -default = ["std"] -std = [] -# Forces a crash -crash = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = { version = "6.0" } - -[dependencies] -clap = { version = "4.0", features = ["derive"] } -nix = { version = "0.29", features = ["signal"] } -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_cc = { path = "../../libafl_cc/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer", "pointer_maps"] } -env_logger = "0.11" - -[lib] -name = "libforkserver_libafl_cc" -crate-type = ["staticlib"] diff --git a/fuzzers/forkserver_simple/Cargo.toml b/fuzzers/forkserver_simple/Cargo.toml deleted file mode 100644 index 015a0176ae..0000000000 --- 
a/fuzzers/forkserver_simple/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "forkserver_simple" -version = "0.13.0" -authors = ["tokatoka "] -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 - -[dependencies] -env_logger = "0.11" -libafl = { path = "../../libafl/", features = ["std", "derive"] } -libafl_bolts = { path = "../../libafl_bolts/" } -clap = { version = "4.0", features = ["derive"] } -nix = { version = "0.29", features = ["signal"] } diff --git a/fuzzers/frida_executable_libpng/Cargo.toml b/fuzzers/frida_executable_libpng/Cargo.toml deleted file mode 100644 index ce3101a951..0000000000 --- a/fuzzers/frida_executable_libpng/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "frida_executable_fuzzer" -version = "0.1.0" -edition = "2021" - -[lib] -name = "frida_executable_fuzzer" -crate_type = ["cdylib"] - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/", features = [ "std", "llmp_compression", "llmp_bind_public", "frida_cli" ] } #, "llmp_small_maps", "llmp_debug"]} -libafl_bolts = { path = "../../libafl_bolts/" } -frida-gum = { version = "0.13.6", features = [ "auto-download", "event-sink", "invocation-listener"] } -libafl_frida = { path = "../../libafl_frida", features = ["cmplog"] } -libafl_targets = { path = "../../libafl_targets", features = ["sancov_cmplog"] } -libc = "0.2" -libloading = "0.7" -num-traits = "0.2" -rangemap = "1.3" -clap = { version = "4.0", features = ["derive"] } -serde = "1.0" -mimalloc = { version = "*", default-features = false } - -backtrace = "0.3" -color-backtrace = "0.5" diff --git a/fuzzers/frida_gdiplus/Cargo.toml b/fuzzers/frida_gdiplus/Cargo.toml deleted file mode 100644 index e9ee61437c..0000000000 --- a/fuzzers/frida_gdiplus/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "frida_gdiplus" -version = "0.13.0" -authors = ["Richard Johnson "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/", features = [ "std", "llmp_compression", - "llmp_bind_public", "frida_cli", "errors_backtrace" ] } #, "llmp_small_maps", "llmp_debug"]} -libafl_bolts = { path = "../../libafl_bolts/" } -frida-gum = { version = "0.13.6", features = ["auto-download", "event-sink", "invocation-listener"] } -libafl_frida = { path = "../../libafl_frida", features = ["cmplog"] } -libafl_targets = { path = "../../libafl_targets", features = ["sancov_cmplog"] } -libloading = "0.7" -mimalloc = { version = "*", default-features = false } -dlmalloc ={version = "0.2.6", features = ["global"]} -color-backtrace = "0.5" -env_logger = "0.10.0" -iced-x86 = { version = "1.20.0", features = ["code_asm"] } diff --git a/fuzzers/frida_gdiplus/README.md b/fuzzers/frida_gdiplus/README.md deleted file mode 100644 index 7a0cdf6924..0000000000 --- a/fuzzers/frida_gdiplus/README.md +++ /dev/null @@ -1,9 +0,0 @@ -## Build - -To build this example, run `cargo build --release` in this folder. 
- -Then compile the harness `cl.exe /LD harness.cc /link /dll gdiplus.lib ole32.lib` - -## Run - -To run the example `target\release\frida_gdiplus.exe -H harness.dll -i corpus -o output --libs-to-instrument gdi32.dll --libs-to-instrument gdi32full.dll --libs-to-instrument gdiplus.dll --libs-to-instrument WindowsCodecs.dll --disable-excludes` diff --git a/fuzzers/frida_libpng/Cargo.toml b/fuzzers/frida_libpng/Cargo.toml deleted file mode 100644 index b401c47fb7..0000000000 --- a/fuzzers/frida_libpng/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "frida_fuzzer" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/", features = [ "std", "llmp_compression", - "llmp_bind_public", "frida_cli", "errors_backtrace" ] } #, "llmp_small_maps", "llmp_debug"]} -libafl_bolts = { path = "../../libafl_bolts/" } -frida-gum = { version = "0.13.6", features = ["auto-download", "event-sink", "invocation-listener"] } -libafl_frida = { path = "../../libafl_frida", features = ["cmplog"] } -libafl_targets = { path = "../../libafl_targets", features = ["sancov_cmplog"] } -libloading = "0.7" -mimalloc = { version = "*", default-features = false } -color-backtrace = "0.5" -log = "0.4.20" -env_logger = "0.10.0" diff --git a/fuzzers/full_system/nyx_libxml2_parallel/Cargo.toml b/fuzzers/full_system/nyx_libxml2_parallel/Cargo.toml new file mode 100644 index 0000000000..8199ad78a0 --- /dev/null +++ b/fuzzers/full_system/nyx_libxml2_parallel/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "nyx_libxml2_parallel" +version = "0.14.1" +edition = "2021" +default-run = "nyx_libxml2_parallel" + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_cc = { path = "../../../libafl_cc" } +libafl_nyx = { path = "../../../libafl_nyx" } + +log = { version = "0.4.22", features = ["release_max_level_info"] } + +[profile.release] +codegen-units = 1 +opt-level = 3 diff --git a/fuzzers/nyx_libxml2_standalone/Makefile.toml b/fuzzers/full_system/nyx_libxml2_parallel/Makefile.toml similarity index 81% rename from fuzzers/nyx_libxml2_standalone/Makefile.toml rename to fuzzers/full_system/nyx_libxml2_parallel/Makefile.toml index 904359f285..f3283a2767 100644 --- a/fuzzers/nyx_libxml2_standalone/Makefile.toml +++ b/fuzzers/full_system/nyx_libxml2_parallel/Makefile.toml @@ -1,20 +1,20 @@ # Variables [env] -FUZZER_NAME='nyx_libxml2_standalone' +FUZZER_NAME = 'nyx_libxml2_parallel' PROJECT_DIR = { script = ["pwd"] } [config] skip_core_tasks = true # skip `cargo test` to avoid error [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this platform" ''' [tasks.build] -dependencies = [ "libxml2" ] +dependencies = ["libxml2"] [tasks.libxml2] linux_alias = "libxml2_unix" @@ -23,14 +23,14 @@ windows_alias = "unsupported" [tasks.libxml2_unix] # condition = { files_not_exist = ["./libxml2"]} -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' ./setup_libxml2.sh ''' [tasks.enable_kvm_vmware_hypercall] script_runner = "@shell" -script=''' +script = ''' if [ ! -e /sys/module/kvm/parameters/enable_vmware_backdoor ] || ! 
grep -qF Y /sys/module/kvm/parameters/enable_vmware_backdoor; then sudo modprobe -r kvm-intel # or kvm-amd for AMD @@ -48,10 +48,10 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' cargo run ''' -dependencies = [ "libxml2", "enable_kvm_vmware_hypercall" ] +dependencies = ["libxml2", "enable_kvm_vmware_hypercall"] # Clean up [tasks.clean] @@ -62,8 +62,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' make -C ./libxml2 clean cargo clean ''' diff --git a/fuzzers/nyx_libxml2_parallel/README.md b/fuzzers/full_system/nyx_libxml2_parallel/README.md similarity index 100% rename from fuzzers/nyx_libxml2_parallel/README.md rename to fuzzers/full_system/nyx_libxml2_parallel/README.md diff --git a/fuzzers/nyx_libxml2_standalone/setup_libxml2.sh b/fuzzers/full_system/nyx_libxml2_parallel/setup_libxml2.sh similarity index 87% rename from fuzzers/nyx_libxml2_standalone/setup_libxml2.sh rename to fuzzers/full_system/nyx_libxml2_parallel/setup_libxml2.sh index e942831e02..f169faa0c7 100755 --- a/fuzzers/nyx_libxml2_standalone/setup_libxml2.sh +++ b/fuzzers/full_system/nyx_libxml2_parallel/setup_libxml2.sh @@ -26,7 +26,7 @@ cd ./libxml2/ || exit ./autogen.sh --enable-shared=no || exit make -j || exit cd - || exit -python3 "../../libafl_nyx/packer/packer/nyx_packer.py" \ +python3 "../../../libafl_nyx/packer/packer/nyx_packer.py" \ ./libxml2/xmllint \ /tmp/nyx_libxml2 \ afl \ @@ -36,4 +36,4 @@ python3 "../../libafl_nyx/packer/packer/nyx_packer.py" \ --fast_reload_mode \ --purge || exit -python3 ../../libafl_nyx/packer/packer/nyx_config_gen.py /tmp/nyx_libxml2/ Kernel || exit +python3 ../../../libafl_nyx/packer/packer/nyx_config_gen.py /tmp/nyx_libxml2/ Kernel || exit diff --git a/fuzzers/nyx_libxml2_parallel/src/bin/libafl_cc.rs b/fuzzers/full_system/nyx_libxml2_parallel/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/nyx_libxml2_parallel/src/bin/libafl_cc.rs rename to fuzzers/full_system/nyx_libxml2_parallel/src/bin/libafl_cc.rs diff --git a/fuzzers/fuzzbench/src/bin/libafl_cxx.rs b/fuzzers/full_system/nyx_libxml2_parallel/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/fuzzbench/src/bin/libafl_cxx.rs rename to fuzzers/full_system/nyx_libxml2_parallel/src/bin/libafl_cxx.rs diff --git a/fuzzers/nyx_libxml2_parallel/src/main.rs b/fuzzers/full_system/nyx_libxml2_parallel/src/main.rs similarity index 91% rename from fuzzers/nyx_libxml2_parallel/src/main.rs rename to fuzzers/full_system/nyx_libxml2_parallel/src/main.rs index 017d9c667d..02de207cf9 100644 --- a/fuzzers/nyx_libxml2_parallel/src/main.rs +++ b/fuzzers/full_system/nyx_libxml2_parallel/src/main.rs @@ -2,7 +2,7 @@ use std::path::{Path, PathBuf}; use libafl::{ corpus::{CachedOnDiskCorpus, Corpus, OnDiskCorpus, Testcase}, - events::{launcher::Launcher, EventConfig}, + events::{launcher::Launcher, ClientDescription, EventConfig}, feedbacks::{CrashFeedback, MaxMapFeedback}, inputs::BytesInput, monitors::MultiMonitor, @@ -14,7 +14,7 @@ use libafl::{ Error, Fuzzer, StdFuzzer, }; use libafl_bolts::{ - core_affinity::{CoreId, Cores}, + core_affinity::Cores, rands::StdRand, shmem::{ShMemProvider, StdShMemProvider}, tuples::tuple_list, @@ -31,10 +31,12 @@ fn main() { let parent_cpu_id = cores.ids.first().expect("unable to get first core id"); // region: fuzzer start function - let mut run_client = |state: Option<_>, mut restarting_mgr, core_id: CoreId| { 
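+    // Each fuzzing client is described by a ClientDescription, which carries its
+    // client id and the core it is pinned to.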
+ let mut run_client = |state: Option<_>, + mut restarting_mgr, + client_description: ClientDescription| { // nyx stuff let settings = NyxSettings::builder() - .cpu_id(core_id.0) + .cpu_id(client_description.core_id().0) .parent_cpu_id(Some(parent_cpu_id.0)) .build(); let helper = NyxHelper::new("/tmp/nyx_libxml2/", settings).unwrap(); diff --git a/fuzzers/full_system/nyx_libxml2_standalone/Cargo.toml b/fuzzers/full_system/nyx_libxml2_standalone/Cargo.toml new file mode 100644 index 0000000000..36d50491d4 --- /dev/null +++ b/fuzzers/full_system/nyx_libxml2_standalone/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "nyx_libxml2_standalone" +version = "0.14.1" +edition = "2021" +default-run = "nyx_libxml2_standalone" + +[dependencies] +libafl = { path = "../../../libafl", features = ["tui_monitor"] } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_cc = { path = "../../../libafl_cc" } +libafl_nyx = { path = "../../../libafl_nyx" } + +log = { version = "0.4.22", features = ["release_max_level_info"] } + +[profile.release] +codegen-units = 1 +opt-level = 3 diff --git a/fuzzers/nyx_libxml2_parallel/Makefile.toml b/fuzzers/full_system/nyx_libxml2_standalone/Makefile.toml similarity index 81% rename from fuzzers/nyx_libxml2_parallel/Makefile.toml rename to fuzzers/full_system/nyx_libxml2_standalone/Makefile.toml index ded65f8afd..7fb8746b1a 100644 --- a/fuzzers/nyx_libxml2_parallel/Makefile.toml +++ b/fuzzers/full_system/nyx_libxml2_standalone/Makefile.toml @@ -1,20 +1,20 @@ # Variables [env] -FUZZER_NAME='nyx_libxml2_parallel' +FUZZER_NAME = 'nyx_libxml2_standalone' PROJECT_DIR = { script = ["pwd"] } [config] skip_core_tasks = true # skip `cargo test` to avoid error [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this platform" ''' [tasks.build] -dependencies = [ "libxml2" ] +dependencies = ["libxml2"] [tasks.libxml2] linux_alias = "libxml2_unix" @@ -23,14 +23,14 @@ windows_alias = "unsupported" [tasks.libxml2_unix] # condition = { files_not_exist = ["./libxml2"]} -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' ./setup_libxml2.sh ''' [tasks.enable_kvm_vmware_hypercall] script_runner = "@shell" -script=''' +script = ''' if [ ! -e /sys/module/kvm/parameters/enable_vmware_backdoor ] || ! 
grep -qF Y /sys/module/kvm/parameters/enable_vmware_backdoor; then sudo modprobe -r kvm-intel # or kvm-amd for AMD @@ -48,10 +48,10 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' cargo run ''' -dependencies = [ "libxml2", "enable_kvm_vmware_hypercall" ] +dependencies = ["libxml2", "enable_kvm_vmware_hypercall"] # Clean up [tasks.clean] @@ -62,8 +62,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' make -C ./libxml2 clean cargo clean ''' diff --git a/fuzzers/nyx_libxml2_standalone/README.md b/fuzzers/full_system/nyx_libxml2_standalone/README.md similarity index 100% rename from fuzzers/nyx_libxml2_standalone/README.md rename to fuzzers/full_system/nyx_libxml2_standalone/README.md diff --git a/fuzzers/nyx_libxml2_parallel/setup_libxml2.sh b/fuzzers/full_system/nyx_libxml2_standalone/setup_libxml2.sh similarity index 87% rename from fuzzers/nyx_libxml2_parallel/setup_libxml2.sh rename to fuzzers/full_system/nyx_libxml2_standalone/setup_libxml2.sh index e942831e02..f169faa0c7 100755 --- a/fuzzers/nyx_libxml2_parallel/setup_libxml2.sh +++ b/fuzzers/full_system/nyx_libxml2_standalone/setup_libxml2.sh @@ -26,7 +26,7 @@ cd ./libxml2/ || exit ./autogen.sh --enable-shared=no || exit make -j || exit cd - || exit -python3 "../../libafl_nyx/packer/packer/nyx_packer.py" \ +python3 "../../../libafl_nyx/packer/packer/nyx_packer.py" \ ./libxml2/xmllint \ /tmp/nyx_libxml2 \ afl \ @@ -36,4 +36,4 @@ python3 "../../libafl_nyx/packer/packer/nyx_packer.py" \ --fast_reload_mode \ --purge || exit -python3 ../../libafl_nyx/packer/packer/nyx_config_gen.py /tmp/nyx_libxml2/ Kernel || exit +python3 ../../../libafl_nyx/packer/packer/nyx_config_gen.py /tmp/nyx_libxml2/ Kernel || exit diff --git a/fuzzers/nyx_libxml2_standalone/src/bin/libafl_cc.rs b/fuzzers/full_system/nyx_libxml2_standalone/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/nyx_libxml2_standalone/src/bin/libafl_cc.rs rename to fuzzers/full_system/nyx_libxml2_standalone/src/bin/libafl_cc.rs diff --git a/fuzzers/fuzzbench_ctx/src/bin/libafl_cxx.rs b/fuzzers/full_system/nyx_libxml2_standalone/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/fuzzbench_ctx/src/bin/libafl_cxx.rs rename to fuzzers/full_system/nyx_libxml2_standalone/src/bin/libafl_cxx.rs diff --git a/fuzzers/nyx_libxml2_standalone/src/main.rs b/fuzzers/full_system/nyx_libxml2_standalone/src/main.rs similarity index 93% rename from fuzzers/nyx_libxml2_standalone/src/main.rs rename to fuzzers/full_system/nyx_libxml2_standalone/src/main.rs index 3ba9e02d39..bd9bc00f38 100644 --- a/fuzzers/nyx_libxml2_standalone/src/main.rs +++ b/fuzzers/full_system/nyx_libxml2_standalone/src/main.rs @@ -5,7 +5,7 @@ use libafl::{ events::SimpleEventManager, feedbacks::{CrashFeedback, MaxMapFeedback}, inputs::BytesInput, - monitors::tui::{ui::TuiUI, TuiMonitor}, + monitors::tui::TuiMonitor, mutators::{havoc_mutations, StdScheduledMutator}, observers::StdMapObserver, schedulers::RandScheduler, @@ -40,8 +40,7 @@ fn main() { // switch monitor if you want // let monitor = SimpleMonitor::new(|x|-> () {println!("{}",x)}); - let ui = TuiUI::new(String::from("test_fuzz"), true); - let monitor = TuiMonitor::new(ui); + let monitor = TuiMonitor::builder().title("test_fuzz").build(); let mut mgr = SimpleEventManager::new(monitor); let mut executor = NyxExecutor::builder().build(helper, tuple_list!(observer)); diff --git 
a/fuzzers/qemu_systemmode/.gitignore b/fuzzers/full_system/qemu_baremetal/.gitignore similarity index 100% rename from fuzzers/qemu_systemmode/.gitignore rename to fuzzers/full_system/qemu_baremetal/.gitignore diff --git a/fuzzers/full_system/qemu_baremetal/Cargo.toml b/fuzzers/full_system/qemu_baremetal/Cargo.toml new file mode 100644 index 0000000000..22c58a9144 --- /dev/null +++ b/fuzzers/full_system/qemu_baremetal/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "qemu_baremetal" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", + "Romain Malmain ", +] +edition = "2021" + +[features] +default = ["std", "low_level"] +std = [] + +low_level = [ +] # The low-level way to interact with LibAFL QEMU, with direct calls to QEMU's functions +breakpoint = [] # Uses the command system, with breakpoints +sync_exit = [] # Uses the command system, with sync exit. + +shared = ["libafl_qemu/shared"] + +[profile.release] +incremental = true +debug = true +lto = "fat" +codegen-units = 1 + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets" } +libafl_qemu = { path = "../../../libafl_qemu", features = [ + "arm", + "systemmode", +], default-features = false } +env_logger = "0.11.5" +log = { version = "0.4.22", features = ["release_max_level_info"] } + +[build-dependencies] +libafl_qemu_build = { path = "../../../libafl_qemu/libafl_qemu_build" } diff --git a/fuzzers/full_system/qemu_baremetal/Makefile.toml b/fuzzers/full_system/qemu_baremetal/Makefile.toml new file mode 100644 index 0000000000..69f71a1640 --- /dev/null +++ b/fuzzers/full_system/qemu_baremetal/Makefile.toml @@ -0,0 +1,234 @@ +env_scripts = [''' +#!@duckscript +profile = get_env PROFILE + +if eq ${profile} "dev" + set_env PROFILE_DIR debug +else + set_env PROFILE_DIR ${profile} +end +''', ''' +#!@duckscript +runs_on_ci = get_env RUN_ON_CI + +if ${runs_on_ci} + cargo_target_dir = get_env CARGO_MAKE_CRATE_TARGET_DIRECTORY + set_env TARGET_DIR ${cargo_target_dir} + set_env KERNEL ${cargo_target_dir}/example.elf +end +'''] + +[env] +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +TARGET_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${FEATURE}" +LIBAFL_QEMU_CLONE_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/qemu-libafl-bridge" +KERNEL = "${TARGET_DIR}/example.elf" + +[tasks.target_dir] +condition = { files_not_exist = ["${TARGET_DIR}"] } +script_runner = "@shell" +script = ''' +mkdir -p ${TARGET_DIR} +''' + +[tasks.image] +dependencies = ["target_dir"] +condition = { files_not_exist = ["${TARGET_DIR}/dummy.qcow2"] } +script_runner = "@shell" +script = ''' +qemu-img create -f qcow2 ${TARGET_DIR}/dummy.qcow2 32M +''' + +[tasks.target] +dependencies = ["target_dir"] +condition = { env_set = ["TARGET_DEFINE"] } +command = "arm-none-eabi-gcc" +args = [ + "-ggdb", + "-ffreestanding", + "-nostartfiles", + "-lgcc", + "-T", + "${CARGO_MAKE_WORKING_DIRECTORY}/example/mps2_m3.ld", + "-mcpu=cortex-m3", + "${CARGO_MAKE_WORKING_DIRECTORY}/example/main.c", + "${CARGO_MAKE_WORKING_DIRECTORY}/example/startup.c", + "-D", + "${TARGET_DEFINE}", + "-I", + "${TARGET_DIR}/${PROFILE_DIR}/include", + "-o", + "${TARGET_DIR}/example.elf", +] + +[tasks.build_fuzzer] +condition = { env_set = ["FEATURE"] } +command = "cargo" +args = [ + "build", + "--profile", + "${PROFILE}", + "--no-default-features", + "--features", + "std,${FEATURE}", + "--target-dir", + "${TARGET_DIR}", +] +dependencies = ["image"] + +[tasks.run_fuzzer] 
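+# The fuzzer binary embeds QEMU system-mode emulation, so the arguments below are regular
+# QEMU flags: the MPS2-AN385 machine, the example.elf kernel, and a scratch qcow2 drive.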
+command = "${TARGET_DIR}/${PROFILE_DIR}/qemu_baremetal" +args = [ + "-icount", + "shift=auto,align=off,sleep=off", + "-machine", + "mps2-an385", + "-monitor", + "null", + "-kernel", + "${TARGET_DIR}/example.elf", + "-serial", + "null", + "-nographic", + "-snapshot", + "-drive", + "if=none,format=qcow2,file=${TARGET_DIR}/dummy.qcow2", + "-S", +] +dependencies = ["target"] + +[tasks.test_fuzzer] +condition = { env_set = ["FEATURE"] } +script_runner = "@shell" +script = ''' +TMP_DIR=$(mktemp -d) + +cargo make build_$FEATURE +timeout 20s ${TARGET_DIR}/${PROFILE_DIR}/qemu_baremetal -icount shift=auto,align=off,sleep=off -machine mps2-an385 -monitor null -kernel ${TARGET_DIR}/example.elf -serial null -nographic -snapshot -drive if=none,format=qcow2,file=${TARGET_DIR}/dummy.qcow2 -S | tee $TMP_DIR/fuzz.log 2>&1 || true + +if [ -z "$(grep 'Objective' $TMP_DIR/fuzz.log)" ]; then + echo "qemu_baremetal ${FEATURE}: Fuzzer did not find the objective in $TMP_DIR/fuzz.log" + exit 1 +else + echo "qemu_baremetal ${FEATURE}: Objective found." +fi +''' +dependencies = ["target"] + +[tasks.build_low_level] +command = "cargo" +args = [ + "make", + "-e", + "FEATURE=low_level", + "-e", + "TARGET_DEFINE=TARGET_CLASSIC", + "build_fuzzer", +] + +[tasks.test_low_level] +command = "cargo" +args = [ + "make", + "-e", + "FEATURE=low_level", + "-e", + "TARGET_DEFINE=TARGET_CLASSIC", + "test_fuzzer", +] + +[tasks.build_breakpoint] +command = "cargo" +args = [ + "make", + "-e", + "FEATURE=breakpoint", + "-e", + "TARGET_DEFINE=TARGET_BREAKPOINT", + "build_fuzzer", +] + +[tasks.test_breakpoint] +command = "cargo" +args = [ + "make", + "-e", + "FEATURE=breakpoint", + "-e", + "TARGET_DEFINE=TARGET_BREAKPOINT", + "test_fuzzer", +] + +[tasks.build_sync_exit] +command = "cargo" +args = [ + "make", + "-e", + "FEATURE=sync_exit", + "-e", + "TARGET_DEFINE=TARGET_SYNC_EXIT", + "build_fuzzer", +] + +[tasks.test_sync_exit] +command = "cargo" +args = [ + "make", + "-e", + "FEATURE=sync_exit", + "-e", + "TARGET_DEFINE=TARGET_SYNC_EXIT", + "test_fuzzer", +] + +[tasks.low_level] +command = "cargo" +args = [ + "make", + "-e", + "FEATURE=low_level", + "-e", + "TARGET_DEFINE=TARGET_CLASSIC", + "run_fuzzer", +] + +[tasks.breakpoint] +command = "cargo" +args = [ + "make", + "-e", + "FEATURE=breakpoint", + "-e", + "TARGET_DEFINE=TARGET_BREAKPOINT", + "run_fuzzer", +] + +[tasks.sync_exit] +command = "cargo" +args = [ + "make", + "-e", + "FEATURE=sync_exit", + "-e", + "TARGET_DEFINE=TARGET_SYNC_EXIT", + "run_fuzzer", +] + +[tasks.test] +clear = true +run_task = { name = ["test_low_level", "test_breakpoint", "test_sync_exit"] } + +[tasks.build] +clear = true +run_task = { name = ["build_low_level", "build_breakpoint", "build_sync_exit"] } + +[tasks.run] +alias = "low_level" + +[tasks.clean] +clear = true +script_runner = "@shell" +script = ''' +rm -rf ${CARGO_MAKE_CRATE_TARGET_DIRECTORY} +cargo clean +''' diff --git a/fuzzers/qemu_systemmode/README.md b/fuzzers/full_system/qemu_baremetal/README.md similarity index 83% rename from fuzzers/qemu_systemmode/README.md rename to fuzzers/full_system/qemu_baremetal/README.md index 12ad9af951..f151f3f0eb 100644 --- a/fuzzers/qemu_systemmode/README.md +++ b/fuzzers/full_system/qemu_baremetal/README.md @@ -1,10 +1,11 @@ -# Qemu systemmode with launcher +# Qemu baremetal with launcher This folder contains an example fuzzer for the qemu systemmode, using LLMP for fast multi-process fuzzing and crash detection. +The target is a simpel baremetal arm target. 
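+It is emulated with QEMU's MPS2-AN385 (Cortex-M3) machine model.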
It comes in three flavours (can be set through features): --`classic`: The low-level way to interact with QEMU. +-`low_level`: The low-level way to interact with QEMU. -`breakpoint`: Interaction with QEMU using the command system, leveraging breakpoints. -`sync_exit`: Interaction with QEMU using the command system, leveraging sync exits. @@ -36,7 +37,7 @@ It is also possible to run the fuzzer with the other features: cargo make ``` -With feature being `classic`, `breakpoint` or `sync_exit`. +With feature being `low_level`, `breakpoint` or `sync_exit`. This will build the desired fuzzer (src/fuzzer_.rs) and a small example binary based on FreeRTOS, which can run under a qemu emulation target. Since the instrumentation is based on snapshots, QEMU needs a virtual drive (even if it is unused...). diff --git a/fuzzers/qemu_systemmode/build.rs b/fuzzers/full_system/qemu_baremetal/build.rs similarity index 69% rename from fuzzers/qemu_systemmode/build.rs rename to fuzzers/full_system/qemu_baremetal/build.rs index 9f7f8443ac..9f2ee81991 100644 --- a/fuzzers/qemu_systemmode/build.rs +++ b/fuzzers/full_system/qemu_baremetal/build.rs @@ -5,7 +5,7 @@ macro_rules! assert_unique_feature { () => {}; ($first:tt $(,$rest:tt)*) => { $( - #[cfg(all(not(any(doc, feature = "clippy")), feature = $first, feature = $rest))] + #[cfg(all(not(any(doc, clippy)), feature = $first, feature = $rest))] compile_error!(concat!("features \"", $first, "\" and \"", $rest, "\" cannot be used together")); )* assert_unique_feature!($($rest),*); @@ -13,7 +13,7 @@ macro_rules! assert_unique_feature { } fn main() { - assert_unique_feature!("classic", "breakpoint", "sync_exit"); + assert_unique_feature!("low_level", "breakpoint", "sync_exit"); build_libafl_qemu(); } diff --git a/fuzzers/qemu_systemmode/corpus/random b/fuzzers/full_system/qemu_baremetal/corpus/random similarity index 100% rename from fuzzers/qemu_systemmode/corpus/random rename to fuzzers/full_system/qemu_baremetal/corpus/random diff --git a/fuzzers/qemu_systemmode/corpus/zero b/fuzzers/full_system/qemu_baremetal/corpus/zero similarity index 100% rename from fuzzers/qemu_systemmode/corpus/zero rename to fuzzers/full_system/qemu_baremetal/corpus/zero diff --git a/fuzzers/qemu_systemmode/example/main.c b/fuzzers/full_system/qemu_baremetal/example/main.c similarity index 89% rename from fuzzers/qemu_systemmode/example/main.c rename to fuzzers/full_system/qemu_baremetal/example/main.c index 40944fab37..3e94204ae0 100644 --- a/fuzzers/qemu_systemmode/example/main.c +++ b/fuzzers/full_system/qemu_baremetal/example/main.c @@ -2,13 +2,15 @@ #include "libafl_qemu.h" #endif +#ifndef TARGET_SYNC_EXIT int __attribute__((noinline)) BREAKPOINT() { for (;;) {} } +#endif int LLVMFuzzerTestOneInput(unsigned int *Data, unsigned int Size) { #ifdef TARGET_SYNC_EXIT - LIBAFL_QEMU_START_PHYS((unsigned int)Data, Size); + libafl_qemu_start_phys((void *)Data, Size); #endif if (Data[3] == 0) { while (1) {} @@ -27,9 +29,10 @@ int LLVMFuzzerTestOneInput(unsigned int *Data, unsigned int Size) { } } #ifdef TARGET_SYNC_EXIT - LIBAFL_QEMU_END(LIBAFL_QEMU_END_OK); -#endif + libafl_qemu_end(LIBAFL_QEMU_END_OK); +#else return BREAKPOINT(); +#endif } unsigned int FUZZ_INPUT[] = { 101, 201, 700, 230, 860, 234, 980, 200, 340, 678, 230, 134, 900, diff --git a/fuzzers/qemu_systemmode/example/mps2_m3.ld b/fuzzers/full_system/qemu_baremetal/example/mps2_m3.ld similarity index 100% rename from fuzzers/qemu_systemmode/example/mps2_m3.ld rename to fuzzers/full_system/qemu_baremetal/example/mps2_m3.ld diff --git 
a/fuzzers/qemu_systemmode/example/startup.c b/fuzzers/full_system/qemu_baremetal/example/startup.c similarity index 100% rename from fuzzers/qemu_systemmode/example/startup.c rename to fuzzers/full_system/qemu_baremetal/example/startup.c diff --git a/fuzzers/qemu_systemmode/src/fuzzer_breakpoint.rs b/fuzzers/full_system/qemu_baremetal/src/fuzzer_breakpoint.rs similarity index 80% rename from fuzzers/qemu_systemmode/src/fuzzer_breakpoint.rs rename to fuzzers/full_system/qemu_baremetal/src/fuzzer_breakpoint.rs index 8f9a16261f..d6a91bd267 100644 --- a/fuzzers/qemu_systemmode/src/fuzzer_breakpoint.rs +++ b/fuzzers/full_system/qemu_baremetal/src/fuzzer_breakpoint.rs @@ -1,6 +1,6 @@ //! A fuzzer using qemu in systemmode for binary-only coverage of kernels //! -use core::{ptr::addr_of_mut, time::Duration}; +use core::time::Duration; use std::{env, path::PathBuf, process}; use libafl::{ @@ -12,7 +12,7 @@ use libafl::{ fuzzer::{Fuzzer, StdFuzzer}, inputs::BytesInput, monitors::MultiMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::{CanTrack, HitcountsMapObserver, TimeObserver, VariableMapObserver}, schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, stages::{CalibrationStage, StdMutationalStage}, @@ -29,14 +29,14 @@ use libafl_bolts::{ }; use libafl_qemu::{ breakpoint::Breakpoint, - command::{EndCommand, StartCommand, StdCommandManager}, - edges::{edges_map_mut_ptr, QemuEdgeCoverageHelper, EDGES_MAP_SIZE_IN_USE, MAX_EDGES_FOUND}, + command::{EndCommand, StartCommand}, elf::EasyElf, emu::Emulator, - executor::{stateful::StatefulQemuExecutor, QemuExecutorState}, - EmulatorMemoryChunk, FastSnapshotManager, GuestPhysAddr, GuestReg, QemuHooks, - StdEmulatorExitHandler, + executor::QemuExecutor, + modules::edges::StdEdgeCoverageModule, + GuestPhysAddr, GuestReg, QemuMemoryChunk, }; +use libafl_targets::{edges_map_mut_ptr, EDGES_MAP_DEFAULT_SIZE, MAX_EDGES_FOUND}; // use libafl_qemu::QemuSnapshotBuilder; // for normal qemu snapshot @@ -83,60 +83,21 @@ pub fn fuzz() { .expect("Symbol or env BREAKPOINT not found"); println!("Breakpoint address = {breakpoint:#x}"); - let mut run_client = |state: Option<_>, mut mgr, _core_id| { - // Initialize QEMU + let mut run_client = |state: Option<_>, mut mgr, _client_description| { let args: Vec = env::args().collect(); - let env: Vec<(String, String)> = env::vars().collect(); - - // Choose Snapshot Builder - // let emu_snapshot_manager = QemuSnapshotBuilder::new(true); - let emu_snapshot_manager = FastSnapshotManager::new(); - - // Choose Exit Handler - let emu_exit_handler = StdEmulatorExitHandler::new(emu_snapshot_manager); - - // Choose Command Manager - let cmd_manager = StdCommandManager::new(); - - // Create emulator - let emu = Emulator::new(&args, &env, emu_exit_handler, cmd_manager).unwrap(); - - // Set breakpoints of interest with corresponding commands. 
- emu.add_breakpoint( - Breakpoint::with_command( - main_addr, - StartCommand::new(EmulatorMemoryChunk::phys( - input_addr, - unsafe { MAX_INPUT_SIZE } as GuestReg, - None, - )), - true, - ), - true, - ); - emu.add_breakpoint( - Breakpoint::with_command(breakpoint, EndCommand::new(Some(ExitKind::Ok)), false), - true, - ); - - let devices = emu.list_devices(); - println!("Devices = {:?}", devices); // The wrapped harness function, calling out to the LLVM-style harness let mut harness = - |input: &BytesInput, qemu_executor_state: &mut QemuExecutorState<_, _>| unsafe { - emu.run(input, qemu_executor_state) - .unwrap() - .try_into() - .unwrap() + |emulator: &mut Emulator<_, _, _, _, _>, state: &mut _, input: &BytesInput| unsafe { + emulator.run(state, input).unwrap().try_into().unwrap() }; // Create an observation channel using the coverage map - let edges_observer = unsafe { + let mut edges_observer = unsafe { HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( "edges", - OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_SIZE_IN_USE), - addr_of_mut!(MAX_EDGES_FOUND), + OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_DEFAULT_SIZE), + &raw mut MAX_EDGES_FOUND, )) .track_indices() }; @@ -144,6 +105,43 @@ pub fn fuzz() { // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); + // Initialize QEMU Emulator + let emu = Emulator::builder() + .qemu_cli(args) + .add_module( + StdEdgeCoverageModule::builder() + .map_observer(edges_observer.as_mut()) + .build()?, + ) + .build() + .unwrap(); + + // Set breakpoints of interest with corresponding commands. + emu.add_breakpoint( + Breakpoint::with_command( + main_addr, + StartCommand::new(QemuMemoryChunk::phys( + input_addr, + unsafe { MAX_INPUT_SIZE } as GuestReg, + None, + )) + .into(), + true, + ), + true, + ); + emu.add_breakpoint( + Breakpoint::with_command( + breakpoint, + EndCommand::new(Some(ExitKind::Ok)).into(), + false, + ), + true, + ); + + let devices = emu.list_devices(); + println!("Devices = {:?}", devices); + // Feedback to rate the interestingness of an input // This one is composed by two Feedbacks in OR let mut feedback = feedback_or!( @@ -182,11 +180,6 @@ pub fn fuzz() { // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - let mut hooks = QemuHooks::new( - emu.qemu().clone(), - tuple_list!(QemuEdgeCoverageHelper::default()), - ); - // Setup an havoc mutator with a mutational stage let mutator = StdScheduledMutator::new(havoc_mutations()); let calibration_feedback = MaxMapFeedback::new(&edges_observer); @@ -196,8 +189,8 @@ pub fn fuzz() { ); // Create a QEMU in-process executor - let mut executor = StatefulQemuExecutor::new( - &mut hooks, + let mut executor = QemuExecutor::new( + emu, &mut harness, tuple_list!(edges_observer, time_observer), &mut fuzzer, diff --git a/fuzzers/qemu_systemmode/src/fuzzer_classic.rs b/fuzzers/full_system/qemu_baremetal/src/fuzzer_low_level.rs similarity index 65% rename from fuzzers/qemu_systemmode/src/fuzzer_classic.rs rename to fuzzers/full_system/qemu_baremetal/src/fuzzer_low_level.rs index 87575d6592..aefd60af13 100644 --- a/fuzzers/qemu_systemmode/src/fuzzer_classic.rs +++ b/fuzzers/full_system/qemu_baremetal/src/fuzzer_low_level.rs @@ -1,6 +1,6 @@ //! A fuzzer using qemu in systemmode for binary-only coverage of kernels //! 
-use core::{ptr::addr_of_mut, time::Duration}; +use core::time::Duration; use std::{env, path::PathBuf, process}; use libafl::{ @@ -12,7 +12,7 @@ use libafl::{ fuzzer::{Fuzzer, StdFuzzer}, inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::{CanTrack, HitcountsMapObserver, TimeObserver, VariableMapObserver}, schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, stages::StdMutationalStage, @@ -22,7 +22,6 @@ use libafl::{ use libafl_bolts::{ core_affinity::Cores, current_nanos, - os::unix_signals::{Signal, CTRL_C_EXIT}, ownedref::OwnedMutSlice, rands::StdRand, shmem::{ShMemProvider, StdShMemProvider}, @@ -30,12 +29,11 @@ use libafl_bolts::{ AsSlice, }; use libafl_qemu::{ - edges::{edges_map_mut_ptr, QemuEdgeCoverageHelper, EDGES_MAP_SIZE_IN_USE, MAX_EDGES_FOUND}, - elf::EasyElf, - Qemu, QemuExecutor, QemuExitError, QemuExitReason, QemuHooks, QemuRWError, QemuShutdownCause, + config, elf::EasyElf, executor::QemuExecutor, modules::edges::StdEdgeCoverageModuleBuilder, + Emulator, GuestPhysAddr, Qemu, QemuExitError, QemuExitReason, QemuRWError, QemuShutdownCause, Regs, }; -use libafl_qemu_sys::GuestPhysAddr; +use libafl_targets::{edges_map_mut_ptr, EDGES_MAP_DEFAULT_SIZE, MAX_EDGES_FOUND}; pub static mut MAX_INPUT_SIZE: usize = 50; @@ -82,19 +80,54 @@ pub fn fuzz() { .expect("Symbol or env BREAKPOINT not found"); println!("Breakpoint address = {breakpoint:#x}"); - let mut run_client = |state: Option<_>, mut mgr, _core_id| { + let mut run_client = |state: Option<_>, mut mgr, _client_description| { + let target_dir = env::var("TARGET_DIR").expect("TARGET_DIR env not set"); + + // Create an observation channel using the coverage map + let mut edges_observer = unsafe { + HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( + "edges", + OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_DEFAULT_SIZE), + &raw mut MAX_EDGES_FOUND, + )) + .track_indices() + }; + // Initialize QEMU - let args: Vec = env::args().collect(); - let env: Vec<(String, String)> = env::vars().collect(); - let qemu = Qemu::init(&args, &env).unwrap(); + let qemu = Qemu::builder() + .machine("mps2-an385") + .monitor(config::Monitor::Null) + .kernel(format!("{target_dir}/example.elf")) + .serial(config::Serial::Null) + .no_graphic(true) + .snapshot(true) + .drives([config::Drive::builder() + .interface(config::DriveInterface::None) + .format(config::DiskImageFileFormat::Qcow2) + .file(format!("{target_dir}/dummy.qcow2")) + .build()]) + .start_cpu(false) + .build() + .expect("Failed to initialized QEMU"); + + let emulator_modules = tuple_list!(StdEdgeCoverageModuleBuilder::default() + .map_observer(edges_observer.as_mut()) + .build()?); + + let emulator = Emulator::empty() + .qemu(qemu) + .modules(emulator_modules) + .build()?; qemu.set_breakpoint(main_addr); + unsafe { match qemu.run() { Ok(QemuExitReason::Breakpoint(_)) => {} _ => panic!("Unexpected QEMU exit."), } } + qemu.remove_breakpoint(main_addr); qemu.set_breakpoint(breakpoint); // BREAKPOINT @@ -111,62 +144,56 @@ pub fn fuzz() { let snap = qemu.create_fast_snapshot(true); // The wrapped harness function, calling out to the LLVM-style harness - let mut harness = |input: &BytesInput| { - let target = input.target_bytes(); - let mut buf = target.as_slice(); - let len = buf.len(); - unsafe { - if len > MAX_INPUT_SIZE { - buf = &buf[0..MAX_INPUT_SIZE]; - // len = MAX_INPUT_SIZE; + let 
mut harness = + |emulator: &mut Emulator<_, _, _, _, _>, _state: &mut _, input: &BytesInput| { + let target = input.target_bytes(); + let mut buf = target.as_slice(); + let len = buf.len(); + unsafe { + if len > MAX_INPUT_SIZE { + buf = &buf[0..MAX_INPUT_SIZE]; + // len = MAX_INPUT_SIZE; + } + + qemu.write_phys_mem(input_addr, buf); + + match emulator.qemu().run() { + Ok(QemuExitReason::Breakpoint(_)) => {} // continue execution, nothing to do there. + Ok(QemuExitReason::Timeout) => return ExitKind::Timeout, // timeout, propagate + Ok(QemuExitReason::End(QemuShutdownCause::HostSignal(signal))) => { + // will take care of cleanly stopping the fuzzer. + signal.handle() + } + + Err(QemuExitError::UnexpectedExit) => return ExitKind::Crash, + e => panic!("Unexpected QEMU exit: {e:?}."), + } + + // If the execution stops at any point other than the designated breakpoint (e.g. a breakpoint on a panic method) we consider it a crash + let mut pcs = (0..qemu.num_cpus()) + .map(|i| qemu.cpu_from_index(i)) + .map(|cpu| -> Result { cpu.read_reg(Regs::Pc) }); + let ret = match pcs + .find(|pc| (breakpoint..breakpoint + 5).contains(pc.as_ref().unwrap_or(&0))) + { + Some(_) => ExitKind::Ok, + None => ExitKind::Crash, + }; + + // OPTION 1: restore only the CPU state (registers et. al) + // for (i, s) in saved_cpu_states.iter().enumerate() { + // emu.cpu_from_index(i).restore_state(s); + // } + + // OPTION 2: restore a slow vanilla QEMU snapshot + // emu.load_snapshot("start", true); + + // OPTION 3: restore a fast devices+mem snapshot + qemu.restore_fast_snapshot(snap); + + ret } - - qemu.write_phys_mem(input_addr, buf); - - match qemu.run() { - Ok(QemuExitReason::Breakpoint(_)) => {} - Ok(QemuExitReason::End(QemuShutdownCause::HostSignal( - Signal::SigInterrupt, - ))) => process::exit(CTRL_C_EXIT), - Err(QemuExitError::UnexpectedExit) => return ExitKind::Crash, - _ => panic!("Unexpected QEMU exit."), - } - - // If the execution stops at any point other then the designated breakpoint (e.g. a breakpoint on a panic method) we consider it a crash - let mut pcs = (0..qemu.num_cpus()) - .map(|i| qemu.cpu_from_index(i)) - .map(|cpu| -> Result { cpu.read_reg(Regs::Pc) }); - let ret = match pcs - .find(|pc| (breakpoint..breakpoint + 5).contains(pc.as_ref().unwrap_or(&0))) - { - Some(_) => ExitKind::Ok, - None => ExitKind::Crash, - }; - - // OPTION 1: restore only the CPU state (registers et. 
al) - // for (i, s) in saved_cpu_states.iter().enumerate() { - // emu.cpu_from_index(i).restore_state(s); - // } - - // OPTION 2: restore a slow vanilla QEMU snapshot - // emu.load_snapshot("start", true); - - // OPTION 3: restore a fast devices+mem snapshot - qemu.restore_fast_snapshot(snap); - - ret - } - }; - - // Create an observation channel using the coverage map - let edges_observer = unsafe { - HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( - "edges", - OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_SIZE_IN_USE), - addr_of_mut!(MAX_EDGES_FOUND), - )) - .track_indices() - }; + }; // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); @@ -209,12 +236,9 @@ pub fn fuzz() { // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - let mut hooks = - QemuHooks::new(qemu.clone(), tuple_list!(QemuEdgeCoverageHelper::default())); - // Create a QEMU in-process executor let mut executor = QemuExecutor::new( - &mut hooks, + emulator, &mut harness, tuple_list!(edges_observer, time_observer), &mut fuzzer, diff --git a/fuzzers/qemu_systemmode/src/fuzzer_sync_exit.rs b/fuzzers/full_system/qemu_baremetal/src/fuzzer_sync_exit.rs similarity index 78% rename from fuzzers/qemu_systemmode/src/fuzzer_sync_exit.rs rename to fuzzers/full_system/qemu_baremetal/src/fuzzer_sync_exit.rs index 78fe90f09d..fe6bc314f4 100644 --- a/fuzzers/qemu_systemmode/src/fuzzer_sync_exit.rs +++ b/fuzzers/full_system/qemu_baremetal/src/fuzzer_sync_exit.rs @@ -1,6 +1,6 @@ //! A fuzzer using qemu in systemmode for binary-only coverage of kernels //! -use core::{ptr::addr_of_mut, time::Duration}; +use core::time::Duration; use std::{env, path::PathBuf, process}; use libafl::{ @@ -11,7 +11,7 @@ use libafl::{ fuzzer::{Fuzzer, StdFuzzer}, inputs::BytesInput, monitors::MultiMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::{CanTrack, HitcountsMapObserver, TimeObserver, VariableMapObserver}, schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, stages::{CalibrationStage, StdMutationalStage}, @@ -26,14 +26,8 @@ use libafl_bolts::{ shmem::{ShMemProvider, StdShMemProvider}, tuples::tuple_list, }; -use libafl_qemu::{ - command::StdCommandManager, - edges::{edges_map_mut_ptr, QemuEdgeCoverageHelper, EDGES_MAP_SIZE_IN_USE, MAX_EDGES_FOUND}, - emu::Emulator, - executor::{stateful::StatefulQemuExecutor, QemuExecutorState}, - FastSnapshotManager, QemuHooks, StdEmulatorExitHandler, -}; - +use libafl_qemu::{emu::Emulator, executor::QemuExecutor, modules::edges::StdEdgeCoverageModule}; +use libafl_targets::{edges_map_mut_ptr, EDGES_MAP_DEFAULT_SIZE, MAX_EDGES_FOUND}; // use libafl_qemu::QemuSnapshotBuilder; for normal qemu snapshot pub fn fuzz() { @@ -49,42 +43,39 @@ pub fn fuzz() { let corpus_dirs = [PathBuf::from("./corpus")]; let objective_dir = PathBuf::from("./crashes"); - let mut run_client = |state: Option<_>, mut mgr, _core_id| { + let mut run_client = |state: Option<_>, mut mgr, _client_description| { // Initialize QEMU let args: Vec = env::args().collect(); - let env: Vec<(String, String)> = env::vars().collect(); - // let emu_snapshot_manager = QemuSnapshotBuilder::new(true); - let emu_snapshot_manager = FastSnapshotManager::new(); // Create a snapshot manager (normal or fast for now). 
- let emu_exit_handler: StdEmulatorExitHandler = - StdEmulatorExitHandler::new(emu_snapshot_manager); // Create an exit handler: it is the entity taking the decision of what should be done when QEMU returns. + // Create an observation channel using the coverage map + let mut edges_observer = unsafe { + HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( + "edges", + OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_DEFAULT_SIZE), + &raw mut MAX_EDGES_FOUND, + )) + .track_indices() + }; - let cmd_manager = StdCommandManager::new(); + // Choose modules to use + let modules = tuple_list!(StdEdgeCoverageModule::builder() + .map_observer(edges_observer.as_mut()) + .build()?); - let emu = Emulator::new(&args, &env, emu_exit_handler, cmd_manager).unwrap(); // Create the emulator + let emu = Emulator::builder() + .qemu_cli(args) + .modules(modules) + .build()?; let devices = emu.list_devices(); println!("Devices = {:?}", devices); // The wrapped harness function, calling out to the LLVM-style harness let mut harness = - |input: &BytesInput, qemu_executor_state: &mut QemuExecutorState<_, _>| unsafe { - emu.run(input, qemu_executor_state) - .unwrap() - .try_into() - .unwrap() + |emulator: &mut Emulator<_, _, _, _, _>, state: &mut _, input: &BytesInput| unsafe { + emulator.run(state, input).unwrap().try_into().unwrap() }; - // Create an observation channel using the coverage map - let edges_observer = unsafe { - HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( - "edges", - OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_SIZE_IN_USE), - addr_of_mut!(MAX_EDGES_FOUND), - )) - .track_indices() - }; - // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); @@ -126,11 +117,6 @@ pub fn fuzz() { // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - let mut hooks = QemuHooks::new( - emu.qemu().clone(), - tuple_list!(QemuEdgeCoverageHelper::default()), - ); - // Setup an havoc mutator with a mutational stage let mutator = StdScheduledMutator::new(havoc_mutations()); let calibration_feedback = MaxMapFeedback::new(&edges_observer); @@ -140,8 +126,8 @@ pub fn fuzz() { ); // Create a QEMU in-process executor - let mut executor = StatefulQemuExecutor::new( - &mut hooks, + let mut executor = QemuExecutor::new( + emu, &mut harness, tuple_list!(edges_observer, time_observer), &mut fuzzer, diff --git a/fuzzers/qemu_systemmode/src/main.rs b/fuzzers/full_system/qemu_baremetal/src/main.rs similarity index 78% rename from fuzzers/qemu_systemmode/src/main.rs rename to fuzzers/full_system/qemu_baremetal/src/main.rs index 8ca48d6eb6..e03a33f3a8 100644 --- a/fuzzers/qemu_systemmode/src/main.rs +++ b/fuzzers/full_system/qemu_baremetal/src/main.rs @@ -1,6 +1,6 @@ //! 
A libfuzzer-like fuzzer using qemu for binary-only coverage -#[cfg(all(target_os = "linux", feature = "classic"))] -mod fuzzer_classic; +#[cfg(all(target_os = "linux", feature = "low_level"))] +mod fuzzer_low_level; #[cfg(all(target_os = "linux", feature = "breakpoint"))] mod fuzzer_breakpoint; @@ -10,8 +10,8 @@ mod fuzzer_sync_exit; #[cfg(target_os = "linux")] pub fn main() { - #[cfg(feature = "classic")] - fuzzer_classic::fuzz(); + #[cfg(feature = "low_level")] + fuzzer_low_level::fuzz(); #[cfg(feature = "breakpoint")] fuzzer_breakpoint::fuzz(); diff --git a/fuzzers/full_system/qemu_linux_kernel/.gitignore b/fuzzers/full_system/qemu_linux_kernel/.gitignore new file mode 100644 index 0000000000..a16dc44028 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/.gitignore @@ -0,0 +1,2 @@ +*.qcow2 +corpus_gen/ diff --git a/fuzzers/full_system/qemu_linux_kernel/Cargo.toml b/fuzzers/full_system/qemu_linux_kernel/Cargo.toml new file mode 100644 index 0000000000..9035d9b1e1 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "qemu_linux_kernel" +version = "0.14.1" +authors = [ + "Romain Malmain ", + "Dongjia Zhang ", +] +edition = "2021" + +[features] +shared = ["libafl_qemu/shared"] + +[profile.release] +incremental = true +debug = true +lto = "fat" +codegen-units = 1 + +[dependencies] +libafl = { path = "../../../../../libafl" } +libafl_bolts = { path = "../../../../../libafl_bolts" } +libafl_qemu = { path = "../../../../../libafl_qemu", features = [ + "x86_64", + "systemmode", + #"paranoid_debug" +] } +libafl_qemu_sys = { path = "../../../../../libafl_qemu/libafl_qemu_sys", features = [ + "x86_64", + "systemmode", + #"paranoid_debug" +] } +env_logger = "0.11.5" + +[build-dependencies] +libafl_qemu_build = { path = "../../../../../libafl_qemu/libafl_qemu_build" } diff --git a/fuzzers/full_system/qemu_linux_kernel/Makefile.toml b/fuzzers/full_system/qemu_linux_kernel/Makefile.toml new file mode 100644 index 0000000000..9fec43ecbe --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/Makefile.toml @@ -0,0 +1,183 @@ +env_scripts = [''' +#!@duckscript +profile = get_env PROFILE + +if eq ${profile} "dev" + set_env PROFILE_DIR debug +else + set_env PROFILE_DIR ${profile} +end +''', ''' +#!@duckscript +runs_on_ci = get_env RUN_ON_CI + +if ${runs_on_ci} + cargo_target_dir = get_env CARGO_MAKE_CRATE_TARGET_DIRECTORY + set_env TARGET_DIR ${cargo_target_dir} +end +'''] + +[env] +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +WORKING_DIR = "${CARGO_MAKE_WORKING_DIRECTORY}" +TARGET_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}" +LIBAFL_QEMU_CLONE_DIR = { value = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/qemu-libafl-bridge", condition = { env_not_set = [ + "LIBAFL_QEMU_DIR", +] } } + +LINUX_BUILDER_URL = "git@github.com:AFLplusplus/linux-qemu-image-builder.git" +LINUX_BUILDER_DIR = { value = "${TARGET_DIR}/linux_builder", condition = { env_not_set = [ + "LINUX_BUILDER_DIR", +] } } +LINUX_BUILDER_OUT = "${LINUX_BUILDER_DIR}/output" + +[tasks.target_dir] +condition = { files_not_exist = [ + "${TARGET_DIR}", + "${TARGET_DIR}/runtime", + "${TARGET_DIR}/setup", +] } +script_runner = "@shell" +script = ''' +mkdir -p ${TARGET_DIR}/runtime +mkdir -p ${TARGET_DIR}/setup +''' + +[tasks.linux_builder_dir] +condition = { files_not_exist = ["${LINUX_BUILDER_DIR}"] } +script_runner = "@shell" +script = ''' +git clone ${LINUX_BUILDER_URL} ${LINUX_BUILDER_DIR} +''' + +[tasks.target] +dependencies = ["build", "linux_builder_dir"] 
+script_runner = "@shell" +script = ''' +git -C ${LINUX_BUILDER_DIR} pull + +# Copy setup & runtime fixed files +cp -r ${WORKING_DIR}/setup/* ${LINUX_BUILDER_DIR}/setup/ +cp -r ${WORKING_DIR}/runtime/* ${LINUX_BUILDER_DIR}/runtime/ + +# Copy generated libafl qemu header files to setup +cp ${TARGET_DIR}/${PROFILE_DIR}/include/* ${LINUX_BUILDER_DIR}/setup/ + +${LINUX_BUILDER_DIR}/build.sh +''' + +[tasks.target_update] +dependencies = ["build", "linux_builder_dir"] +script_runner = "@shell" +script = ''' +git -C ${LINUX_BUILDER_DIR} pull + +# Copy setup & runtime fixed files +cp -r ${WORKING_DIR}/setup/* ${LINUX_BUILDER_DIR}/setup/ +cp -r ${WORKING_DIR}/runtime/* ${LINUX_BUILDER_DIR}/runtime/ + +# Copy generated libafl qemu header files to setup +cp ${TARGET_DIR}/${PROFILE_DIR}/include/* ${LINUX_BUILDER_DIR}/setup/ + +${LINUX_BUILDER_DIR}/update.sh +''' + +[tasks.build] +dependencies = ["target_dir"] +command = "cargo" +args = ["build", "--profile", "${PROFILE}", "--target-dir", "${TARGET_DIR}"] + +[tasks.run] +dependencies = ["build"] +script_runner = "@shell" +script = ''' +rm -rf "${WORKING_DIR}/corpus_gen" + +# Find the bios dir of LibAFL QEMU +if [ ! -z "${LIBAFL_QEMU_DIR}" ]; then + LIBAFL_QEMU_BIOS_DIR=${LIBAFL_QEMU_DIR}/build/qemu-bundle/usr/local/share/qemu +else + LIBAFL_QEMU_BIOS_DIR=${LIBAFL_QEMU_CLONE_DIR}/build/qemu-bundle/usr/local/share/qemu +fi + +${TARGET_DIR}/${PROFILE_DIR}/qemu_systemmode_linux_kernel \ + -accel tcg \ + -m 4G \ + -drive if=pflash,format=raw,readonly=on,file="${LINUX_BUILDER_OUT}/OVMF_CODE.fd" \ + -drive if=pflash,format=raw,snapshot=off,file="${LINUX_BUILDER_OUT}/OVMF_VARS.fd" \ + -blockdev filename="${LINUX_BUILDER_OUT}/linux.qcow2",node-name=storage,driver=file \ + -blockdev driver=qcow2,file=storage,node-name=disk \ + -device virtio-scsi-pci,id=scsi0 \ + -device scsi-hd,bus=scsi0.0,drive=disk,id=virtio-disk0,bootindex=1 \ + -L "${LIBAFL_QEMU_BIOS_DIR}" \ + -nographic \ + -monitor null \ + -serial null \ + -snapshot +''' + +[tasks.debug] +dependencies = ["build"] +command = "time" +args = [ + "${TARGET_DIR}/${PROFILE_DIR}/qemu_systemmode_linux_kernel", + "-accel", + "kvm", + "-m", + "4G", + "-drive", + "if=pflash,format=raw,readonly=on,file=${LINUX_BUILDER_OUT}/OVMF_CODE.fd", + "-drive", + "if=pflash,format=raw,snapshot=off,file=${LINUX_BUILDER_OUT}/OVMF_VARS.fd", + "-blockdev", + "filename=${LINUX_BUILDER_OUT}/linux.qcow2,node-name=storage,driver=file", + "-blockdev", + "driver=qcow2,file=storage,node-name=disk", + "-device", + "virtio-scsi-pci,id=scsi0", + "-device", + "scsi-hd,bus=scsi0.0,drive=disk,id=virtio-disk0,bootindex=1", + "-L", + "${LIBAFL_QEMU_DIR}/build/qemu-bundle/usr/local/share/qemu", + "-snapshot", +] + +[tasks.perf] +command = "perf" +args = [ + "record", + "--call-graph", + "dwarf", + "${TARGET_DIR}/${PROFILE_DIR}/qemu_systemmode_linux_kernel", + "-accel", + "tcg", + "-m", + "4G", + "-drive", + "if=pflash,format=raw,readonly=on,file=${LINUX_BUILDER_OUT}/OVMF_CODE.fd", + "-drive", + "if=pflash,format=raw,snapshot=off,file=${LINUX_BUILDER_OUT}/OVMF_VARS.fd", + "-blockdev", + "filename=${LINUX_BUILDER_OUT}/linux.qcow2,node-name=storage,driver=file", + "-blockdev", + "driver=qcow2,file=storage,node-name=disk", + "-device", + "virtio-scsi-pci,id=scsi0", + "-device", + "scsi-hd,bus=scsi0.0,drive=disk,id=virtio-disk0,bootindex=1", + "-L", + "${LIBAFL_QEMU_DIR}/build/qemu-bundle/usr/local/share/qemu", + "-snapshot", + # "-icount", "shift=auto,align=off,sleep=off", + # "-monitor", "null", + # "-serial", "null", + # "-nographic", +] + 
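The build/run/debug/perf tasks above resolve their paths from the `[env]` variables declared at the top of this Makefile. A typical invocation could look like the sketch below; the checkout paths are illustrative, both exports are optional (the defaults point into the cargo target directory), and `cargo make target` additionally needs access to the linux image builder mentioned in the README.

```bash
# Illustrative paths: reuse existing checkouts instead of the defaults.
export LIBAFL_QEMU_DIR="$HOME/src/qemu-libafl-bridge"          # prebuilt LibAFL QEMU; the bios dir is taken from here
export LINUX_BUILDER_DIR="$HOME/src/linux-qemu-image-builder"  # linux image builder checkout

cargo make target   # build the fuzzer and the guest linux image
cargo make run      # start the fuzzer under QEMU systemmode
```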
+[tasks.clean] +clear = true +script_runner = "@shell" +script = ''' +rm -rf ${CARGO_MAKE_CRATE_TARGET_DIRECTORY} +cargo clean +''' diff --git a/fuzzers/full_system/qemu_linux_kernel/README.md b/fuzzers/full_system/qemu_linux_kernel/README.md new file mode 100644 index 0000000000..528a3d4414 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/README.md @@ -0,0 +1,39 @@ +# LibAFL QEMU Systemmode for Linux kernel fuzzing + +This folder contains an example linux kernel fuzzer using qemu systemmode. + +## Warning + +For now, only the fuzzer is public. We plan to release the auto-builder for linux +images in the near future. +If you wish to experiment now, you will need to build the linux image manually. + +## Prerequisite + +TODO + +## Build + +To build the target: +```bash +cargo make target +``` + +To build the fuzzer: +```bash +cargo make build +``` + +It is also possible to update the target if it only changes "runtime" files. +This is equivalent to rebuilding the target, it is only faster since it does not need to rebuild the image from scratch. +Check [The linux builder repository](https://github.com/AFLplusplus/linux-qemu-image-builder.git) for more details on the specifics. +```bash +cargo make target_update +``` + +## Run + +To run the fuzzer: +```bash +cargo make run +``` \ No newline at end of file diff --git a/fuzzers/full_system/qemu_linux_kernel/build.rs b/fuzzers/full_system/qemu_linux_kernel/build.rs new file mode 100644 index 0000000000..05933c4fee --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/build.rs @@ -0,0 +1,5 @@ +use libafl_qemu_build::build_libafl_qemu; + +fn main() { + build_libafl_qemu(); +} diff --git a/fuzzers/full_system/qemu_linux_kernel/corpus/random b/fuzzers/full_system/qemu_linux_kernel/corpus/random new file mode 100644 index 0000000000..25175d51d3 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/corpus/random @@ -0,0 +1 @@ +yJv lpZGօrsY˗CMRպ}S7;Io76l1ޘ1R^Όp>G&_|1;ro4ƯUE<`L"6VƷk4/"tf7Fގd]ܮ%|8#wNUn8%4o˗ diff --git a/fuzzers/full_system/qemu_linux_kernel/corpus/zero b/fuzzers/full_system/qemu_linux_kernel/corpus/zero new file mode 100644 index 0000000000..112363ac19 Binary files /dev/null and b/fuzzers/full_system/qemu_linux_kernel/corpus/zero differ diff --git a/fuzzers/full_system/qemu_linux_kernel/runtime/entrypoint.sh b/fuzzers/full_system/qemu_linux_kernel/runtime/entrypoint.sh new file mode 100644 index 0000000000..3d2024e4f7 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/runtime/entrypoint.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +insmod /setup/harness.ko +/setup/user diff --git a/fuzzers/full_system/qemu_linux_kernel/setup/Makefile b/fuzzers/full_system/qemu_linux_kernel/setup/Makefile new file mode 100644 index 0000000000..a46095abea --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/setup/Makefile @@ -0,0 +1,9 @@ +obj-m += harness.o + +all: + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules + gcc -Wall -Werror -o user user.c + +clean: + make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean + rm user \ No newline at end of file diff --git a/fuzzers/full_system/qemu_linux_kernel/setup/harness.c b/fuzzers/full_system/qemu_linux_kernel/setup/harness.c new file mode 100644 index 0000000000..c7d58c78f2 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/setup/harness.c @@ -0,0 +1,170 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "x509-parser.h" +#include "libafl_qemu.h" + +#define MAX_DEV 1 + +#define BUF_SIZE 4096 + 
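// Overview: loading this module creates the /dev/harness character device
// used by entrypoint.sh and user.c; opening the device is the fuzzing entry
// point, with harness_open() feeding fuzzer input to x509_cert_parse().
// kallsyms_lookup_name() is not exported to modules on recent kernels, so its
// address is first recovered with the kprobe trick below and then used to
// resolve x509_cert_parse at runtime.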
+static int harness_open(struct inode *inode, struct file *file); +static int harness_release(struct inode *inode, struct file *file); + +static const struct file_operations harness_fops = { + .owner = THIS_MODULE, + .open = harness_open, + .release = harness_release, +}; + +struct mychar_device_data { + struct cdev cdev; +}; + +static int dev_major = 0; +static struct class *harness_class = NULL; +static struct mychar_device_data harness_data; + +#define KPROBE_PRE_HANDLER(fname) \ + static int __kprobes fname(struct kprobe *p, struct pt_regs *regs) + +long unsigned int kln_addr = 0; +unsigned long (*kln_pointer)(const char *name) = NULL; + +static struct kprobe kp0, kp1; + +KPROBE_PRE_HANDLER(handler_pre0) { + kln_addr = (--regs->ip); + + return 0; +} + +KPROBE_PRE_HANDLER(handler_pre1) { + return 0; +} + +static int do_register_kprobe(struct kprobe *kp, char *symbol_name, + void *handler) { + int ret; + + kp->symbol_name = symbol_name; + kp->pre_handler = handler; + + ret = register_kprobe(kp); + if (ret < 0) { + pr_err("register_probe() for symbol %s failed, returned %d\n", symbol_name, + ret); + return ret; + } + + pr_info("Planted kprobe for symbol %s at %p\n", symbol_name, kp->addr); + + return ret; +} + +// Find kallsyms_lookup_name +// taken from +// https://github.com/zizzu0/LinuxKernelModules/blob/main/FindKallsymsLookupName.c +static int harness_find_kallsyms_lookup(void) { + int ret; + + ret = do_register_kprobe(&kp0, "kallsyms_lookup_name", handler_pre0); + if (ret < 0) return ret; + + ret = do_register_kprobe(&kp1, "kallsyms_lookup_name", handler_pre1); + if (ret < 0) { + unregister_kprobe(&kp0); + return ret; + } + + unregister_kprobe(&kp0); + unregister_kprobe(&kp1); + + lqprintf("kallsyms_lookup_name address = 0x%lx\n", kln_addr); + + kln_pointer = (unsigned long (*)(const char *name))kln_addr; + + return ret; +} + +static int harness_uevent(const struct device *dev, + struct kobj_uevent_env *env) { + add_uevent_var(env, "DEVMODE=%#o", 0666); + return 0; +} + +static int __init harness_init(void) { + int err; + dev_t dev; + + err = alloc_chrdev_region(&dev, 0, 1, "harness"); + + dev_major = MAJOR(dev); + + harness_class = class_create("harness"); + harness_class->dev_uevent = harness_uevent; + + cdev_init(&harness_data.cdev, &harness_fops); + harness_data.cdev.owner = THIS_MODULE; + + cdev_add(&harness_data.cdev, MKDEV(dev_major, 0), 1); + + device_create(harness_class, NULL, MKDEV(dev_major, 0), NULL, "harness"); + + harness_find_kallsyms_lookup(); + + return 0; +} + +static void __exit harness_exit(void) { + device_destroy(harness_class, MKDEV(dev_major, 0)); + + class_unregister(harness_class); + class_destroy(harness_class); + + unregister_chrdev_region(MKDEV(dev_major, 0), MINORMASK); +} + +static int harness_open(struct inode *inode, struct file *file) { + int ret; + lqprintf("harness: Device open\n"); + + char *data = kzalloc(BUF_SIZE, GFP_KERNEL); + data[0] = 0xff; // init page + + unsigned long x509_fn_addr = kln_pointer("x509_cert_parse"); + lqprintf("harness: x509 fn addr: 0x%lx\n", x509_fn_addr); + + // TODO: better filtering... 
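  // Guest-side LibAFL QEMU sequence: restrict tracing to the 0x1000 bytes
  // starting at x509_cert_parse, issue a test hypercall, then hand `data` to
  // the fuzzer; libafl_qemu_start_virt() returns the size of the injected
  // input, and libafl_qemu_end() reports the outcome of this run to the host.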
+ libafl_qemu_trace_vaddr_size(x509_fn_addr, 0x1000); + + libafl_qemu_test(); + + u64 buf_size = libafl_qemu_start_virt(data, BUF_SIZE); + + struct x509_certificate *cert_ret = x509_cert_parse(data, buf_size); + + libafl_qemu_end(LIBAFL_QEMU_END_OK); + + return 0; +} + +static int harness_release(struct inode *inode, struct file *file) { + lqprintf("harness: Device close\n"); + return 0; +} + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Slasti Mormanti"); + +module_init(harness_init); +module_exit(harness_exit); diff --git a/fuzzers/full_system/qemu_linux_kernel/setup/setup.sh b/fuzzers/full_system/qemu_linux_kernel/setup/setup.sh new file mode 100644 index 0000000000..56fafcc8b6 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/setup/setup.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +cd /setup +make clean +make -j +ls /setup \ No newline at end of file diff --git a/fuzzers/full_system/qemu_linux_kernel/setup/user.c b/fuzzers/full_system/qemu_linux_kernel/setup/user.c new file mode 100644 index 0000000000..4d3828f410 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/setup/user.c @@ -0,0 +1,19 @@ +#include +#include +#include +#include +#include + +int main() { + const char *device = "/dev/harness"; + int fd; + + // Open the device + fd = open(device, O_RDWR); + if (fd == -1) { return 1; } + + // Close the device + if (close(fd) == -1) { return 1; } + + return 0; +} \ No newline at end of file diff --git a/fuzzers/full_system/qemu_linux_kernel/setup/x509-parser.h b/fuzzers/full_system/qemu_linux_kernel/setup/x509-parser.h new file mode 100644 index 0000000000..7d4950aad0 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/setup/x509-parser.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* X.509 certificate parser + * + * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +// From https://lore.kernel.org/kvm/20240304065703.GA24373@wunner.de/T/ + +#include +#include +#include + +struct x509_certificate { + struct x509_certificate *next; + struct x509_certificate *signer; /* Certificate that signed this one */ + struct public_key *pub; /* Public key details */ + struct public_key_signature *sig; /* Signature parameters */ + char *issuer; /* Name of certificate issuer */ + char *subject; /* Name of certificate subject */ + struct asymmetric_key_id *id; /* Issuer + Serial number */ + struct asymmetric_key_id *skid; /* Subject + subjectKeyId (optional) */ + time64_t valid_from; + time64_t valid_to; + const void *tbs; /* Signed data */ + unsigned tbs_size; /* Size of signed data */ + unsigned raw_sig_size; /* Size of signature */ + const void *raw_sig; /* Signature data */ + const void *raw_serial; /* Raw serial number in ASN.1 */ + unsigned raw_serial_size; + unsigned raw_issuer_size; + const void *raw_issuer; /* Raw issuer name in ASN.1 */ + const void *raw_subject; /* Raw subject name in ASN.1 */ + unsigned raw_subject_size; + unsigned raw_skid_size; + const void *raw_skid; /* Raw subjectKeyId in ASN.1 */ + unsigned index; + bool seen; /* Infinite recursion prevention */ + bool verified; + bool self_signed; /* T if self-signed (check unsupported_sig too) */ + bool unsupported_sig; /* T if signature uses unsupported crypto */ + bool blacklisted; +}; + +struct x509_certificate *x509_cert_parse(const void *data, size_t datalen); +void x509_free_certificate(struct x509_certificate *cert); \ No newline at end of file diff --git a/fuzzers/full_system/qemu_linux_kernel/src/fuzzer.rs b/fuzzers/full_system/qemu_linux_kernel/src/fuzzer.rs new file mode 100644 index 0000000000..bb33be5f5f --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/src/fuzzer.rs @@ -0,0 +1,212 @@ +//! 
A fuzzer using qemu in systemmode for binary-only coverage of linux + +use core::time::Duration; +use std::{env, path::PathBuf, process}; + +use libafl::{ + corpus::{Corpus, InMemoryOnDiskCorpus, OnDiskCorpus}, + events::{launcher::Launcher, EventConfig}, + executors::ShadowExecutor, + feedback_or, feedback_or_fast, + feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, + fuzzer::{Fuzzer, StdFuzzer}, + inputs::BytesInput, + monitors::MultiMonitor, + mutators::{ + scheduled::{havoc_mutations, StdScheduledMutator}, + I2SRandReplaceBinonly, + }, + observers::{CanTrack, HitcountsMapObserver, TimeObserver, VariableMapObserver}, + schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, + stages::{ShadowTracingStage, StdMutationalStage}, + state::{HasCorpus, StdState}, + Error, +}; +use libafl_bolts::{ + core_affinity::Cores, + current_nanos, + ownedref::OwnedMutSlice, + rands::StdRand, + shmem::{ShMemProvider, StdShMemProvider}, + tuples::tuple_list, +}; +use libafl_qemu::{ + emu::Emulator, + executor::QemuExecutor, + modules::{ + cmplog::CmpLogObserver, + edges::{ + edges_map_mut_ptr, StdEdgeCoverageClassicModule, EDGES_MAP_ALLOCATED_SIZE, + MAX_EDGES_FOUND, + }, + CmpLogModule, + }, + // StdEmulatorDriver +}; + +pub fn fuzz() { + env_logger::init(); + + if let Ok(s) = env::var("FUZZ_SIZE") { + str::parse::(&s).expect("FUZZ_SIZE was not a number"); + }; + // Hardcoded parameters + let timeout = Duration::from_secs(60000); + let broker_port = 1337; + let cores = Cores::from_cmdline("1").unwrap(); + let corpus_dirs = [PathBuf::from("./corpus")]; + let objective_dir = PathBuf::from("./crashes"); + + let mut run_client = |state: Option<_>, mut mgr, _client_description| { + // Initialize QEMU + let args: Vec = env::args().collect(); + + // Choose modules to use + let modules = tuple_list!( + StdEdgeCoverageClassicModule::builder().build(), + CmpLogModule::default(), + ); + + // let driver = StdEmulatorDriver::builder() + // .print_commands(true) + // .build(); + + let emu = Emulator::builder() + .qemu_cli(args) + .modules(modules) + // .driver(driver) + .build()?; + + let devices = emu.list_devices(); + println!("Devices = {:?}", devices); + + // The wrapped harness function, calling out to the LLVM-style harness + let mut harness = + |emulator: &mut Emulator<_, _, _, _, _>, state: &mut _, input: &BytesInput| unsafe { + emulator.run(state, input).unwrap().try_into().unwrap() + }; + + // Create an observation channel using the coverage map + let edges_observer = unsafe { + HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( + "edges", + OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_ALLOCATED_SIZE), + &raw mut MAX_EDGES_FOUND, + )) + .track_indices() + }; + + // Create an observation channel to keep track of the execution time + let time_observer = TimeObserver::new("time"); + + // Create a cmplog observer + let cmplog_observer = CmpLogObserver::new("cmplog", true); + + // Feedback to rate the interestingness of an input + // This one is composed by two Feedbacks in OR + let mut feedback = feedback_or!( + // New maximization map feedback linked to the edges observer and the feedback state + MaxMapFeedback::new(&edges_observer), + // Time feedback, this one does not need a feedback state + TimeFeedback::new(&time_observer) + ); + + // A feedback to choose if an input is a solution or not + let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new()); + + // If not restarting, create a State from scratch + let mut state = 
state.unwrap_or_else(|| { + StdState::new( + // RNG + StdRand::with_seed(current_nanos()), + // Corpus that will be evolved, we keep it in memory for performance + InMemoryOnDiskCorpus::new("corpus_gen").unwrap(), + // Corpus in which we store solutions (crashes in this example), + // on disk so the user can get them after stopping the fuzzer + OnDiskCorpus::new(objective_dir.clone()).unwrap(), + // States of the feedbacks. + // The feedbacks can report the data that should persist in the State. + &mut feedback, + // Same for objective feedbacks + &mut objective, + ) + .unwrap() + }); + + // A minimization+queue policy to get testcasess from the corpus + let scheduler = + IndexesLenTimeMinimizerScheduler::new(&edges_observer, QueueScheduler::new()); + + // A fuzzer with feedbacks and a corpus scheduler + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + // Create a QEMU in-process executor + let mut executor = QemuExecutor::new( + emu, + &mut harness, + tuple_list!(edges_observer, time_observer), + &mut fuzzer, + &mut state, + &mut mgr, + timeout, + ) + .expect("Failed to create QemuExecutor"); + + // Instead of calling the timeout handler and restart the process, trigger a breakpoint ASAP + executor.break_on_timeout(); + + let mut executor = ShadowExecutor::new(executor, tuple_list!(cmplog_observer)); + + if state.must_load_initial_inputs() { + state + .load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &corpus_dirs) + .unwrap_or_else(|_| { + println!("Failed to load initial corpus at {:?}", &corpus_dirs); + process::exit(0); + }); + println!("We imported {} inputs from disk.", state.corpus().count()); + } + + // a CmpLog-based mutational stage + let i2s = StdMutationalStage::new(StdScheduledMutator::new(tuple_list!( + I2SRandReplaceBinonly::new() + ))); + + // Setup an havoc mutator with a mutational stage + let tracing = ShadowTracingStage::new(&mut executor); + let mutator = StdScheduledMutator::new(havoc_mutations()); + let mut stages = tuple_list!(tracing, i2s, StdMutationalStage::new(mutator),); + + match fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr) { + Ok(_) | Err(Error::ShuttingDown) => Ok(()), + Err(e) => return Err(e), + } + }; + + // The shared memory allocator + let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory"); + + // The stats reporter for the broker + let monitor = MultiMonitor::new(|s| println!("{s}")); + + // let monitor = SimpleMonitor::new(|s| println!("{s}")); + // let mut mgr = SimpleEventManager::new(monitor); + // run_client(None, mgr, 0); + + // Build and run a Launcher + match Launcher::builder() + .shmem_provider(shmem_provider) + .broker_port(broker_port) + .configuration(EventConfig::from_build_id()) + .monitor(monitor) + .run_client(&mut run_client) + .cores(&cores) + // .stdout_file(Some("/dev/null")) + .build() + .launch() + { + Ok(()) => (), + Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. Good bye."), + Err(err) => panic!("Failed to run launcher: {err:?}"), + } +} diff --git a/fuzzers/full_system/qemu_linux_kernel/src/main.rs b/fuzzers/full_system/qemu_linux_kernel/src/main.rs new file mode 100644 index 0000000000..b2cf41cb60 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_kernel/src/main.rs @@ -0,0 +1,14 @@ +//! A systemmode linux kernel example +//! 
+#[cfg(all(target_os = "linux"))] +mod fuzzer; + +#[cfg(target_os = "linux")] +pub fn main() { + fuzzer::fuzz(); +} + +#[cfg(not(target_os = "linux"))] +pub fn main() { + panic!("qemu and libafl_qemu is only supported on linux!"); +} diff --git a/fuzzers/full_system/qemu_linux_process/.gitignore b/fuzzers/full_system/qemu_linux_process/.gitignore new file mode 100644 index 0000000000..a16dc44028 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/.gitignore @@ -0,0 +1,2 @@ +*.qcow2 +corpus_gen/ diff --git a/fuzzers/full_system/qemu_linux_process/Cargo.toml b/fuzzers/full_system/qemu_linux_process/Cargo.toml new file mode 100644 index 0000000000..0f2f626f92 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "qemu_linux_process" +version = "0.14.1" +authors = ["Romain Malmain "] +edition = "2021" + +[features] +shared = ["libafl_qemu/shared"] + +[profile.release] +incremental = true +debug = true +lto = "fat" +codegen-units = 1 + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_qemu = { path = "../../../libafl_qemu", features = [ + "x86_64", + "systemmode", + # "paranoid_debug" +] } +libafl_qemu_sys = { path = "../../../libafl_qemu/libafl_qemu_sys", features = [ + "x86_64", + "systemmode", + # "paranoid_debug" +] } +env_logger = "0.11.5" +libafl_targets = { path = "../../../libafl_targets" } + +[build-dependencies] +libafl_qemu_build = { path = "../../../libafl_qemu/libafl_qemu_build" } diff --git a/fuzzers/full_system/qemu_linux_process/Makefile.toml b/fuzzers/full_system/qemu_linux_process/Makefile.toml new file mode 100644 index 0000000000..8317a1ff1a --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/Makefile.toml @@ -0,0 +1,202 @@ +env_scripts = [''' +#!@duckscript +profile = get_env PROFILE + +if eq ${profile} "dev" + set_env PROFILE_DIR debug +else + set_env PROFILE_DIR ${profile} +end +''', ''' +#!@duckscript +runs_on_ci = get_env RUN_ON_CI + +if ${runs_on_ci} + cargo_target_dir = get_env CARGO_MAKE_CRATE_TARGET_DIRECTORY + set_env TARGET_DIR ${cargo_target_dir} + set_env KERNEL ${cargo_target_dir}/example.elf +end +'''] + +[env] +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +WORKING_DIR = "${CARGO_MAKE_WORKING_DIRECTORY}" +TARGET_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}" +LIBAFL_QEMU_CLONE_DIR = { value = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/qemu-libafl-bridge", condition = { env_not_set = [ + "LIBAFL_QEMU_DIR", +] } } + +LINUX_BUILDER_URL = "git@github.com:AFLplusplus/linux-qemu-image-builder.git" +LINUX_BUILDER_DIR = { value = "${TARGET_DIR}/linux_builder", condition = { env_not_set = [ + "LINUX_BUILDER_DIR", +] } } +LINUX_BUILDER_OUT = "${LINUX_BUILDER_DIR}/output" + +[tasks.target_dir] +condition = { files_not_exist = [ + "${TARGET_DIR}", + "${TARGET_DIR}/runtime", + "${TARGET_DIR}/setup", +] } +script_runner = "@shell" +script = ''' +mkdir -p ${TARGET_DIR}/runtime +mkdir -p ${TARGET_DIR}/setup +''' + +[tasks.linux_builder_dir] +condition = { files_not_exist = ["${LINUX_BUILDER_DIR}"] } +script_runner = "@shell" +script = ''' +git clone ${LINUX_BUILDER_URL} ${LINUX_BUILDER_DIR} +''' + +[tasks.compile_target] +dependencies = ["target_dir", "linux_builder_dir"] +command = "clang" +args = [ + "-O0", + "-static", + "${WORKING_DIR}/example/harness.c", + "-o", + "${TARGET_DIR}/runtime/harness", + "-I", + "${TARGET_DIR}/${PROFILE_DIR}/include", +] + +[tasks.target] +dependencies = ["build", "compile_target"] 
+script_runner = "@shell" +script = ''' +git -C ${LINUX_BUILDER_DIR} pull + +# Copy generated harness +cp -r ${TARGET_DIR}/runtime/* ${LINUX_BUILDER_DIR}/runtime/ + +# Copy setup & runtime fixed files +cp -r ${WORKING_DIR}/setup/* ${LINUX_BUILDER_DIR}/setup/ +cp -r ${WORKING_DIR}/runtime/* ${LINUX_BUILDER_DIR}/runtime/ + +${LINUX_BUILDER_DIR}/build.sh +''' + +[tasks.target_update] +dependencies = ["build", "compile_target"] +script_runner = "@shell" +script = ''' +# Copy generated harness +cp -r ${TARGET_DIR}/runtime/* ${LINUX_BUILDER_DIR}/runtime/ + +# Copy setup & runtime fixed files +cp -r ${WORKING_DIR}/runtime/* ${LINUX_BUILDER_DIR}/runtime/ + +${LINUX_BUILDER_DIR}/update.sh +''' + +[tasks.build] +dependencies = ["target_dir"] +command = "cargo" +args = ["build", "--profile", "${PROFILE}", "--target-dir", "${TARGET_DIR}"] + +[tasks.run] +dependencies = ["build"] +script_runner = "@shell" +script = ''' +rm -rf "${WORKING_DIR}/corpus_gen" + +# Find the bios dir of LibAFL QEMU +if [ ! -z "${LIBAFL_QEMU_DIR}" ]; then + LIBAFL_QEMU_BIOS_DIR=${LIBAFL_QEMU_DIR}/build/qemu-bundle/usr/local/share/qemu +else + LIBAFL_QEMU_BIOS_DIR=${LIBAFL_QEMU_CLONE_DIR}/build/qemu-bundle/usr/local/share/qemu +fi + +cp ${LINUX_BUILDER_OUT}/OVMF_CODE.fd ${LINUX_BUILDER_OUT}/OVMF_CODE.fd.clone +cp ${LINUX_BUILDER_OUT}/OVMF_VARS.fd ${LINUX_BUILDER_OUT}/OVMF_VARS.fd.clone +cp ${LINUX_BUILDER_OUT}/linux.qcow2 ${LINUX_BUILDER_OUT}/linux.qcow2.clone + +${TARGET_DIR}/${PROFILE_DIR}/qemu_linux_process \ + -accel tcg \ + -m 4G \ + -drive if=pflash,format=raw,file="${LINUX_BUILDER_OUT}/OVMF_CODE.fd" `# OVMF code pflash` \ + -drive if=pflash,format=raw,file="${LINUX_BUILDER_OUT}/OVMF_VARS.fd" `# OVMF vars pflash` \ + -device virtio-scsi-pci,id=scsi0 `# SCSI bus` \ + -device scsi-hd,bus=scsi0.0,drive=disk,id=virtio-disk0,bootindex=1 \ + -blockdev driver=file,filename="${LINUX_BUILDER_OUT}/linux.qcow2",node-name=storage `# Backend file of "disk"` \ + -blockdev driver=qcow2,file=storage,node-name=disk `# QCOW2 "disk"` \ + -L "${LIBAFL_QEMU_BIOS_DIR}" \ + -nographic \ + -monitor null \ + -serial null + + # -snapshot + #-blockdev driver=syx-cow-cache,file=storage,node-name=storage-syx \ +# gdb --args +''' + +[tasks.debug] +dependencies = ["build"] +command = "time" +args = [ + "${TARGET_DIR}/${PROFILE_DIR}/qemu_linux_process", + "-accel", + "tcg", + "-m", + "4G", + "-drive", + "if=pflash,format=raw,file=${LINUX_BUILDER_OUT}/OVMF_CODE.fd", + "-drive", + "if=pflash,format=raw,file=${LINUX_BUILDER_OUT}/OVMF_VARS.fd", + "-blockdev", + "filename=${LINUX_BUILDER_OUT}/linux.qcow2,node-name=storage,driver=file", + "-blockdev", + "driver=qcow2,file=storage,node-name=disk", + "-device", + "virtio-scsi-pci,id=scsi0", + "-device", + "scsi-hd,bus=scsi0.0,drive=disk,id=virtio-disk0,bootindex=1", + "-L", + "${LIBAFL_QEMU_DIR}/build/qemu-bundle/usr/local/share/qemu", + + #"-snapshot", +] + +[tasks.perf] +command = "perf" +args = [ + "record", + "--call-graph", + "dwarf", + "${TARGET_DIR}/${PROFILE_DIR}/qemu_linux_process", + "-accel", + "tcg", + "-m", + "4G", + "-drive", + "if=pflash,format=raw,readonly=on,file=${LINUX_BUILDER_OUT}/OVMF_CODE.fd", + "-drive", + "if=pflash,format=raw,snapshot=off,file=${LINUX_BUILDER_OUT}/OVMF_VARS.fd", + "-blockdev", + "filename=${LINUX_BUILDER_OUT}/linux.qcow2,node-name=storage,driver=file", + "-blockdev", + "driver=qcow2,file=storage,node-name=disk", + "-device", + "virtio-scsi-pci,id=scsi0", + "-device", + "scsi-hd,bus=scsi0.0,drive=disk,id=virtio-disk0,bootindex=1", + "-L", + 
"${LIBAFL_QEMU_DIR}/build/qemu-bundle/usr/local/share/qemu", + "-snapshot", + # "-icount", "shift=auto,align=off,sleep=off", + # "-monitor", "null", + # "-serial", "null", + # "-nographic", +] + +[tasks.clean] +clear = true +script_runner = "@shell" +script = ''' +rm -rf ${CARGO_MAKE_CRATE_TARGET_DIRECTORY} +cargo clean +''' diff --git a/fuzzers/full_system/qemu_linux_process/README.md b/fuzzers/full_system/qemu_linux_process/README.md new file mode 100644 index 0000000000..08c9f01e82 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/README.md @@ -0,0 +1,40 @@ +# LibAFL QEMU Systemmode for Linux process fuzzing + +This folder contains an example linux process fuzzer using qemu systemmode. +This is demo, most of the time for classic linux process fuzzing, it is better to use a more conventional method. + +## Warning + +For now, only the fuzzer is public. We plan to release the auto-builder for linux +images in the near future. +If you wish to experiment now, you will need to build the linux image manually. + +## Prerequisite + +TODO + +## Build + +To build the target: +```bash +cargo make target +``` + +To build the fuzzer: +```bash +cargo make build +``` + +It is also possible to update the target if it only changes "runtime" files. +This is equivalent to rebuilding the target, it is only faster since it does not need to rebuild the image from scratch. +Check [The linux builder repository](https://github.com/AFLplusplus/linux-qemu-image-builder.git) for more details on the specifics. +```bash +cargo make target_update +``` + +## Run + +To run the fuzzer: +```bash +cargo make run +``` \ No newline at end of file diff --git a/fuzzers/full_system/qemu_linux_process/build.rs b/fuzzers/full_system/qemu_linux_process/build.rs new file mode 100644 index 0000000000..05933c4fee --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/build.rs @@ -0,0 +1,5 @@ +use libafl_qemu_build::build_libafl_qemu; + +fn main() { + build_libafl_qemu(); +} diff --git a/fuzzers/full_system/qemu_linux_process/corpus/random b/fuzzers/full_system/qemu_linux_process/corpus/random new file mode 100644 index 0000000000..25175d51d3 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/corpus/random @@ -0,0 +1 @@ +yJv lpZGօrsY˗CMRպ}S7;Io76l1ޘ1R^Όp>G&_|1;ro4ƯUE<`L"6VƷk4/"tf7Fގd]ܮ%|8#wNUn8%4o˗ diff --git a/fuzzers/full_system/qemu_linux_process/corpus/zero b/fuzzers/full_system/qemu_linux_process/corpus/zero new file mode 100644 index 0000000000..112363ac19 Binary files /dev/null and b/fuzzers/full_system/qemu_linux_process/corpus/zero differ diff --git a/fuzzers/full_system/qemu_linux_process/example/harness.c b/fuzzers/full_system/qemu_linux_process/example/harness.c new file mode 100644 index 0000000000..fccc5d2b68 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/example/harness.c @@ -0,0 +1,44 @@ +// Adapted from +// https://github.com/google/fuzzing/blob/master/tutorial/libFuzzer/fuzz_me.cc +#include +#include +#include +#include + +#include + +bool FuzzMe(const uint8_t *Data, size_t DataSize) { + if (DataSize > 3) { + if (Data[0] == 'F') { + if (Data[1] == 'U') { + if (Data[2] == 'Z') { + if (Data[3] == 'Z') { return true; } + } + } + } + } + + return false; +} + +int main() { + // Prepare some space for the input + uint8_t Data[10] = {0}; + + lqprintf("Fuzzing starts\n"); + + // Start fuzzer here + size_t len = libafl_qemu_start_virt(Data, 10); + + // Call the target + bool ret = FuzzMe(Data, len); + + // Return to fuzzer + if (ret) { + // "Bug" has been triggered + 
libafl_qemu_end(LIBAFL_QEMU_END_CRASH); + } else { + // Everything went well + libafl_qemu_end(LIBAFL_QEMU_END_OK); + } +} \ No newline at end of file diff --git a/fuzzers/full_system/qemu_linux_process/runtime/entrypoint.sh b/fuzzers/full_system/qemu_linux_process/runtime/entrypoint.sh new file mode 100644 index 0000000000..876c015c63 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/runtime/entrypoint.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +/runtime/harness + +# shutdown now \ No newline at end of file diff --git a/fuzzers/full_system/qemu_linux_process/setup/setup.sh b/fuzzers/full_system/qemu_linux_process/setup/setup.sh new file mode 100644 index 0000000000..049b630ebd --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/setup/setup.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +# Nothing to do \ No newline at end of file diff --git a/fuzzers/full_system/qemu_linux_process/src/fuzzer.rs b/fuzzers/full_system/qemu_linux_process/src/fuzzer.rs new file mode 100644 index 0000000000..ec8cf7beed --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/src/fuzzer.rs @@ -0,0 +1,196 @@ +//! A fuzzer using qemu in systemmode for binary-only coverage of linux + +use core::time::Duration; +use std::{env, path::PathBuf, process}; + +use libafl::{ + corpus::{Corpus, InMemoryOnDiskCorpus, OnDiskCorpus}, + events::{launcher::Launcher, EventConfig}, + executors::ShadowExecutor, + feedback_or, feedback_or_fast, + feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, + fuzzer::{Fuzzer, StdFuzzer}, + inputs::BytesInput, + monitors::MultiMonitor, + mutators::{havoc_mutations, I2SRandReplaceBinonly, StdScheduledMutator}, + observers::{CanTrack, HitcountsMapObserver, TimeObserver, VariableMapObserver}, + schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, + stages::{ShadowTracingStage, StdMutationalStage}, + state::{HasCorpus, StdState}, + Error, +}; +use libafl_bolts::{ + core_affinity::Cores, + current_nanos, + ownedref::OwnedMutSlice, + rands::StdRand, + shmem::{ShMemProvider, StdShMemProvider}, + tuples::tuple_list, +}; +use libafl_qemu::{ + emu::Emulator, + executor::QemuExecutor, + modules::{cmplog::CmpLogObserver, edges::StdEdgeCoverageClassicModule, CmpLogModule}, +}; +use libafl_targets::{edges_map_mut_ptr, EDGES_MAP_DEFAULT_SIZE, MAX_EDGES_FOUND}; + +pub fn fuzz() { + env_logger::init(); + + if let Ok(s) = env::var("FUZZ_SIZE") { + str::parse::(&s).expect("FUZZ_SIZE was not a number"); + }; + // Hardcoded parameters + let timeout = Duration::from_secs(99999999); + let broker_port = 1338; + let cores = Cores::from_cmdline("1").unwrap(); + let corpus_dirs = [PathBuf::from("./corpus")]; + let objective_dir = PathBuf::from("./crashes"); + + let mut run_client = |state: Option<_>, mut mgr, _client_description| { + // Initialize QEMU + let args: Vec = env::args().collect(); + + // Create an observation channel using the coverage map + let mut edges_observer = unsafe { + HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( + "edges", + OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_DEFAULT_SIZE), + &raw mut MAX_EDGES_FOUND, + )) + .track_indices() + }; + + // Choose modules to use + let modules = tuple_list!( + StdEdgeCoverageClassicModule::builder() + .map_observer(edges_observer.as_mut()) + .build()?, + CmpLogModule::default(), + ); + + let emu = Emulator::builder() + .qemu_cli(args) + .modules(modules) + .build()?; + + // The wrapped harness function, calling out to the LLVM-style harness + let mut harness = + |emulator: &mut Emulator<_, _, 
_, _, _>, state: &mut _, input: &BytesInput| unsafe { + emulator.run(state, input).unwrap().try_into().unwrap() + }; + + // Create an observation channel to keep track of the execution time + let time_observer = TimeObserver::new("time"); + + // Create a cmplog observer + let cmplog_observer = CmpLogObserver::new("cmplog", true); + + // Feedback to rate the interestingness of an input + // This one is composed by two Feedbacks in OR + let mut feedback = feedback_or!( + // New maximization map feedback linked to the edges observer and the feedback state + MaxMapFeedback::new(&edges_observer), + // Time feedback, this one does not need a feedback state + TimeFeedback::new(&time_observer) + ); + + // A feedback to choose if an input is a solution or not + let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new()); + + // If not restarting, create a State from scratch + let mut state = state.unwrap_or_else(|| { + StdState::new( + // RNG + StdRand::with_seed(current_nanos()), + // Corpus that will be evolved, we keep it in memory for performance + InMemoryOnDiskCorpus::new("corpus_gen").unwrap(), + // Corpus in which we store solutions (crashes in this example), + // on disk so the user can get them after stopping the fuzzer + OnDiskCorpus::new(objective_dir.clone()).unwrap(), + // States of the feedbacks. + // The feedbacks can report the data that should persist in the State. + &mut feedback, + // Same for objective feedbacks + &mut objective, + ) + .unwrap() + }); + + // A minimization+queue policy to get testcasess from the corpus + let scheduler = + IndexesLenTimeMinimizerScheduler::new(&edges_observer, QueueScheduler::new()); + + // A fuzzer with feedbacks and a corpus scheduler + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + // Create a QEMU in-process executor + let mut executor = QemuExecutor::new( + emu, + &mut harness, + tuple_list!(edges_observer, time_observer), + &mut fuzzer, + &mut state, + &mut mgr, + timeout, + ) + .expect("Failed to create QemuExecutor"); + + // Instead of calling the timeout handler and restart the process, trigger a breakpoint ASAP + executor.break_on_timeout(); + + let mut executor = ShadowExecutor::new(executor, tuple_list!(cmplog_observer)); + + if state.must_load_initial_inputs() { + state + .load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &corpus_dirs) + .unwrap_or_else(|_| { + println!("Failed to load initial corpus at {:?}", &corpus_dirs); + process::exit(0); + }); + println!("We imported {} inputs from disk.", state.corpus().count()); + } + + // a CmpLog-based mutational stage + let i2s = StdMutationalStage::new(StdScheduledMutator::new(tuple_list!( + I2SRandReplaceBinonly::new() + ))); + + // Setup an havoc mutator with a mutational stage + let tracing = ShadowTracingStage::new(&mut executor); + let mutator = StdScheduledMutator::new(havoc_mutations()); + let mut stages = tuple_list!(tracing, i2s, StdMutationalStage::new(mutator),); + + match fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr) { + Ok(_) | Err(Error::ShuttingDown) => Ok(()), + Err(e) => return Err(e), + } + }; + + // The shared memory allocator + let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory"); + + // The stats reporter for the broker + let monitor = MultiMonitor::new(|s| println!("{s}")); + + // let monitor = SimpleMonitor::new(|s| println!("{s}")); + // let mut mgr = SimpleEventManager::new(monitor); + // run_client(None, mgr, 0); + + // Build and run a Launcher + match 
Launcher::builder() + .shmem_provider(shmem_provider) + .broker_port(broker_port) + .configuration(EventConfig::from_build_id()) + .monitor(monitor) + .run_client(&mut run_client) + .cores(&cores) + // .stdout_file(Some("/dev/null")) + .build() + .launch() + { + Ok(()) => (), + Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. Good bye."), + Err(err) => panic!("Failed to run launcher: {err:?}"), + } +} diff --git a/fuzzers/full_system/qemu_linux_process/src/main.rs b/fuzzers/full_system/qemu_linux_process/src/main.rs new file mode 100644 index 0000000000..b2cf41cb60 --- /dev/null +++ b/fuzzers/full_system/qemu_linux_process/src/main.rs @@ -0,0 +1,14 @@ +//! A systemmode linux kernel example +//! +#[cfg(all(target_os = "linux"))] +mod fuzzer; + +#[cfg(target_os = "linux")] +pub fn main() { + fuzzer::fuzz(); +} + +#[cfg(not(target_os = "linux"))] +pub fn main() { + panic!("qemu and libafl_qemu is only supported on linux!"); +} diff --git a/fuzzers/baby_fuzzer_wasm/.cargo/config.toml b/fuzzers/fuzz_anything/baby_fuzzer_wasm/.cargo/config.toml similarity index 100% rename from fuzzers/baby_fuzzer_wasm/.cargo/config.toml rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/.cargo/config.toml diff --git a/fuzzers/baby_fuzzer_wasm/.gitignore b/fuzzers/fuzz_anything/baby_fuzzer_wasm/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_wasm/.gitignore rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/.gitignore diff --git a/fuzzers/baby_fuzzer_wasm/Cargo.toml b/fuzzers/fuzz_anything/baby_fuzzer_wasm/Cargo.toml similarity index 54% rename from fuzzers/baby_fuzzer_wasm/Cargo.toml rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/Cargo.toml index 18548132ba..43275df37e 100644 --- a/fuzzers/baby_fuzzer_wasm/Cargo.toml +++ b/fuzzers/fuzz_anything/baby_fuzzer_wasm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "baby_fuzzer_wasm" -version = "0.1.0" +version = "0.14.1" authors = ["Addison Crump "] edition = "2018" @@ -11,24 +11,28 @@ crate-type = ["cdylib", "rlib"] default = ["console_error_panic_hook"] [dependencies] -js-sys = "0.3" -wasm-bindgen = "0.2.63" +js-sys = "0.3.70" +log = { version = "0.4.22", features = ["release_max_level_info"] } +wasm-bindgen = "0.2.93" -libafl = { path = "../../libafl", default-features = false } -libafl_bolts = { path = "../../libafl_bolts", default-features = false } +libafl = { path = "../../../libafl", default-features = false } +libafl_bolts = { path = "../../../libafl_bolts", default-features = false } # The `console_error_panic_hook` crate provides better debugging of panics by # logging them with `console.error`. This is great for development, but requires # all the `std::fmt` and `std::panicking` infrastructure, so isn't great for # code size when deploying. 
-console_error_panic_hook = { version = "0.1.6", optional = true } +console_error_panic_hook = { version = "0.1.7", optional = true } -[dependencies.web-sys] -version = "0.3" -features = ['console', 'Window', 'Performance', 'PerformanceTiming'] +web-sys = { version = "0.3.70", features = [ + 'console', + 'Window', + 'Performance', + 'PerformanceTiming', +] } [dev-dependencies] -wasm-bindgen-test = "0.3.13" +wasm-bindgen-test = "0.3.43" [profile.release] opt-level = 3 diff --git a/fuzzers/baby_fuzzer_wasm/Makefile.toml b/fuzzers/fuzz_anything/baby_fuzzer_wasm/Makefile.toml similarity index 84% rename from fuzzers/baby_fuzzer_wasm/Makefile.toml rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/Makefile.toml index 38324abd36..d57074798b 100644 --- a/fuzzers/baby_fuzzer_wasm/Makefile.toml +++ b/fuzzers/fuzz_anything/baby_fuzzer_wasm/Makefile.toml @@ -1,10 +1,10 @@ [env] -FUZZER_NAME="fuzzer" +FUZZER_NAME = "fuzzer" PROJECT_DIR = { script = ["pwd"] } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -26,4 +26,4 @@ args = ["test", "--chrome", "--headless"] # Clean [tasks.clean] command = "cargo" -args = ["clean"] \ No newline at end of file +args = ["clean"] diff --git a/fuzzers/baby_fuzzer_wasm/README.md b/fuzzers/fuzz_anything/baby_fuzzer_wasm/README.md similarity index 100% rename from fuzzers/baby_fuzzer_wasm/README.md rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/README.md diff --git a/fuzzers/baby_fuzzer_wasm/pkg/.gitignore b/fuzzers/fuzz_anything/baby_fuzzer_wasm/pkg/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_wasm/pkg/.gitignore rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/pkg/.gitignore diff --git a/fuzzers/baby_fuzzer_wasm/pkg/index.html b/fuzzers/fuzz_anything/baby_fuzzer_wasm/pkg/index.html similarity index 100% rename from fuzzers/baby_fuzzer_wasm/pkg/index.html rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/pkg/index.html diff --git a/fuzzers/baby_fuzzer_wasm/pkg/package.json b/fuzzers/fuzz_anything/baby_fuzzer_wasm/pkg/package.json similarity index 100% rename from fuzzers/baby_fuzzer_wasm/pkg/package.json rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/pkg/package.json diff --git a/fuzzers/baby_fuzzer_wasm/src/lib.rs b/fuzzers/fuzz_anything/baby_fuzzer_wasm/src/lib.rs similarity index 94% rename from fuzzers/baby_fuzzer_wasm/src/lib.rs rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/src/lib.rs index 20f57a3072..6af42b74d2 100644 --- a/fuzzers/baby_fuzzer_wasm/src/lib.rs +++ b/fuzzers/fuzz_anything/baby_fuzzer_wasm/src/lib.rs @@ -11,11 +11,13 @@ use libafl::{ mutators::{havoc_mutations, StdScheduledMutator}, observers::StdMapObserver, schedulers::QueueScheduler, - stages::{ExecutionCountRestartHelperMetadata, StdMutationalStage}, + stages::{RetryCountRestartHelper, StdMutationalStage}, state::{HasSolutions, StdState}, Fuzzer, StdFuzzer, }; -use libafl_bolts::{rands::StdRand, serdeany::RegistryBuilder, tuples::tuple_list, AsSlice}; +use libafl_bolts::{ + nonzero, rands::StdRand, serdeany::RegistryBuilder, tuples::tuple_list, AsSlice, +}; use wasm_bindgen::prelude::*; use web_sys::{Performance, Window}; @@ -44,7 +46,7 @@ pub fn fuzz() { // No concurrency in WASM so these accesses are not racing. 
unsafe { RegistryBuilder::register::>(); - RegistryBuilder::register::(); + RegistryBuilder::register::(); } let mut signals = [0u8; 64]; @@ -126,7 +128,7 @@ pub fn fuzz() { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/baby_fuzzer_wasm/src/utils.rs b/fuzzers/fuzz_anything/baby_fuzzer_wasm/src/utils.rs similarity index 100% rename from fuzzers/baby_fuzzer_wasm/src/utils.rs rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/src/utils.rs diff --git a/fuzzers/baby_fuzzer_wasm/tests/web.rs b/fuzzers/fuzz_anything/baby_fuzzer_wasm/tests/web.rs similarity index 100% rename from fuzzers/baby_fuzzer_wasm/tests/web.rs rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/tests/web.rs diff --git a/fuzzers/baby_fuzzer_wasm/webdriver.json b/fuzzers/fuzz_anything/baby_fuzzer_wasm/webdriver.json similarity index 100% rename from fuzzers/baby_fuzzer_wasm/webdriver.json rename to fuzzers/fuzz_anything/baby_fuzzer_wasm/webdriver.json diff --git a/fuzzers/baby_fuzzer_swap_differential/.gitignore b/fuzzers/fuzz_anything/baby_no_std/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_swap_differential/.gitignore rename to fuzzers/fuzz_anything/baby_no_std/.gitignore diff --git a/fuzzers/fuzz_anything/baby_no_std/Cargo.toml b/fuzzers/fuzz_anything/baby_no_std/Cargo.toml new file mode 100644 index 0000000000..d0cd597a02 --- /dev/null +++ b/fuzzers/fuzz_anything/baby_no_std/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "baby_no_std" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { default-features = false, path = "../../../libafl" } +libafl_bolts = { default-features = false, path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +static-alloc = "0.2.3" + +[target.'cfg(unix)'.dependencies] +libc = "0.2.159" diff --git a/fuzzers/baby_no_std/Makefile.toml b/fuzzers/fuzz_anything/baby_no_std/Makefile.toml similarity index 54% rename from fuzzers/baby_no_std/Makefile.toml rename to fuzzers/fuzz_anything/baby_no_std/Makefile.toml index 7df8099700..2c31821d18 100644 --- a/fuzzers/baby_no_std/Makefile.toml +++ b/fuzzers/fuzz_anything/baby_no_std/Makefile.toml @@ -1,19 +1,28 @@ [env] -FUZZER_NAME="fuzzer" +FUZZER_NAME = "fuzzer" PROJECT_DIR = { script = ["pwd"] } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' # Fuzzer [tasks.build] command = "cargo" -args = ["build", "--profile", "${PROFILE}", "-Zbuild-std=core,alloc", "--target", "x86_64-unknown-linux-gnu"] +args = [ + "build", + "--profile", + "${PROFILE}", + "-Zbuild-std=core,alloc", + "--target", + "x86_64-unknown-linux-gnu", +] # Test [tasks.test] @@ -22,7 +31,7 @@ mac_alias = "unsupported" 
windows_alias = "unsupported" [tasks.test_unix] -script=''' +script = ''' cargo run -Zbuild-std=core,alloc --target x86_64-unknown-linux-gnu || true ''' dependencies = ["build"] @@ -33,4 +42,4 @@ script = "cargo +nightly build -Zbuild-std=core,alloc --target aarch64-unknown-n # Clean [tasks.clean] command = "cargo" -args = ["clean"] \ No newline at end of file +args = ["clean"] diff --git a/fuzzers/baby_no_std/README.md b/fuzzers/fuzz_anything/baby_no_std/README.md similarity index 100% rename from fuzzers/baby_no_std/README.md rename to fuzzers/fuzz_anything/baby_no_std/README.md diff --git a/fuzzers/baby_no_std/build.rs b/fuzzers/fuzz_anything/baby_no_std/build.rs similarity index 100% rename from fuzzers/baby_no_std/build.rs rename to fuzzers/fuzz_anything/baby_no_std/build.rs diff --git a/fuzzers/nautilus_sync/rust-toolchain b/fuzzers/fuzz_anything/baby_no_std/rust-toolchain similarity index 100% rename from fuzzers/nautilus_sync/rust-toolchain rename to fuzzers/fuzz_anything/baby_no_std/rust-toolchain diff --git a/fuzzers/baby_no_std/src/main.rs b/fuzzers/fuzz_anything/baby_no_std/src/main.rs similarity index 95% rename from fuzzers/baby_no_std/src/main.rs rename to fuzzers/fuzz_anything/baby_no_std/src/main.rs index 58b5acc28a..b07ffdea77 100644 --- a/fuzzers/baby_no_std/src/main.rs +++ b/fuzzers/fuzz_anything/baby_no_std/src/main.rs @@ -19,13 +19,13 @@ use libafl::{ generators::RandPrintablesGenerator, inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::StdMapObserver, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, state::StdState, }; -use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice}; +use libafl_bolts::{nonzero, rands::StdRand, tuples::tuple_list, AsSlice}; #[cfg(any(windows, unix))] use libc::{abort, printf}; use static_alloc::Bump; @@ -33,6 +33,7 @@ use static_alloc::Bump; #[global_allocator] static A: Bump<[u8; 512 * 1024 * 1024]> = Bump::uninit(); +#[cfg(not(test))] #[panic_handler] fn panic(_info: &PanicInfo) -> ! 
{ #[cfg(unix)] @@ -143,7 +144,7 @@ pub extern "C" fn main(_argc: isize, _argv: *const *const u8) -> isize { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/cargo_fuzz/Cargo.toml b/fuzzers/fuzz_anything/cargo_fuzz/Cargo.toml similarity index 50% rename from fuzzers/cargo_fuzz/Cargo.toml rename to fuzzers/fuzz_anything/cargo_fuzz/Cargo.toml index 9933834c8b..879df659c4 100644 --- a/fuzzers/cargo_fuzz/Cargo.toml +++ b/fuzzers/fuzz_anything/cargo_fuzz/Cargo.toml @@ -1,13 +1,23 @@ [package] name = "cargo_fuzz_test" edition = "2021" -version = "0.0.0" +version = "0.14.1" description = "test" -authors = ["Andrea Fioraldi ", "Dominik Maier "] +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] repository = "https://github.com/AFLplusplus/LibAFL/" keywords = ["fuzzing", "testing", "compiler"] -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/cargo_fuzz/Makefile.toml b/fuzzers/fuzz_anything/cargo_fuzz/Makefile.toml similarity index 100% rename from fuzzers/cargo_fuzz/Makefile.toml rename to fuzzers/fuzz_anything/cargo_fuzz/Makefile.toml diff --git a/fuzzers/fuzz_anything/cargo_fuzz/README.md b/fuzzers/fuzz_anything/cargo_fuzz/README.md new file mode 100644 index 0000000000..38a2b22414 --- /dev/null +++ b/fuzzers/fuzz_anything/cargo_fuzz/README.md @@ -0,0 +1,3 @@ +# cargo-fuzz + +This is a minimalistic example of how to use LibAFL with cargo-fuzz. It uses the `libafl_libfuzzer` compatibility layer in order to be libFuzzer compatible. diff --git a/fuzzers/cargo_fuzz/fuzz/.gitignore b/fuzzers/fuzz_anything/cargo_fuzz/fuzz/.gitignore similarity index 100% rename from fuzzers/cargo_fuzz/fuzz/.gitignore rename to fuzzers/fuzz_anything/cargo_fuzz/fuzz/.gitignore diff --git a/fuzzers/cargo_fuzz/fuzz/Cargo.toml b/fuzzers/fuzz_anything/cargo_fuzz/fuzz/Cargo.toml similarity index 85% rename from fuzzers/cargo_fuzz/fuzz/Cargo.toml rename to fuzzers/fuzz_anything/cargo_fuzz/fuzz/Cargo.toml index e67e8e7c5f..8cca2dc640 100644 --- a/fuzzers/cargo_fuzz/fuzz/Cargo.toml +++ b/fuzzers/fuzz_anything/cargo_fuzz/fuzz/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libafl-fuzz" -version = "0.0.0" +version = "0.14.1" publish = false edition = "2021" @@ -15,7 +15,7 @@ cargo-fuzz = true path = ".."
[dependencies.libfuzzer-sys] -path = "../../../libafl_libfuzzer" +path = "../../../../libafl_libfuzzer" package = "libafl_libfuzzer" [[bin]] diff --git a/fuzzers/cargo_fuzz/fuzz/fuzz_targets/fuzz_target_1.rs b/fuzzers/fuzz_anything/cargo_fuzz/fuzz/fuzz_targets/fuzz_target_1.rs similarity index 100% rename from fuzzers/cargo_fuzz/fuzz/fuzz_targets/fuzz_target_1.rs rename to fuzzers/fuzz_anything/cargo_fuzz/fuzz/fuzz_targets/fuzz_target_1.rs diff --git a/fuzzers/cargo_fuzz/src/lib.rs b/fuzzers/fuzz_anything/cargo_fuzz/src/lib.rs similarity index 79% rename from fuzzers/cargo_fuzz/src/lib.rs rename to fuzzers/fuzz_anything/cargo_fuzz/src/lib.rs index d2da382287..05a12ff8ed 100644 --- a/fuzzers/cargo_fuzz/src/lib.rs +++ b/fuzzers/fuzz_anything/cargo_fuzz/src/lib.rs @@ -1,5 +1,6 @@ +#[allow(clippy::collapsible_if)] pub fn do_thing(data: &[u8]) { - if data.get(0) == Some(&b'a') { + if data.first() == Some(&b'a') { if data.get(1) == Some(&b'b') { if data.get(2) == Some(&b'c') { if data.get(3) == Some(&b'd') { diff --git a/fuzzers/libafl_atheris/.gitignore b/fuzzers/fuzz_anything/libafl_atheris/.gitignore similarity index 100% rename from fuzzers/libafl_atheris/.gitignore rename to fuzzers/fuzz_anything/libafl_atheris/.gitignore diff --git a/fuzzers/fuzz_anything/libafl_atheris/Cargo.toml b/fuzzers/fuzz_anything/libafl_atheris/Cargo.toml new file mode 100644 index 0000000000..feafe5952b --- /dev/null +++ b/fuzzers/fuzz_anything/libafl_atheris/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "libafl_atheris" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "pointer_maps", + "sancov_cmplog", + "libfuzzer", + "sancov_8bit", +] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +clap = { version = "4.5.18", features = ["default"] } + +[lib] +name = "afl_atheris" +crate-type = ["staticlib"] diff --git a/fuzzers/libafl_atheris/Makefile b/fuzzers/fuzz_anything/libafl_atheris/Makefile similarity index 100% rename from fuzzers/libafl_atheris/Makefile rename to fuzzers/fuzz_anything/libafl_atheris/Makefile diff --git a/fuzzers/libafl_atheris/README.md b/fuzzers/fuzz_anything/libafl_atheris/README.md similarity index 100% rename from fuzzers/libafl_atheris/README.md rename to fuzzers/fuzz_anything/libafl_atheris/README.md diff --git a/fuzzers/libafl_atheris/src/lib.rs b/fuzzers/fuzz_anything/libafl_atheris/src/lib.rs similarity index 96% rename from fuzzers/libafl_atheris/src/lib.rs rename to fuzzers/fuzz_anything/libafl_atheris/src/lib.rs index 2ca552d22d..9d64af0899 100644 --- a/fuzzers/libafl_atheris/src/lib.rs +++ b/fuzzers/fuzz_anything/libafl_atheris/src/lib.rs @@ -22,7 +22,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::{I2SRandReplace, Tokens}, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, @@ -33,6 +34,7 @@ use libafl::{ }; use libafl_bolts::{ core_affinity::Cores, + nonzero, rands::StdRand, 
shmem::{ShMemProvider, StdShMemProvider}, tuples::{tuple_list, Merge}, @@ -128,7 +130,7 @@ pub extern "C" fn LLVMFuzzerRunDriver( // TODO: we need to handle Atheris calls to `exit` on errors somhow. - let mut run_client = |state: Option<_>, mut mgr, _core_id| { + let mut run_client = |state: Option<_>, mut mgr, _client_description| { // Create an observation channel using the coverage map let edges = unsafe { extra_counters() }; println!("edges: {:?}", edges); @@ -176,11 +178,11 @@ pub extern "C" fn LLVMFuzzerRunDriver( }); // Create a dictionary if not existing - if state.metadata_map().get::().is_none() { - for tokens_file in &token_files { - state.add_metadata(Tokens::from_file(tokens_file)?); - } - } + state.metadata_or_insert_with(|| { + Tokens::new() + .add_from_files(&token_files) + .expect("Could not read tokens files.") + }); // A minimization+queue policy to get testcasess from the corpus let scheduler = @@ -239,7 +241,7 @@ pub extern "C" fn LLVMFuzzerRunDriver( if state.must_load_initial_inputs() { if input_dirs.is_empty() { // Generator of printable bytearrays of max size 32 - let mut generator = RandBytesGenerator::new(32); + let mut generator = RandBytesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/baby_fuzzer_tokens/.gitignore b/fuzzers/fuzz_anything/push_harness/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_tokens/.gitignore rename to fuzzers/fuzz_anything/push_harness/.gitignore diff --git a/fuzzers/push_harness/Cargo.toml b/fuzzers/fuzz_anything/push_harness/Cargo.toml similarity index 55% rename from fuzzers/push_harness/Cargo.toml rename to fuzzers/fuzz_anything/push_harness/Cargo.toml index 4903acfd84..0fc39c468c 100644 --- a/fuzzers/push_harness/Cargo.toml +++ b/fuzzers/fuzz_anything/push_harness/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "push_harness" -version = "0.10.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] edition = "2021" [features] @@ -19,6 +22,7 @@ opt-level = 3 debug = true [dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } klo-routines = { version = "0.1.0", git = "https://github.com/andreafioraldi/klo-routines.git", rev = "b8e2fb6" } diff --git a/fuzzers/push_harness/README.md b/fuzzers/fuzz_anything/push_harness/README.md similarity index 100% rename from fuzzers/push_harness/README.md rename to fuzzers/fuzz_anything/push_harness/README.md diff --git a/fuzzers/push_harness/src/main.rs b/fuzzers/fuzz_anything/push_harness/src/main.rs similarity index 91% rename from fuzzers/push_harness/src/main.rs rename to fuzzers/fuzz_anything/push_harness/src/main.rs index 981a81ec5b..40b744c819 100644 --- a/fuzzers/push_harness/src/main.rs +++ b/fuzzers/fuzz_anything/push_harness/src/main.rs @@ -12,13 +12,13 @@ use libafl::{ generators::RandPrintablesGenerator, inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::StdMapObserver, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, state::StdState, }; -use libafl_bolts::{current_nanos, rands::StdRand, tuples::tuple_list, AsSlice}; +use libafl_bolts::{current_nanos, nonzero, rands::StdRand, 
tuples::tuple_list, AsSlice}; /// Coverage map with explicit assignments due to the lack of instrumentation static mut SIGNALS: [u8; 16] = [0; 16]; @@ -39,8 +39,12 @@ fn input_generator() { ExitKind::Ok }; + let signals_ptr = unsafe { &raw mut SIGNALS }; + let signals_len = unsafe { *signals_ptr }.len(); + // Create an observation channel using the signals map - let observer = unsafe { StdMapObserver::new("signals", &mut SIGNALS) }; + let observer = + unsafe { StdMapObserver::from_mut_ptr("signals", &raw mut SIGNALS as _, signals_len) }; // Feedback to rate the interestingness of an input let mut feedback = MaxMapFeedback::new(&observer); @@ -89,7 +93,7 @@ fn input_generator() { .expect("Failed to create the Executor"); // Generator of printable bytearrays of max size 32 - let mut generator = RandPrintablesGenerator::new(32); + let mut generator = RandPrintablesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state diff --git a/fuzzers/baby_fuzzer_unicode/.gitignore b/fuzzers/fuzz_anything/push_stage_harness/.gitignore similarity index 100% rename from fuzzers/baby_fuzzer_unicode/.gitignore rename to fuzzers/fuzz_anything/push_stage_harness/.gitignore diff --git a/fuzzers/fuzz_anything/push_stage_harness/Cargo.toml b/fuzzers/fuzz_anything/push_stage_harness/Cargo.toml new file mode 100644 index 0000000000..abe2527b32 --- /dev/null +++ b/fuzzers/fuzz_anything/push_stage_harness/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "push_stage_harness" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/push_stage_harness/README.md b/fuzzers/fuzz_anything/push_stage_harness/README.md similarity index 100% rename from fuzzers/push_stage_harness/README.md rename to fuzzers/fuzz_anything/push_stage_harness/README.md diff --git a/fuzzers/push_stage_harness/src/main.rs b/fuzzers/fuzz_anything/push_stage_harness/src/main.rs similarity index 96% rename from fuzzers/push_stage_harness/src/main.rs rename to fuzzers/fuzz_anything/push_stage_harness/src/main.rs index 27c78c26ef..e1eb51e1ff 100644 --- a/fuzzers/push_stage_harness/src/main.rs +++ b/fuzzers/fuzz_anything/push_stage_harness/src/main.rs @@ -15,7 +15,7 @@ use libafl::{ fuzzer::StdFuzzer, inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, observers::StdMapObserver, schedulers::{QueueScheduler, Scheduler}, stages::push::{PushStageSharedState, StdMutationalPushStage}, @@ -77,7 +77,7 @@ pub fn main() { let testcase = Testcase::new(BytesInput::new(b"aaaa".to_vec())); //self.feedback_mut().append_metadata(state, &mut testcase)?; let idx = state.corpus_mut().add(testcase).unwrap(); - scheduler.on_add(&mut state, idx).unwrap(); + >::on_add(&mut scheduler, &mut state, idx).unwrap(); // A fuzzer with feedbacks and a corpus scheduler let fuzzer = StdFuzzer::new(scheduler, feedback, objective); diff --git a/fuzzers/fuzzbench/Cargo.toml b/fuzzers/fuzzbench/Cargo.toml deleted file mode 100644 index 916ae5bca6..0000000000 --- a/fuzzers/fuzzbench/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ 
-[package] -name = "fuzzbench" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] -no_link_main = ["libafl_targets/libfuzzer_no_link_main"] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[profile.release-fuzzbench] -inherits = "release" -debug = false -strip = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "6.0" - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "sancov_cmplog", "libfuzzer"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -clap = { version = "4.0", features = ["default"] } -nix = { version = "0.29", features = ["fs"] } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "fuzzbench" -crate-type = ["staticlib"] diff --git a/fuzzers/fuzzbench_ctx/Cargo.toml b/fuzzers/fuzzbench_ctx/Cargo.toml deleted file mode 100644 index 767a03ebfc..0000000000 --- a/fuzzers/fuzzbench_ctx/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "fuzzbench_ctx" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] -no_link_main = ["libafl_targets/libfuzzer_no_link_main"] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[profile.release-fuzzbench] -inherits = "release" -debug = false -strip = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "6.0" - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "sancov_cmplog", "libfuzzer", "sancov_ctx"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -clap = { version = "4.0", features = ["default"] } -nix = { version = "0.29", features = ["fs"] } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "fuzzbench" -crate-type = ["staticlib"] diff --git a/fuzzers/fuzzbench_fork_qemu/Cargo.toml b/fuzzers/fuzzbench_fork_qemu/Cargo.toml deleted file mode 100644 index 7e907f0309..0000000000 --- a/fuzzers/fuzzbench_fork_qemu/Cargo.toml +++ /dev/null @@ -1,28 +0,0 @@ -[package] -name = "fuzzbench_fork_qemu" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[profile.release-fuzzbench] -inherits = "release" -debug = false -strip = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_qemu = { path = "../../libafl_qemu/", features = ["x86_64", "usermode"] } - -clap = { version = "4.0", features = ["default"] } -nix = { version = "0.29", features = ["fs"] } diff --git a/fuzzers/fuzzbench_fork_qemu/Makefile.toml b/fuzzers/fuzzbench_fork_qemu/Makefile.toml deleted file mode 100644 index 00ac7f259e..0000000000 --- a/fuzzers/fuzzbench_fork_qemu/Makefile.toml +++ /dev/null @@ -1,101 +0,0 @@ -# Variables -[env] -FUZZER_NAME='libpng_harness' -PROJECT_DIR = { script = ["pwd"] } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} - 
-[tasks.unsupported] -script_runner="@shell" -script=''' -echo "Qemu fuzzer not supported on windows" -''' - -# libpng -[tasks.libpng] -linux_alias = "libpng_unix" -mac_alias = "libpng_unix" -windows_alias = "unsupported" - -[tasks.libpng_unix] -condition = { files_not_exist = [ "./libpng-1.6.37" ] } -script_runner="@shell" -script=''' -wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz -tar -xvf v1.6.37.tar.gz -''' - -# fuzzer -[tasks.fuzzer] -linux_alias = "fuzzer_unix" -mac_alias = "fuzzer_unix" -windows_alias = "unsupported" - -[tasks.fuzzer_unix] -command = "cargo" -args = ["build", "--profile", "${PROFILE}"] - -# Harness -[tasks.harness] -linux_alias = "harness_unix" -mac_alias = "harness_unix" -windows_alias = "unsupported" - -[tasks.harness_unix] -script_runner="@shell" -script=''' -cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes -cd "${PROJECT_DIR}" -make -C libpng-1.6.37 -cc -c "${PROJECT_DIR}/libfuzzer_main.c" -# Build the libpng harness -c++ \ - ../libfuzzer_libpng/harness.cc \ - ./libpng-1.6.37/.libs/libpng16.a \ - ./libfuzzer_main.o \ - -I./libpng-1.6.37/ \ - -o ${FUZZER_NAME} \ - -lm -lz -''' -dependencies = ["libpng"] - -# Run the fuzzer -[tasks.run] -linux_alias = "run_unix" -mac_alias = "run_unix" -windows_alias = "unsupported" - -[tasks.run_unix] -command = "cargo" -args = ["run", "--profile", "${PROFILE_DIR}", "./${FUZZER_NAME}", "--", "--libafl-in", "../libfuzzer_libpng/corpus", "--libafl-out", "./out", "./${FUZZER_NAME}"] -dependencies = [ "harness", "fuzzer" ] - -# Run the fuzzer -[tasks.test] -linux_alias = "test_unix" -mac_alias = "test_unix" -windows_alias = "unsupported" - -# Short test -[tasks.test_unix] -script_runner = "@shell" -script=''' -echo "This test is skipped. 
QEMU-based fuzzer doesn't work on Github runners" -''' -dependencies = [ "harness", "fuzzer" ] - -# Clean up -[tasks.clean] -linux_alias = "clean_unix" -mac_alias = "clean_unix" -windows_alias = "unsupported" - -[tasks.clean_unix] -# Disable default `clean` definition -clear = true -script_runner="@shell" -script=''' -rm -f ./${FUZZER_NAME} libfuzzer_main.o -make -C libpng-1.6.37 clean -cargo clean -''' \ No newline at end of file diff --git a/fuzzers/fuzzbench_forkserver/Cargo.toml b/fuzzers/fuzzbench_forkserver/Cargo.toml deleted file mode 100644 index 00e4f6fcaa..0000000000 --- a/fuzzers/fuzzbench_forkserver/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "fuzzbench_forkserver" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[profile.release-fuzzbench] -inherits = "release" -debug = false -strip = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "6.0" - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/" } -clap = { version = "4.0", features = ["default"] } -nix = { version = "0.29", features = ["signal"] } diff --git a/fuzzers/fuzzbench_forkserver_cmplog/Cargo.toml b/fuzzers/fuzzbench_forkserver_cmplog/Cargo.toml deleted file mode 100644 index 31a80618d5..0000000000 --- a/fuzzers/fuzzbench_forkserver_cmplog/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "fuzzbench_forkserver_cmplog" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[profile.release-fuzzbench] -inherits = "release" -debug = false -strip = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "6.0" - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/" } -clap = { version = "4.0", features = ["default"] } -nix = { version = "0.29", features = ["signal"] } \ No newline at end of file diff --git a/fuzzers/fuzzbench_qemu/Cargo.toml b/fuzzers/fuzzbench_qemu/Cargo.toml deleted file mode 100644 index b4fd7da05d..0000000000 --- a/fuzzers/fuzzbench_qemu/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "fuzzbench_qemu" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[profile.release-fuzzbench] -inherits = "release" -debug = false -strip = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_qemu = { path = "../../libafl_qemu/", features = ["x86_64", "usermode"] } - -clap = { version = "4.0", features = ["default"] } -nix = { version = "0.29", features = ["fs"] } - diff --git a/fuzzers/fuzzbench_qemu/Makefile.toml b/fuzzers/fuzzbench_qemu/Makefile.toml deleted file mode 100644 index df0edf93ba..0000000000 --- a/fuzzers/fuzzbench_qemu/Makefile.toml +++ /dev/null @@ -1,101 +0,0 @@ -# Variables -[env] -FUZZER_NAME='libpng_harness' -PROJECT_DIR = { script = ["pwd"] } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} - -[tasks.unsupported] 
-script_runner="@shell" -script=''' -echo "Qemu fuzzer not supported on windows" -''' - -# libpng -[tasks.libpng] -linux_alias = "libpng_unix" -mac_alias = "libpng_unix" -windows_alias = "unsupported" - -[tasks.libpng_unix] -condition = { files_not_exist = [ "./libpng-1.6.37" ] } -script_runner="@shell" -script=''' -wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz -tar -xvf v1.6.37.tar.gz -''' - -# fuzzer -[tasks.fuzzer] -linux_alias = "fuzzer_unix" -mac_alias = "fuzzer_unix" -windows_alias = "unsupported" - -[tasks.fuzzer_unix] -command = "cargo" -args = ["build", "--profile", "${PROFILE}"] - -# Harness -[tasks.harness] -linux_alias = "harness_unix" -mac_alias = "harness_unix" -windows_alias = "unsupported" - -[tasks.harness_unix] -script_runner="@shell" -script=''' -cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes -cd "${PROJECT_DIR}" -make -C libpng-1.6.37 -cc -c "${PROJECT_DIR}/libfuzzer_main.c" -# Build the libpng harness -c++ \ - ../libfuzzer_libpng/harness.cc \ - ./libpng-1.6.37/.libs/libpng16.a \ - ./libfuzzer_main.o \ - -I./libpng-1.6.37/ \ - -o ${FUZZER_NAME} \ - -lm -lz -''' -dependencies = ["libpng"] - -# Run the fuzzer -[tasks.run] -linux_alias = "run_unix" -mac_alias = "run_unix" -windows_alias = "unsupported" - -[tasks.run_unix] -command = "cargo" -args = ["run", "--profile", "${PROFILE}", "./${FUZZER_NAME}", "--", "--libafl-in", "../libfuzzer_libpng/corpus", "--libafl-out", "./out", "./${FUZZER_NAME}"] -dependencies = [ "harness", "fuzzer" ] - -# Run the fuzzer -[tasks.test] -linux_alias = "test_unix" -mac_alias = "test_unix" -windows_alias = "unsupported" - -# Short test -[tasks.test_unix] -script_runner = "@shell" -script=''' -echo "This test is skipped. QEMU-based fuzzer doesn't work on Github runners" -''' -dependencies = [ "harness", "fuzzer" ] - -# Clean up -[tasks.clean] -linux_alias = "clean_unix" -mac_alias = "clean_unix" -windows_alias = "unsupported" - -[tasks.clean_unix] -# Disable default `clean` definition -clear = true -script_runner="@shell" -script=''' -rm -f ./${FUZZER_NAME} libfuzzer_main.o -make -C libpng-1.6.37 clean -cargo clean -''' diff --git a/fuzzers/fuzzbench_text/Cargo.toml b/fuzzers/fuzzbench_text/Cargo.toml deleted file mode 100644 index 7744263dad..0000000000 --- a/fuzzers/fuzzbench_text/Cargo.toml +++ /dev/null @@ -1,36 +0,0 @@ -[package] -name = "fuzzbench_text" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] -no_link_main = ["libafl_targets/libfuzzer_no_link_main"] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "6.0" - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "sancov_cmplog", "libfuzzer"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -clap = { version = "4.0", features = ["default"] } -nix = { version = "0.29", features = ["fs"] } -mimalloc = { version = "*", default-features = false } -content_inspector = "0.2.4" -#log = "0.4" - -[lib] -name = "fuzzbench" -crate-type = ["staticlib"] diff --git a/fuzzers/inprocess/dynamic_analysis/Cargo.toml b/fuzzers/inprocess/dynamic_analysis/Cargo.toml new file mode 100644 index 0000000000..9199eef50e --- /dev/null +++ 
b/fuzzers/inprocess/dynamic_analysis/Cargo.toml @@ -0,0 +1,51 @@ +[package] +name = "dynamic_analysis" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] +no_link_main = ["libafl_targets/libfuzzer_no_link_main"] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[profile.release-fuzzbench] +inherits = "release" +debug = false +strip = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +env_logger = "0.11.5" +once_cell = "1.19.0" +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "sancov_cmplog", + "libfuzzer", + "function-logging", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } + +clap = { version = "4.5.18", features = ["default"] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +nix = { version = "0.29.0", features = ["fs"] } +mimalloc = { version = "0.1.43", default-features = false } + +[lib] +name = "fuzzbench" +crate-type = ["staticlib"] diff --git a/fuzzers/dynamic_analysis/Makefile.toml b/fuzzers/inprocess/dynamic_analysis/Makefile.toml similarity index 71% rename from fuzzers/dynamic_analysis/Makefile.toml rename to fuzzers/inprocess/dynamic_analysis/Makefile.toml index 036c83e5f2..ec5d74e0e4 100644 --- a/fuzzers/dynamic_analysis/Makefile.toml +++ b/fuzzers/inprocess/dynamic_analysis/Makefile.toml @@ -1,13 +1,17 @@ [env] PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -FUZZER_NAME="fuzzer" -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +FUZZER_NAME = "fuzzer" +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -38,7 +42,16 @@ windows_alias = "unsupported" [tasks.fuzz_o_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["--libafl-no-link", "-O3","-I", "./Little-CMS/include", "-c", "cms_transform_fuzzer.cc", "-o", "cms_transform_fuzzer.o"] +args = [ + "--libafl-no-link", + "-O3", + "-I", + "./Little-CMS/include", + "-c", + "cms_transform_fuzzer.cc", + "-o", + "cms_transform_fuzzer.o", +] dependencies = ["cc", "cxx"] # Fuzzer @@ -49,7 +62,15 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["--libafl", "cms_transform_fuzzer.o", "./Little-CMS/src/.libs/liblcms2.a", "-o", "${FUZZER_NAME}", "-lm", "-lz"] +args = [ + "--libafl", + "cms_transform_fuzzer.o", + "./Little-CMS/src/.libs/liblcms2.a", + "-o", + "${FUZZER_NAME}", + "-lm", + "-lz", +] dependencies = ["cc", "cxx", "fuzz_o"] # Run @@ -59,8 +80,8 @@ mac_alias = "run_unix" windows_alias = "unsupported" [tasks.run_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf 
libafl_unix_shmem_server || true mkdir in || true echo a > in/a @@ -76,8 +97,8 @@ mac_alias = "test_unix" windows_alias = "unsupported" [tasks.test_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf libafl_unix_shmem_server || true mkdir in || true echo a > in/a @@ -101,8 +122,8 @@ mac_alias = "clean_unix" windows_alias = "unsupported" [tasks.clean_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm ./${FUZZER_NAME} || true rm fuzz.o || true ''' diff --git a/fuzzers/inprocess/dynamic_analysis/README.md b/fuzzers/inprocess/dynamic_analysis/README.md new file mode 100644 index 0000000000..a3ffde7597 --- /dev/null +++ b/fuzzers/inprocess/dynamic_analysis/README.md @@ -0,0 +1,12 @@ +# Dynamic Analysis Fuzzer + +This fuzzer shows how you can collect runtime analysis information during fuzzing with LibAFL. We use the [Little-CMS](https://github.com/mm2/Little-CMS) project for the example. +First, this fuzzer requires `nlohmann-json3-dev` to work. + +To run the fuzzer: + +1. Compile the fuzzer with `cargo build --release` +2. `mkdir analysis` and run `build.sh`. This will compile Little-CMS to extract the analysis information and generate a JSON file for each module. +3. Run `python3 concatenator.py analysis`. This will concatenate all the JSON files into a single file. This JSON file maps each function id to its analysis information. +4. Compile the fuzzer with `cargo make fuzzer`. This will instrument the fuzzer at every function entry point. Therefore, whenever we reach the entry of any function, we can log its id and record which functions were executed. +5. Run the fuzzer with `RUST_LOG=info ./fuzzer --input ./corpus --output ./out`. You'll see a stream of analysis data. \ No newline at end of file diff --git a/fuzzers/dynamic_analysis/build.rs b/fuzzers/inprocess/dynamic_analysis/build.rs similarity index 100% rename from fuzzers/dynamic_analysis/build.rs rename to fuzzers/inprocess/dynamic_analysis/build.rs diff --git a/fuzzers/dynamic_analysis/build.sh b/fuzzers/inprocess/dynamic_analysis/build.sh similarity index 100% rename from fuzzers/dynamic_analysis/build.sh rename to fuzzers/inprocess/dynamic_analysis/build.sh diff --git a/fuzzers/dynamic_analysis/clean.sh b/fuzzers/inprocess/dynamic_analysis/clean.sh similarity index 100% rename from fuzzers/dynamic_analysis/clean.sh rename to fuzzers/inprocess/dynamic_analysis/clean.sh diff --git a/fuzzers/dynamic_analysis/cms_transform_fuzzer.cc b/fuzzers/inprocess/dynamic_analysis/cms_transform_fuzzer.cc similarity index 100% rename from fuzzers/dynamic_analysis/cms_transform_fuzzer.cc rename to fuzzers/inprocess/dynamic_analysis/cms_transform_fuzzer.cc diff --git a/fuzzers/dynamic_analysis/concatenator.py b/fuzzers/inprocess/dynamic_analysis/concatenator.py similarity index 78% rename from fuzzers/dynamic_analysis/concatenator.py rename to fuzzers/inprocess/dynamic_analysis/concatenator.py index 72f09f56c6..f24d06ad1e 100755 --- a/fuzzers/dynamic_analysis/concatenator.py +++ b/fuzzers/inprocess/dynamic_analysis/concatenator.py @@ -4,33 +4,35 @@ import os import json import sys + def concatenate_json_files(input_dir): json_files = [] for root, dirs, files in os.walk(input_dir): for file in files: - if file.endswith('.json'): + if file.endswith(".json"): json_files.append(os.path.join(root, file)) - + data = dict() for json_file in json_files: - with open(json_file, 'r') as file: + with open(json_file, "r") as file: if os.stat(json_file).st_size == 0: # skip empty file else json.load()
fails continue json_data = json.load(file) print(type(json_data), file) data = data | json_data - - output_file = os.path.join(os.getcwd(), 'concatenated.json') - with open(output_file, 'w') as file: + + output_file = os.path.join(os.getcwd(), "concatenated.json") + with open(output_file, "w") as file: json.dump([data], file) - + print(f"JSON files concatenated successfully! Output file: {output_file}") -if __name__ == '__main__': + +if __name__ == "__main__": if len(sys.argv) != 2: print("Usage: python script.py ") sys.exit(1) - + input_directory = sys.argv[1] concatenate_json_files(input_directory) diff --git a/fuzzers/inprocess/dynamic_analysis/corpus/seed b/fuzzers/inprocess/dynamic_analysis/corpus/seed new file mode 100644 index 0000000000..84618ba47a Binary files /dev/null and b/fuzzers/inprocess/dynamic_analysis/corpus/seed differ diff --git a/fuzzers/dynamic_analysis/src/bin/libafl_cc.rs b/fuzzers/inprocess/dynamic_analysis/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/dynamic_analysis/src/bin/libafl_cc.rs rename to fuzzers/inprocess/dynamic_analysis/src/bin/libafl_cc.rs diff --git a/fuzzers/fuzzbench_text/src/bin/libafl_cxx.rs b/fuzzers/inprocess/dynamic_analysis/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/fuzzbench_text/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/dynamic_analysis/src/bin/libafl_cxx.rs diff --git a/fuzzers/dynamic_analysis/src/lib.rs b/fuzzers/inprocess/dynamic_analysis/src/lib.rs similarity index 94% rename from fuzzers/dynamic_analysis/src/lib.rs rename to fuzzers/inprocess/dynamic_analysis/src/lib.rs index 472d8b4e36..3a4d445d7a 100644 --- a/fuzzers/dynamic_analysis/src/lib.rs +++ b/fuzzers/inprocess/dynamic_analysis/src/lib.rs @@ -24,8 +24,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, mutators::{ - scheduled::havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, - StdMOptMutator, StdScheduledMutator, Tokens, + havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, StdMOptMutator, + StdScheduledMutator, Tokens, }, observers::{CanTrack, HitcountsMapObserver, ProfilingObserver, TimeObserver}, schedulers::{ @@ -176,7 +176,7 @@ fn run_testcases(filenames: &[&str]) { // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -191,7 +191,9 @@ fn run_testcases(filenames: &[&str]) { let mut buffer = vec![]; file.read_to_end(&mut buffer).expect("Buffer overflow"); - libfuzzer_test_one_input(&buffer); + unsafe { + libfuzzer_test_one_input(&buffer); + } } } @@ -250,7 +252,8 @@ fn fuzz( // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); - let func_list = unsafe { OwnedMutPtr::from_raw_mut(Lazy::force_mut(&mut FUNCTION_LIST)) }; + let func_list = + unsafe { OwnedMutPtr::from_raw_mut(Lazy::force_mut(&mut *&raw mut FUNCTION_LIST)) }; let profiling_observer = ProfilingObserver::new("concatenated.json", func_list)?; let callhook = CallHook::new(); @@ -296,7 +299,7 @@ fn fuzz( // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. 
let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -311,12 +314,17 @@ fn fuzz( 5, )?; - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( &edges_observer, - StdWeightedScheduler::with_schedule(&mut state, &edges_observer, Some(PowerSchedule::FAST)), + StdWeightedScheduler::with_schedule( + &mut state, + &edges_observer, + Some(PowerSchedule::fast()), + ), ); // A fuzzer with feedbacks and a corpus scheduler @@ -326,7 +334,9 @@ fn fuzz( let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; diff --git a/fuzzers/dynamic_analysis/stub_rt.c b/fuzzers/inprocess/dynamic_analysis/stub_rt.c similarity index 100% rename from fuzzers/dynamic_analysis/stub_rt.c rename to fuzzers/inprocess/dynamic_analysis/stub_rt.c diff --git a/fuzzers/fuzzbench_forkserver/.gitignore b/fuzzers/inprocess/fuzzbench/.gitignore similarity index 100% rename from fuzzers/fuzzbench_forkserver/.gitignore rename to fuzzers/inprocess/fuzzbench/.gitignore diff --git a/fuzzers/inprocess/fuzzbench/Cargo.toml b/fuzzers/inprocess/fuzzbench/Cargo.toml new file mode 100644 index 0000000000..b3435c753b --- /dev/null +++ b/fuzzers/inprocess/fuzzbench/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "fuzzbench" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] +no_link_main = ["libafl_targets/libfuzzer_no_link_main"] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[profile.release-fuzzbench] +inherits = "release" +debug = false +strip = true + +[build-dependencies] +cc = { version = "1.0.106", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "sancov_cmplog", + "libfuzzer", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +clap = { version = "4.5.18", features = ["default"] } +nix = { version = "0.29.0", features = ["fs"] } +mimalloc = { version = "0.1.43", default-features = false } + +[lib] +name = "fuzzbench" +crate-type = ["staticlib"] diff --git a/fuzzers/fuzzbench/Makefile.toml b/fuzzers/inprocess/fuzzbench/Makefile.toml similarity index 81% rename from fuzzers/fuzzbench/Makefile.toml rename to fuzzers/inprocess/fuzzbench/Makefile.toml index c3bfdb78c1..47b59c49b0 100644 --- a/fuzzers/fuzzbench/Makefile.toml +++ b/fuzzers/inprocess/fuzzbench/Makefile.toml @@ -1,13 +1,17 @@ [env] PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -FUZZER_NAME="fuzzer" -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", 
+] } } +FUZZER_NAME = "fuzzer" +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -59,8 +63,8 @@ mac_alias = "run_unix" windows_alias = "unsupported" [tasks.run_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf libafl_unix_shmem_server || true mkdir in || true echo a > in/a @@ -76,8 +80,8 @@ mac_alias = "test_unix" windows_alias = "unsupported" [tasks.test_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf libafl_unix_shmem_server || true mkdir in || true echo a > in/a @@ -101,8 +105,8 @@ mac_alias = "clean_unix" windows_alias = "unsupported" [tasks.clean_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm ./${FUZZER_NAME} || true rm fuzz.o || true ''' diff --git a/fuzzers/fuzzbench_qemu/README.md b/fuzzers/inprocess/fuzzbench/README.md similarity index 100% rename from fuzzers/fuzzbench_qemu/README.md rename to fuzzers/inprocess/fuzzbench/README.md diff --git a/fuzzers/inprocess/fuzzbench/fuzz.c b/fuzzers/inprocess/fuzzbench/fuzz.c new file mode 100644 index 0000000000..0460dd63d2 --- /dev/null +++ b/fuzzers/inprocess/fuzzbench/fuzz.c @@ -0,0 +1,19 @@ +#include +#include +#include + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + if (Size >= 8 && *(uint32_t *)Data == 0xaabbccdd) { abort(); } + char buf[8] = {'a', 'b', 'c', 'd'}; + + if (memcmp(Data, buf, 4) == 0) { abort(); } + return 0; +} + +/* +int main() { + + char buf [10] = {0}; + LLVMFuzzerTestOneInput(buf, 10); + +}*/ diff --git a/fuzzers/fuzzbench/src/bin/libafl_cc.rs b/fuzzers/inprocess/fuzzbench/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/fuzzbench/src/bin/libafl_cc.rs rename to fuzzers/inprocess/fuzzbench/src/bin/libafl_cc.rs diff --git a/fuzzers/libfuzzer_libmozjpeg/src/bin/libafl_cxx.rs b/fuzzers/inprocess/fuzzbench/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/libfuzzer_libmozjpeg/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/fuzzbench/src/bin/libafl_cxx.rs diff --git a/fuzzers/fuzzbench/src/lib.rs b/fuzzers/inprocess/fuzzbench/src/lib.rs similarity index 95% rename from fuzzers/fuzzbench/src/lib.rs rename to fuzzers/inprocess/fuzzbench/src/lib.rs index 9b162a48e3..710148e9cc 100644 --- a/fuzzers/fuzzbench/src/lib.rs +++ b/fuzzers/inprocess/fuzzbench/src/lib.rs @@ -25,8 +25,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, mutators::{ - scheduled::havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, - StdMOptMutator, StdScheduledMutator, Tokens, + havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, StdMOptMutator, + StdScheduledMutator, Tokens, }, observers::{CanTrack, HitcountsMapObserver, TimeObserver}, schedulers::{ @@ -174,7 +174,7 @@ fn run_testcases(filenames: &[&str]) { // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. 
let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -189,7 +189,9 @@ fn run_testcases(filenames: &[&str]) { let mut buffer = vec![]; file.read_to_end(&mut buffer).expect("Buffer overflow"); - libfuzzer_test_one_input(&buffer); + unsafe { + libfuzzer_test_one_input(&buffer); + } } } @@ -290,7 +292,7 @@ fn fuzz( // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -305,12 +307,17 @@ fn fuzz( 5, )?; - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( &edges_observer, - StdWeightedScheduler::with_schedule(&mut state, &edges_observer, Some(PowerSchedule::FAST)), + StdWeightedScheduler::with_schedule( + &mut state, + &edges_observer, + Some(PowerSchedule::fast()), + ), ); // A fuzzer with feedbacks and a corpus scheduler @@ -320,7 +327,9 @@ fn fuzz( let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; diff --git a/fuzzers/fuzzbench/stub_rt.c b/fuzzers/inprocess/fuzzbench/stub_rt.c similarity index 100% rename from fuzzers/fuzzbench/stub_rt.c rename to fuzzers/inprocess/fuzzbench/stub_rt.c diff --git a/fuzzers/inprocess/fuzzbench_ctx/Cargo.toml b/fuzzers/inprocess/fuzzbench_ctx/Cargo.toml new file mode 100644 index 0000000000..5fa2b86c14 --- /dev/null +++ b/fuzzers/inprocess/fuzzbench_ctx/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "fuzzbench_ctx" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] +no_link_main = ["libafl_targets/libfuzzer_no_link_main"] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[profile.release-fuzzbench] +inherits = "release" +debug = false +strip = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "sancov_cmplog", + "libfuzzer", + "sancov_ctx", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +clap = { version = "4.5.18", features = ["default"] } +nix = { version = "0.29.0", features = ["fs"] } +mimalloc = { version = "0.1.43", default-features = false } + +[lib] +name = "fuzzbench" +crate-type = ["staticlib"] diff --git a/fuzzers/fuzzbench_ctx/Makefile.toml b/fuzzers/inprocess/fuzzbench_ctx/Makefile.toml similarity index 81% rename from fuzzers/fuzzbench_ctx/Makefile.toml rename to fuzzers/inprocess/fuzzbench_ctx/Makefile.toml index c3bfdb78c1..47b59c49b0 100644 --- a/fuzzers/fuzzbench_ctx/Makefile.toml +++ b/fuzzers/inprocess/fuzzbench_ctx/Makefile.toml @@ -1,13 +1,17 @@ [env] PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = 
"${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -FUZZER_NAME="fuzzer" -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +FUZZER_NAME = "fuzzer" +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -59,8 +63,8 @@ mac_alias = "run_unix" windows_alias = "unsupported" [tasks.run_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf libafl_unix_shmem_server || true mkdir in || true echo a > in/a @@ -76,8 +80,8 @@ mac_alias = "test_unix" windows_alias = "unsupported" [tasks.test_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf libafl_unix_shmem_server || true mkdir in || true echo a > in/a @@ -101,8 +105,8 @@ mac_alias = "clean_unix" windows_alias = "unsupported" [tasks.clean_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm ./${FUZZER_NAME} || true rm fuzz.o || true ''' diff --git a/fuzzers/inprocess/fuzzbench_ctx/fuzz.c b/fuzzers/inprocess/fuzzbench_ctx/fuzz.c new file mode 100644 index 0000000000..0460dd63d2 --- /dev/null +++ b/fuzzers/inprocess/fuzzbench_ctx/fuzz.c @@ -0,0 +1,19 @@ +#include +#include +#include + +int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + if (Size >= 8 && *(uint32_t *)Data == 0xaabbccdd) { abort(); } + char buf[8] = {'a', 'b', 'c', 'd'}; + + if (memcmp(Data, buf, 4) == 0) { abort(); } + return 0; +} + +/* +int main() { + + char buf [10] = {0}; + LLVMFuzzerTestOneInput(buf, 10); + +}*/ diff --git a/fuzzers/fuzzbench_ctx/src/bin/libafl_cc.rs b/fuzzers/inprocess/fuzzbench_ctx/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/fuzzbench_ctx/src/bin/libafl_cc.rs rename to fuzzers/inprocess/fuzzbench_ctx/src/bin/libafl_cc.rs diff --git a/fuzzers/libfuzzer_libpng/src/bin/libafl_cxx.rs b/fuzzers/inprocess/fuzzbench_ctx/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/libfuzzer_libpng/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/fuzzbench_ctx/src/bin/libafl_cxx.rs diff --git a/fuzzers/fuzzbench_ctx/src/lib.rs b/fuzzers/inprocess/fuzzbench_ctx/src/lib.rs similarity index 94% rename from fuzzers/fuzzbench_ctx/src/lib.rs rename to fuzzers/inprocess/fuzzbench_ctx/src/lib.rs index b407fd01af..5d96df01a0 100644 --- a/fuzzers/fuzzbench_ctx/src/lib.rs +++ b/fuzzers/inprocess/fuzzbench_ctx/src/lib.rs @@ -28,8 +28,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, mutators::{ - scheduled::havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, - StdMOptMutator, StdScheduledMutator, Tokens, + havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, StdMOptMutator, + StdScheduledMutator, Tokens, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, schedulers::{ @@ -55,7 +55,7 @@ use libafl_bolts::{ use libafl_targets::autotokens; use libafl_targets::{ edges_map_mut_ptr, libfuzzer_initialize, libfuzzer_test_one_input, CmpLogObserver, CtxHook, - EDGES_MAP_SIZE_IN_USE, + 
EDGES_MAP_DEFAULT_SIZE, }; #[cfg(unix)] use nix::unistd::dup; @@ -179,7 +179,7 @@ fn run_testcases(filenames: &[&str]) { // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -194,7 +194,9 @@ fn run_testcases(filenames: &[&str]) { let mut buffer = vec![]; file.read_to_end(&mut buffer).expect("Buffer overflow"); - libfuzzer_test_one_input(&buffer); + unsafe { + libfuzzer_test_one_input(&buffer); + } } } @@ -250,7 +252,7 @@ fn fuzz( let edges_observer = HitcountsMapObserver::new(unsafe { StdMapObserver::from_mut_slice( "edges", - OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_SIZE_IN_USE), + OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_DEFAULT_SIZE), ) }) .track_indices(); @@ -300,7 +302,7 @@ fn fuzz( // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -315,12 +317,17 @@ fn fuzz( 5, )?; - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( &edges_observer, - StdWeightedScheduler::with_schedule(&mut state, &edges_observer, Some(PowerSchedule::FAST)), + StdWeightedScheduler::with_schedule( + &mut state, + &edges_observer, + Some(PowerSchedule::fast()), + ), ); // A fuzzer with feedbacks and a corpus scheduler @@ -330,7 +337,9 @@ fn fuzz( let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; diff --git a/fuzzers/fuzzbench_ctx/stub_rt.c b/fuzzers/inprocess/fuzzbench_ctx/stub_rt.c similarity index 100% rename from fuzzers/fuzzbench_ctx/stub_rt.c rename to fuzzers/inprocess/fuzzbench_ctx/stub_rt.c diff --git a/fuzzers/fuzzbench_text/.gitignore b/fuzzers/inprocess/fuzzbench_text/.gitignore similarity index 100% rename from fuzzers/fuzzbench_text/.gitignore rename to fuzzers/inprocess/fuzzbench_text/.gitignore diff --git a/fuzzers/inprocess/fuzzbench_text/Cargo.toml b/fuzzers/inprocess/fuzzbench_text/Cargo.toml new file mode 100644 index 0000000000..a66f66ab49 --- /dev/null +++ b/fuzzers/inprocess/fuzzbench_text/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "fuzzbench_text" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] +no_link_main = ["libafl_targets/libfuzzer_no_link_main"] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "sancov_cmplog", + "libfuzzer", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +clap = { version = "4.5.18", 
features = ["default"] } +nix = { version = "0.29.0", features = ["fs"] } +mimalloc = { version = "0.1.43", default-features = false } +content_inspector = "0.2.4" + +[lib] +name = "fuzzbench" +crate-type = ["staticlib"] diff --git a/fuzzers/fuzzbench_text/Makefile.toml b/fuzzers/inprocess/fuzzbench_text/Makefile.toml similarity index 78% rename from fuzzers/fuzzbench_text/Makefile.toml rename to fuzzers/inprocess/fuzzbench_text/Makefile.toml index 94faa860ce..aa9757af28 100644 --- a/fuzzers/fuzzbench_text/Makefile.toml +++ b/fuzzers/inprocess/fuzzbench_text/Makefile.toml @@ -1,13 +1,17 @@ [env] PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -FUZZER_NAME="fuzzer" -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +FUZZER_NAME = "fuzzer" +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -19,7 +23,7 @@ windows_alias = "unsupported" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -28,7 +32,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] # fuzz.o File [tasks.fuzz_o] @@ -59,8 +63,8 @@ mac_alias = "run_unix" windows_alias = "unsupported" [tasks.run_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf libafl_unix_shmem_server || true mkdir in || true echo a > in/a @@ -77,8 +81,8 @@ mac_alias = "test_unix" windows_alias = "unsupported" [tasks.test_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf libafl_unix_shmem_server || true mkdir in || true echo a > in/a @@ -103,8 +107,8 @@ mac_alias = "clean_unix" windows_alias = "unsupported" [tasks.clean_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm ./${FUZZER_NAME} || true rm fuzz.o || true -''' \ No newline at end of file +''' diff --git a/fuzzers/fuzzbench_text/README.md b/fuzzers/inprocess/fuzzbench_text/README.md similarity index 87% rename from fuzzers/fuzzbench_text/README.md rename to fuzzers/inprocess/fuzzbench_text/README.md index 13b314743c..7a180e1932 100644 --- a/fuzzers/fuzzbench_text/README.md +++ b/fuzzers/inprocess/fuzzbench_text/README.md @@ -4,7 +4,7 @@ This folder contains an example fuzzer tailored for fuzzbench. It uses the best possible setting, with the exception of a SimpleRestartingEventManager instead of an LlmpEventManager - since fuzzbench is single threaded. Real fuzz campaigns should consider using multithreaded LlmpEventManager, see the other examples. -This fuzzer autodetect if the dictionary and the initial inputs are text or binary data, and enables Grimoire in case of text. +This fuzzer autodetect if the passed-in tokens and the initial inputs are text or binary data, and enables Grimoire in case of text. 
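The detection itself can be done with the `content_inspector` crate that this fuzzer's Cargo.toml (above) pulls in. The snippet below is only an illustrative sketch of that idea, not the exact code in `src/lib.rs`; the directory path and the "every file must look like text" rule are assumptions made for the example.

```rust
use std::{fs, path::Path};

use content_inspector::{inspect, ContentType};

/// Returns `true` if every seed file in `dir` classifies as text.
fn corpus_is_text(dir: &Path) -> std::io::Result<bool> {
    for entry in fs::read_dir(dir)? {
        let bytes = fs::read(entry?.path())?;
        // `inspect` samples the buffer and reports BINARY or a UTF text encoding.
        if inspect(&bytes) == ContentType::BINARY {
            return Ok(false);
        }
    }
    Ok(true)
}

fn main() -> std::io::Result<()> {
    // Hypothetical seed directory; the real fuzzer takes its inputs from CLI args.
    let text = corpus_is_text(Path::new("./corpus"))?;
    println!("enable Grimoire: {text}");
    Ok(())
}
```

If the inputs classify as text, the text variant of this fuzzer adds the Grimoire stages on top of the usual havoc/token mutations; otherwise it runs the plain binary pipeline.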
## Build diff --git a/fuzzers/fuzzbench_text/fuzz.c b/fuzzers/inprocess/fuzzbench_text/fuzz.c similarity index 100% rename from fuzzers/fuzzbench_text/fuzz.c rename to fuzzers/inprocess/fuzzbench_text/fuzz.c diff --git a/fuzzers/fuzzbench_text/src/bin/libafl_cc.rs b/fuzzers/inprocess/fuzzbench_text/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/fuzzbench_text/src/bin/libafl_cc.rs rename to fuzzers/inprocess/fuzzbench_text/src/bin/libafl_cc.rs diff --git a/fuzzers/libfuzzer_libpng_accounting/src/bin/libafl_cxx.rs b/fuzzers/inprocess/fuzzbench_text/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_accounting/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/fuzzbench_text/src/bin/libafl_cxx.rs diff --git a/fuzzers/fuzzbench_text/src/lib.rs b/fuzzers/inprocess/fuzzbench_text/src/lib.rs similarity index 96% rename from fuzzers/fuzzbench_text/src/lib.rs rename to fuzzers/inprocess/fuzzbench_text/src/lib.rs index b8924015ca..1218689df9 100644 --- a/fuzzers/fuzzbench_text/src/lib.rs +++ b/fuzzers/inprocess/fuzzbench_text/src/lib.rs @@ -30,7 +30,7 @@ use libafl::{ GrimoireExtensionMutator, GrimoireRandomDeleteMutator, GrimoireRecursiveReplacementMutator, GrimoireStringReplacementMutator, }, - scheduled::havoc_mutations, + havoc_mutations, token_mutations::I2SRandReplace, tokens_mutations, StdMOptMutator, StdScheduledMutator, Tokens, }, @@ -236,7 +236,7 @@ fn run_testcases(filenames: &[&str]) { // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -251,7 +251,9 @@ fn run_testcases(filenames: &[&str]) { let mut buffer = vec![]; file.read_to_end(&mut buffer).expect("Buffer overflow"); - libfuzzer_test_one_input(&buffer); + unsafe { + libfuzzer_test_one_input(&buffer); + } } } @@ -357,7 +359,7 @@ fn fuzz_binary( // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -372,7 +374,8 @@ fn fuzz_binary( 5, )?; - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( @@ -380,7 +383,7 @@ fn fuzz_binary( StdWeightedScheduler::with_schedule( &mut state, &edges_observer, - Some(PowerSchedule::EXPLORE), + Some(PowerSchedule::explore()), ), ); @@ -391,7 +394,9 @@ fn fuzz_binary( let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -569,7 +574,7 @@ fn fuzz_text( // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. 
let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -584,7 +589,8 @@ fn fuzz_text( 5, )?; - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); let grimoire_mutator = StdScheduledMutator::with_max_stack_pow( tuple_list!( @@ -597,6 +603,7 @@ fn fuzz_text( ), 3, ); + let grimoire = StdMutationalStage::transforming(grimoire_mutator); // A minimization+queue policy to get testcasess from the corpus @@ -605,7 +612,7 @@ fn fuzz_text( StdWeightedScheduler::with_schedule( &mut state, &edges_observer, - Some(PowerSchedule::EXPLORE), + Some(PowerSchedule::explore()), ), ); @@ -616,7 +623,9 @@ fn fuzz_text( let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; diff --git a/fuzzers/libfuzzer_libmozjpeg/.gitignore b/fuzzers/inprocess/libfuzzer_libmozjpeg/.gitignore similarity index 100% rename from fuzzers/libfuzzer_libmozjpeg/.gitignore rename to fuzzers/inprocess/libfuzzer_libmozjpeg/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_libmozjpeg/Cargo.toml b/fuzzers/inprocess/libfuzzer_libmozjpeg/Cargo.toml new file mode 100644 index 0000000000..038116b6c1 --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_libmozjpeg/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "libfuzzer_libmozjpeg" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_edges", + "sancov_value_profile", + "libfuzzer", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } + +[lib] +name = "libfuzzer_libmozjpeg" +crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libmozjpeg/Makefile.toml b/fuzzers/inprocess/libfuzzer_libmozjpeg/Makefile.toml similarity index 71% rename from fuzzers/libfuzzer_libmozjpeg/Makefile.toml rename to fuzzers/inprocess/libfuzzer_libmozjpeg/Makefile.toml index f96e02ef2c..d74d288058 100644 --- a/fuzzers/libfuzzer_libmozjpeg/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_libmozjpeg/Makefile.toml @@ -1,17 +1,21 @@ # Variables [env] -FUZZER_NAME='fuzzer_mozjpeg' -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +FUZZER_NAME = 'fuzzer_mozjpeg' +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } LIBAFL_CC = 
'${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx' FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' PROJECT_DIR = { script = ["pwd"] } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this platform" ''' @@ -22,9 +26,9 @@ mac_alias = "mozjpeg_unix" windows_alias = "unsupported" [tasks.mozjpeg_unix] -condition = { files_not_exist = ["./mozjpeg-4.0.3"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = ["./mozjpeg-4.0.3"] } +script_runner = "@shell" +script = ''' wget https://github.com/mozilla/mozjpeg/archive/v4.0.3.tar.gz tar -xzvf v4.0.3.tar.gz ''' @@ -37,7 +41,7 @@ windows_alias = "unsupported" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -46,7 +50,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] # Library [tasks.lib] @@ -55,12 +59,12 @@ mac_alias = "lib_unix" windows_alias = "unsupported" [tasks.lib_unix] -script=''' +script = ''' cd mozjpeg-4.0.3 && cmake . -DENABLE_SHARED=false -DPNG_SUPPORTED=false -DCMAKE_C_COMPILER="${LIBAFL_CC}" -DCMAKE_CXX_COMPILER="${LIBAFL_CXX}" -G "Unix Makefiles" cd "${PROJECT_DIR}" make -C mozjpeg-4.0.3 ''' -dependencies = [ "mozjpeg", "cxx", "cc" ] +dependencies = ["mozjpeg", "cxx", "cc"] # Harness @@ -71,8 +75,18 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/mozjpeg-4.0.3/libjpeg.a", "${PROJECT_DIR}/mozjpeg-4.0.3/libturbojpeg.a", "-I", "${PROJECT_DIR}/mozjpeg-4.0.3/", "-o", "${FUZZER_NAME}", "-lm", "-lz"] -dependencies = [ "lib", "cxx", "cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/mozjpeg-4.0.3/libjpeg.a", + "${PROJECT_DIR}/mozjpeg-4.0.3/libturbojpeg.a", + "-I", + "${PROJECT_DIR}/mozjpeg-4.0.3/", + "-o", + "${FUZZER_NAME}", + "-lm", + "-lz", +] +dependencies = ["lib", "cxx", "cc"] # Run the fuzzer [tasks.run] @@ -82,12 +96,12 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} & sleep 0.2 ./${FUZZER_NAME} ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Test [tasks.test] @@ -97,7 +111,7 @@ windows_alias = "unsupported" [tasks.test_linux] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true (timeout 31s ./${FUZZER_NAME} | tee fuzz_stdout.log 2>/dev/null || true) & sleep 0.2 @@ -109,10 +123,10 @@ else exit 1 fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test_mac] -script=''' +script = ''' echo "Skipping build on MacOS as libpng in Github is ancient, see LibAFL GH issue #254" ''' @@ -125,8 +139,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} make -C mozjpeg-4.0.3 clean cargo clean diff --git a/fuzzers/libfuzzer_libmozjpeg/README.md b/fuzzers/inprocess/libfuzzer_libmozjpeg/README.md similarity index 100% rename from fuzzers/libfuzzer_libmozjpeg/README.md rename to fuzzers/inprocess/libfuzzer_libmozjpeg/README.md diff --git a/fuzzers/libfuzzer_libmozjpeg/build.rs b/fuzzers/inprocess/libfuzzer_libmozjpeg/build.rs similarity index 100% 
rename from fuzzers/libfuzzer_libmozjpeg/build.rs rename to fuzzers/inprocess/libfuzzer_libmozjpeg/build.rs diff --git a/fuzzers/libfuzzer_libmozjpeg/corpus/blank.jpg b/fuzzers/inprocess/libfuzzer_libmozjpeg/corpus/blank.jpg similarity index 100% rename from fuzzers/libfuzzer_libmozjpeg/corpus/blank.jpg rename to fuzzers/inprocess/libfuzzer_libmozjpeg/corpus/blank.jpg diff --git a/fuzzers/libfuzzer_libmozjpeg/harness.cc b/fuzzers/inprocess/libfuzzer_libmozjpeg/harness.cc similarity index 100% rename from fuzzers/libfuzzer_libmozjpeg/harness.cc rename to fuzzers/inprocess/libfuzzer_libmozjpeg/harness.cc diff --git a/fuzzers/libfuzzer_libmozjpeg/hook_allocs.c b/fuzzers/inprocess/libfuzzer_libmozjpeg/hook_allocs.c similarity index 92% rename from fuzzers/libfuzzer_libmozjpeg/hook_allocs.c rename to fuzzers/inprocess/libfuzzer_libmozjpeg/hook_allocs.c index 1399261c65..e1d7e2203e 100644 --- a/fuzzers/libfuzzer_libmozjpeg/hook_allocs.c +++ b/fuzzers/inprocess/libfuzzer_libmozjpeg/hook_allocs.c @@ -7,9 +7,9 @@ #ifdef _WIN32 #define posix_memalign(p, a, s) \ (((*(p)) = _aligned_malloc((s), (a))), *(p) ? 0 : errno) - #define RETADDR (uintptr_t) _ReturnAddress() + #define RETADDR (uintptr_t)_ReturnAddress() #else - #define RETADDR (uintptr_t) __builtin_return_address(0) + #define RETADDR (uintptr_t)__builtin_return_address(0) #endif #ifdef __GNUC__ diff --git a/fuzzers/libfuzzer_libmozjpeg/jpeg.dict b/fuzzers/inprocess/libfuzzer_libmozjpeg/jpeg.dict similarity index 86% rename from fuzzers/libfuzzer_libmozjpeg/jpeg.dict rename to fuzzers/inprocess/libfuzzer_libmozjpeg/jpeg.dict index f6215d224d..e68dfe3019 100644 --- a/fuzzers/libfuzzer_libmozjpeg/jpeg.dict +++ b/fuzzers/inprocess/libfuzzer_libmozjpeg/jpeg.dict @@ -1,5 +1,5 @@ # -# AFL dictionary for JPEG images +# AFL tokens file for JPEG images # ------------------------------ # # Created by Michal Zalewski diff --git a/fuzzers/libfuzzer_libmozjpeg/src/bin/libafl_cc.rs b/fuzzers/inprocess/libfuzzer_libmozjpeg/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/libfuzzer_libmozjpeg/src/bin/libafl_cc.rs rename to fuzzers/inprocess/libfuzzer_libmozjpeg/src/bin/libafl_cc.rs diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/src/bin/libafl_cxx.rs b/fuzzers/inprocess/libfuzzer_libmozjpeg/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_aflpp_ui/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/libfuzzer_libmozjpeg/src/bin/libafl_cxx.rs diff --git a/fuzzers/libfuzzer_libmozjpeg/src/lib.rs b/fuzzers/inprocess/libfuzzer_libmozjpeg/src/lib.rs similarity index 91% rename from fuzzers/libfuzzer_libmozjpeg/src/lib.rs rename to fuzzers/inprocess/libfuzzer_libmozjpeg/src/lib.rs index 99db317f11..2384a53239 100644 --- a/fuzzers/libfuzzer_libmozjpeg/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_libmozjpeg/src/lib.rs @@ -16,7 +16,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::SimpleMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::Tokens, }, observers::StdMapObserver, @@ -80,10 +81,17 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let edges_observer = unsafe { std_edges_map_observer("edges") }; // Create an observation channel using the cmp map - let cmps_observer = unsafe { StdMapObserver::new("cmps", &mut CMP_MAP) }; + let cmps_observer = + unsafe { StdMapObserver::from_mut_ptr("cmps", CMP_MAP.as_mut_ptr(), CMP_MAP.len()) 
}; // Create an observation channel using the allocations map - let allocs_observer = unsafe { StdMapObserver::new("allocs", &mut libafl_alloc_map) }; + let allocs_observer = unsafe { + StdMapObserver::from_mut_ptr( + "allocs", + libafl_alloc_map.as_mut_ptr(), + libafl_alloc_map.len(), + ) + }; // Feedback to rate the interestingness of an input let mut feedback = feedback_or!( @@ -135,7 +143,9 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -151,8 +161,8 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { - println!("Warning: LLVMFuzzerInitialize failed with -1") + if unsafe { libfuzzer_initialize(&args) } == -1 { + println!("Warning: LLVMFuzzerInitialize failed with -1"); } // In case the corpus is empty (on first run), reset diff --git a/fuzzers/baby_no_std/.gitignore b/fuzzers/inprocess/libfuzzer_libpng/.gitignore similarity index 100% rename from fuzzers/baby_no_std/.gitignore rename to fuzzers/inprocess/libfuzzer_libpng/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_libpng/Cargo.toml b/fuzzers/inprocess/libfuzzer_libpng/Cargo.toml new file mode 100644 index 0000000000..bc8938c28f --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_libpng/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "libfuzzer_libpng" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] +# Forces a crash +crash = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl", features = ["default"] } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", + "sancov_cmplog", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } + +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +[lib] +name = "libfuzzer_libpng" +crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng/Makefile.toml b/fuzzers/inprocess/libfuzzer_libpng/Makefile.toml similarity index 71% rename from fuzzers/libfuzzer_libpng/Makefile.toml rename to fuzzers/inprocess/libfuzzer_libpng/Makefile.toml index fb1dc6cfd2..7b0c8d0213 100644 --- a/fuzzers/libfuzzer_libpng/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_libpng/Makefile.toml @@ -1,17 +1,21 @@ # Variables [env] -FUZZER_NAME='fuzzer_libpng' +FUZZER_NAME = 'fuzzer_libpng' PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", 
default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE}/libafl_cxx' FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -22,9 +26,9 @@ mac_alias = "libpng_unix" windows_alias = "unsupported" [tasks.libpng_unix] -condition = { files_not_exist = ["./libpng-1.6.37"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = ["./libpng-1.6.37"] } +script_runner = "@shell" +script = ''' wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz tar -xvf v1.6.37.tar.gz ''' @@ -37,7 +41,7 @@ windows_alias = "unsupported" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -46,7 +50,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.crash_cxx] linux_alias = "crash_cxx_unix" @@ -55,7 +59,7 @@ windows_alias = "unsupported" [tasks.crash_cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--features=crash"] +args = ["build", "--profile", "${PROFILE}", "--features=crash"] [tasks.crash_cc] linux_alias = "crash_cc_unix" @@ -64,7 +68,7 @@ windows_alias = "unsupported" [tasks.crash_cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--features=crash"] +args = ["build", "--profile", "${PROFILE}", "--features=crash"] # Library [tasks.lib] @@ -73,13 +77,13 @@ mac_alias = "lib_unix" windows_alias = "unsupported" [tasks.lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ''' -dependencies = [ "libpng", "cxx", "cc" ] +dependencies = ["libpng", "cxx", "cc"] # Library [tasks.crash_lib] @@ -88,13 +92,13 @@ mac_alias = "crash_lib_unix" windows_alias = "unsupported" [tasks.crash_lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ''' -dependencies = [ "libpng", "crash_cxx", "crash_cc" ] +dependencies = ["libpng", "crash_cxx", "crash_cc"] # Harness [tasks.fuzzer] @@ -104,8 +108,17 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}", "-lm", "-lz"] -dependencies = [ "lib", "cxx", "cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}", + "-lm", + "-lz", +] +dependencies = ["lib", "cxx", "cc"] # Crashing Harness [tasks.fuzzer_crash] @@ -115,8 +128,17 @@ windows_alias = "unsupported" [tasks.fuzzer_crash_unix] command = 
"${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}_crash", "-lm", "-lz"] -dependencies = [ "crash_lib", "crash_cxx", "crash_cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}_crash", + "-lm", + "-lz", +] +dependencies = ["crash_lib", "crash_cxx", "crash_cc"] # Run the fuzzer [tasks.run] @@ -126,12 +148,12 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} & sleep 0.2 ./${FUZZER_NAME} 2>/dev/null ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Run the fuzzer with a crash @@ -142,13 +164,12 @@ windows_alias = "unsupported" [tasks.crash_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME}_crash & sleep 0.2 ./${FUZZER_NAME}_crash 2>/dev/null ''' -dependencies = [ "fuzzer_crash" ] - +dependencies = ["fuzzer_crash"] # Test @@ -159,7 +180,7 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true (timeout 31s ./${FUZZER_NAME} | tee fuzz_stdout.log 2>/dev/null || true) & sleep 0.2 @@ -171,17 +192,17 @@ else exit 1 fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test_mac] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true (timeout 31s ./${FUZZER_NAME} | tee fuzz_stdout.log 2>/dev/null || true) & sleep 0.2 timeout 30s ./${FUZZER_NAME} >/dev/null 2>/dev/null || true ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] @@ -192,8 +213,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} make -C libpng-1.6.37 clean cargo clean diff --git a/fuzzers/libfuzzer_libpng/README.md b/fuzzers/inprocess/libfuzzer_libpng/README.md similarity index 100% rename from fuzzers/libfuzzer_libpng/README.md rename to fuzzers/inprocess/libfuzzer_libpng/README.md diff --git a/fuzzers/libfuzzer_libpng_centralized/corpus/not_kitty.png b/fuzzers/inprocess/libfuzzer_libpng/corpus/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_libpng_centralized/corpus/not_kitty.png rename to fuzzers/inprocess/libfuzzer_libpng/corpus/not_kitty.png diff --git a/fuzzers/libfuzzer_libpng_centralized/corpus/not_kitty_alpha.png b/fuzzers/inprocess/libfuzzer_libpng/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/libfuzzer_libpng_centralized/corpus/not_kitty_alpha.png rename to fuzzers/inprocess/libfuzzer_libpng/corpus/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_libpng_centralized/corpus/not_kitty_gamma.png b/fuzzers/inprocess/libfuzzer_libpng/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_libpng_centralized/corpus/not_kitty_gamma.png rename to fuzzers/inprocess/libfuzzer_libpng/corpus/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_libpng_centralized/corpus/not_kitty_icc.png b/fuzzers/inprocess/libfuzzer_libpng/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_libpng_centralized/corpus/not_kitty_icc.png rename to fuzzers/inprocess/libfuzzer_libpng/corpus/not_kitty_icc.png diff --git a/fuzzers/libfuzzer_libpng/harness.cc b/fuzzers/inprocess/libfuzzer_libpng/harness.cc 
similarity index 100% rename from fuzzers/libfuzzer_libpng/harness.cc rename to fuzzers/inprocess/libfuzzer_libpng/harness.cc diff --git a/fuzzers/libfuzzer_libpng/src/bin/libafl_cc.rs b/fuzzers/inprocess/libfuzzer_libpng/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/libfuzzer_libpng/src/bin/libafl_cc.rs rename to fuzzers/inprocess/libfuzzer_libpng/src/bin/libafl_cc.rs diff --git a/fuzzers/libfuzzer_libpng_centralized/src/bin/libafl_cxx.rs b/fuzzers/inprocess/libfuzzer_libpng/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_centralized/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/libfuzzer_libpng/src/bin/libafl_cxx.rs diff --git a/fuzzers/libfuzzer_libpng/src/lib.rs b/fuzzers/inprocess/libfuzzer_libpng/src/lib.rs similarity index 93% rename from fuzzers/libfuzzer_libpng/src/lib.rs rename to fuzzers/inprocess/libfuzzer_libpng/src/lib.rs index f1fff436c6..9718674d32 100644 --- a/fuzzers/libfuzzer_libpng/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_libpng/src/lib.rs @@ -15,7 +15,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::Tokens, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, @@ -142,14 +143,19 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); let mut stages = tuple_list!(calibration, power); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( &edges_observer, - StdWeightedScheduler::with_schedule(&mut state, &edges_observer, Some(PowerSchedule::FAST)), + StdWeightedScheduler::with_schedule( + &mut state, + &edges_observer, + Some(PowerSchedule::fast()), + ), ); // A fuzzer with feedbacks and a corpus scheduler @@ -167,7 +173,9 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re *addr = 1; } } - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -185,7 +193,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. 
let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } diff --git a/fuzzers/fuzzbench_fork_qemu/.gitignore b/fuzzers/inprocess/libfuzzer_libpng_accounting/.gitignore similarity index 100% rename from fuzzers/fuzzbench_fork_qemu/.gitignore rename to fuzzers/inprocess/libfuzzer_libpng_accounting/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_libpng_accounting/Cargo.toml b/fuzzers/inprocess/libfuzzer_libpng_accounting/Cargo.toml new file mode 100644 index 0000000000..2a83735cce --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_libpng_accounting/Cargo.toml @@ -0,0 +1,49 @@ +[package] +name = "libfuzzer_libpng_accounting" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl", features = [ + "std", + "derive", + "llmp_compression", + "introspection", +] } +libafl_bolts = { path = "../../../libafl_bolts", features = [ + "std", + "derive", + "llmp_compression", +] } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc/" } + +clap = { version = "4.5.18", features = ["derive"] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +[lib] +name = "libfuzzer_libpng" +crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_accounting/Makefile.toml b/fuzzers/inprocess/libfuzzer_libpng_accounting/Makefile.toml similarity index 71% rename from fuzzers/libfuzzer_libpng_accounting/Makefile.toml rename to fuzzers/inprocess/libfuzzer_libpng_accounting/Makefile.toml index 13aaf5e3a3..d928de5bbc 100644 --- a/fuzzers/libfuzzer_libpng_accounting/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_libpng_accounting/Makefile.toml @@ -1,17 +1,21 @@ # Variables [env] -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} -FUZZER_NAME='fuzzer_libpng_accounting' +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } +FUZZER_NAME = 'fuzzer_libpng_accounting' LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE}/libafl_cxx' FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' PROJECT_DIR = { script = ["pwd"] } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this platform" ''' @@ -22,9 +26,9 @@ mac_alias = "libpng_unix" windows_alias = "unsupported" [tasks.libpng_unix] -condition = { files_not_exist = ["./libpng-1.6.37"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = 
["./libpng-1.6.37"] } +script_runner = "@shell" +script = ''' wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz tar -xvf v1.6.37.tar.gz ''' @@ -37,7 +41,7 @@ windows_alias = "unsupported" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -46,7 +50,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] # Library [tasks.lib] @@ -55,13 +59,13 @@ mac_alias = "lib_unix" windows_alias = "unsupported" [tasks.lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ''' -dependencies = [ "libpng", "cxx", "cc" ] +dependencies = ["libpng", "cxx", "cc"] # Harness @@ -72,8 +76,17 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}", "-lm", "-lz"] -dependencies = [ "lib", "cxx", "cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}", + "-lm", + "-lz", +] +dependencies = ["lib", "cxx", "cc"] # Run the fuzzer [tasks.run] @@ -83,10 +96,10 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} --cores 0 --input ./corpus ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Test [tasks.test] @@ -96,7 +109,7 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 31s ./${FUZZER_NAME} --cores 0 --input ./corpus | tee fuzz_stdout.log 2>/dev/null || true if grep -qa "corpus: 30" fuzz_stdout.log; then @@ -106,15 +119,15 @@ else exit 1 fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test_mac] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 31s ./${FUZZER_NAME} --cores 0 --input ./corpus | tee fuzz_stdout.log 2>/dev/null || true ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] @@ -125,8 +138,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} make -C libpng-1.6.37 clean cargo clean diff --git a/fuzzers/libfuzzer_libpng_accounting/README.md b/fuzzers/inprocess/libfuzzer_libpng_accounting/README.md similarity index 100% rename from fuzzers/libfuzzer_libpng_accounting/README.md rename to fuzzers/inprocess/libfuzzer_libpng_accounting/README.md diff --git a/fuzzers/libfuzzer_libpng_cmin/corpus/not_kitty.png b/fuzzers/inprocess/libfuzzer_libpng_accounting/corpus/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_libpng_cmin/corpus/not_kitty.png rename to fuzzers/inprocess/libfuzzer_libpng_accounting/corpus/not_kitty.png diff --git a/fuzzers/libfuzzer_libpng_cmin/corpus/not_kitty_alpha.png b/fuzzers/inprocess/libfuzzer_libpng_accounting/corpus/not_kitty_alpha.png 
similarity index 100% rename from fuzzers/libfuzzer_libpng_cmin/corpus/not_kitty_alpha.png rename to fuzzers/inprocess/libfuzzer_libpng_accounting/corpus/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_libpng_cmin/corpus/not_kitty_gamma.png b/fuzzers/inprocess/libfuzzer_libpng_accounting/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_libpng_cmin/corpus/not_kitty_gamma.png rename to fuzzers/inprocess/libfuzzer_libpng_accounting/corpus/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_libpng_cmin/corpus/not_kitty_icc.png b/fuzzers/inprocess/libfuzzer_libpng_accounting/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_libpng_cmin/corpus/not_kitty_icc.png rename to fuzzers/inprocess/libfuzzer_libpng_accounting/corpus/not_kitty_icc.png diff --git a/fuzzers/libfuzzer_libpng_accounting/harness.cc b/fuzzers/inprocess/libfuzzer_libpng_accounting/harness.cc similarity index 100% rename from fuzzers/libfuzzer_libpng_accounting/harness.cc rename to fuzzers/inprocess/libfuzzer_libpng_accounting/harness.cc diff --git a/fuzzers/libfuzzer_libpng_accounting/src/bin/libafl_cc.rs b/fuzzers/inprocess/libfuzzer_libpng_accounting/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_accounting/src/bin/libafl_cc.rs rename to fuzzers/inprocess/libfuzzer_libpng_accounting/src/bin/libafl_cc.rs diff --git a/fuzzers/libfuzzer_libpng_cmin/src/bin/libafl_cxx.rs b/fuzzers/inprocess/libfuzzer_libpng_accounting/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_cmin/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/libfuzzer_libpng_accounting/src/bin/libafl_cxx.rs diff --git a/fuzzers/libfuzzer_libpng_accounting/src/lib.rs b/fuzzers/inprocess/libfuzzer_libpng_accounting/src/lib.rs similarity index 97% rename from fuzzers/libfuzzer_libpng_accounting/src/lib.rs rename to fuzzers/inprocess/libfuzzer_libpng_accounting/src/lib.rs index c52cfd68a8..5b36981da2 100644 --- a/fuzzers/libfuzzer_libpng_accounting/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_libpng_accounting/src/lib.rs @@ -16,7 +16,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::Tokens, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, @@ -136,7 +137,7 @@ pub extern "C" fn libafl_main() { let monitor = MultiMonitor::new(|s| println!("{s}")); - let mut run_client = |state: Option<_>, mut restarting_mgr, _core_id| { + let mut run_client = |state: Option<_>, mut restarting_mgr, _client_description| { // Create an observation channel using the coverage map let edges_observer = HitcountsMapObserver::new(unsafe { StdMapObserver::from_mut_ptr("edges", EDGES_MAP.as_mut_ptr(), MAX_EDGES_FOUND) @@ -209,7 +210,9 @@ pub extern "C" fn libafl_main() { let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -226,7 +229,7 @@ pub extern "C" fn libafl_main() { // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. 
let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } diff --git a/fuzzers/libfuzzer_libpng_centralized/.gitignore b/fuzzers/inprocess/libfuzzer_libpng_centralized/.gitignore similarity index 100% rename from fuzzers/libfuzzer_libpng_centralized/.gitignore rename to fuzzers/inprocess/libfuzzer_libpng_centralized/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_libpng_centralized/Cargo.toml b/fuzzers/inprocess/libfuzzer_libpng_centralized/Cargo.toml new file mode 100644 index 0000000000..319f0cdc5f --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_libpng_centralized/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "libfuzzer_libpng_launcher_centralized" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl", features = [ + "std", + "derive", + "rand_trait", + "fork", + "prelude", + "gzip", + "regex", + "scalability_introspection", +] } +libafl_bolts = { path = "../../../libafl_bolts", features = [ + "errors_backtrace", +] } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } + +clap = { version = "4.5.18", features = ["derive"] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } +env_logger = "0.11.5" + +[lib] +name = "libfuzzer_libpng" +crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_centralized/Makefile.toml b/fuzzers/inprocess/libfuzzer_libpng_centralized/Makefile.toml similarity index 71% rename from fuzzers/libfuzzer_libpng_centralized/Makefile.toml rename to fuzzers/inprocess/libfuzzer_libpng_centralized/Makefile.toml index a1e30b393d..12434f52c0 100644 --- a/fuzzers/libfuzzer_libpng_centralized/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_libpng_centralized/Makefile.toml @@ -1,17 +1,21 @@ # Variables [env] -FUZZER_NAME='fuzzer_libpng_launcher' -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +FUZZER_NAME = 'fuzzer_libpng_launcher' +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE}/libafl_cxx' FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' PROJECT_DIR = { script = ["pwd"] } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this platform" ''' @@ -22,9 +26,9 @@ mac_alias = "libpng_unix" windows_alias = "unsupported" [tasks.libpng_unix] -condition = { files_not_exist = 
["./libpng-1.6.37"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = ["./libpng-1.6.37"] } +script_runner = "@shell" +script = ''' wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz tar -xvf v1.6.37.tar.gz ''' @@ -37,7 +41,7 @@ windows_alias = "unsupported" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -46,7 +50,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] # Library [tasks.lib] @@ -55,13 +59,13 @@ mac_alias = "lib_unix" windows_alias = "unsupported" [tasks.lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ''' -dependencies = [ "libpng", "cxx", "cc" ] +dependencies = ["libpng", "cxx", "cc"] # Harness @@ -72,8 +76,17 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}", "-lm", "-lz"] -dependencies = [ "lib", "cxx", "cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}", + "-lm", + "-lz", +] +dependencies = ["lib", "cxx", "cc"] # Run the fuzzer [tasks.run] @@ -83,10 +96,10 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} --cores 0-1 --input ./corpus ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Test [tasks.test] @@ -96,7 +109,7 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 31s ./${FUZZER_NAME} --cores 0-1 --input ./corpus 2>/dev/null | tee fuzz_stdout.log || true if grep -qa "corpus: 30" fuzz_stdout.log; then @@ -106,15 +119,15 @@ else exit 1 fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test_mac] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 31s ./${FUZZER_NAME} --cores 0 --input ./corpus 2>/dev/null | tee fuzz_stdout.log || true ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] @@ -125,8 +138,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} make -C libpng-1.6.37 clean cargo clean diff --git a/fuzzers/libfuzzer_libpng_centralized/README.md b/fuzzers/inprocess/libfuzzer_libpng_centralized/README.md similarity index 100% rename from fuzzers/libfuzzer_libpng_centralized/README.md rename to fuzzers/inprocess/libfuzzer_libpng_centralized/README.md diff --git a/fuzzers/libfuzzer_libpng_launcher/corpus/not_kitty.png b/fuzzers/inprocess/libfuzzer_libpng_centralized/corpus/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/corpus/not_kitty.png rename to fuzzers/inprocess/libfuzzer_libpng_centralized/corpus/not_kitty.png diff --git 
a/fuzzers/libfuzzer_libpng_launcher/corpus/not_kitty_alpha.png b/fuzzers/inprocess/libfuzzer_libpng_centralized/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/corpus/not_kitty_alpha.png rename to fuzzers/inprocess/libfuzzer_libpng_centralized/corpus/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_libpng_launcher/corpus/not_kitty_gamma.png b/fuzzers/inprocess/libfuzzer_libpng_centralized/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/corpus/not_kitty_gamma.png rename to fuzzers/inprocess/libfuzzer_libpng_centralized/corpus/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_libpng_launcher/corpus/not_kitty_icc.png b/fuzzers/inprocess/libfuzzer_libpng_centralized/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/corpus/not_kitty_icc.png rename to fuzzers/inprocess/libfuzzer_libpng_centralized/corpus/not_kitty_icc.png diff --git a/fuzzers/libfuzzer_libpng_centralized/harness.cc b/fuzzers/inprocess/libfuzzer_libpng_centralized/harness.cc similarity index 100% rename from fuzzers/libfuzzer_libpng_centralized/harness.cc rename to fuzzers/inprocess/libfuzzer_libpng_centralized/harness.cc diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/src/bin/libafl_cc.rs b/fuzzers/inprocess/libfuzzer_libpng_centralized/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_aflpp_ui/src/bin/libafl_cc.rs rename to fuzzers/inprocess/libfuzzer_libpng_centralized/src/bin/libafl_cc.rs diff --git a/fuzzers/libfuzzer_libpng_launcher/src/bin/libafl_cxx.rs b/fuzzers/inprocess/libfuzzer_libpng_centralized/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/libfuzzer_libpng_centralized/src/bin/libafl_cxx.rs diff --git a/fuzzers/inprocess/libfuzzer_libpng_centralized/src/lib.rs b/fuzzers/inprocess/libfuzzer_libpng_centralized/src/lib.rs new file mode 100644 index 0000000000..62da02c185 --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_libpng_centralized/src/lib.rs @@ -0,0 +1,284 @@ +//! A libfuzzer-like fuzzer with llmp-multithreading support and restarts +//! The example harness is built for libpng. +//! In this example, you will see the use of the `launcher` feature. +//! The `launcher` will spawn new processes for each cpu core. 
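For orientation while reading the new `src/lib.rs` added below: the launcher spawns one fuzzing client per entry of the `--cores` specification, whose accepted syntax is described in the clap help text further down ("all", "none", or a list such as "1,2-4,6"). The following self-contained sketch only illustrates what such a core spec means; `parse_cores` is a hypothetical helper written for this illustration, not LibAFL's `Cores::from_cmdline`.

```rust
// Illustration only: a minimal parser for the "--cores" syntax mentioned in the
// help text of the fuzzer below ("all", "none", or a list like "1,2-4,6").
// This is NOT LibAFL's `Cores::from_cmdline`; it just demonstrates the format.
fn parse_cores(spec: &str, available: usize) -> Result<Vec<usize>, String> {
    match spec {
        "all" => Ok((0..available).collect()),
        "none" => Ok(Vec::new()),
        _ => {
            let mut cores = Vec::new();
            for part in spec.split(',') {
                if let Some((lo, hi)) = part.split_once('-') {
                    // A range like "2-4" selects cores 2, 3 and 4.
                    let lo: usize = lo.trim().parse().map_err(|e| format!("{e}"))?;
                    let hi: usize = hi.trim().parse().map_err(|e| format!("{e}"))?;
                    cores.extend(lo..=hi);
                } else {
                    // A single entry like "6" selects exactly that core.
                    cores.push(part.trim().parse().map_err(|e| format!("{e}"))?);
                }
            }
            Ok(cores)
        }
    }
}

fn main() {
    // "1,2-4,6" selects cores 1, 2, 3, 4 and 6 -- the example from the help string.
    assert_eq!(parse_cores("1,2-4,6", 8).unwrap(), vec![1, 2, 3, 4, 6]);
    println!("{:?}", parse_cores("all", 4).unwrap());
}
```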
+use core::time::Duration; +use std::{env, net::SocketAddr, path::PathBuf}; + +use clap::{self, Parser}; +use libafl::{ + corpus::{Corpus, InMemoryCorpus, OnDiskCorpus}, + events::{ + centralized::CentralizedEventManager, launcher::CentralizedLauncher, ClientDescription, + EventConfig, + }, + executors::{inprocess::InProcessExecutor, ExitKind}, + feedback_or, feedback_or_fast, + feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, + fuzzer::{Fuzzer, StdFuzzer}, + inputs::{BytesInput, HasTargetBytes}, + monitors::MultiMonitor, + mutators::{ + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, + token_mutations::Tokens, + }, + observers::{CanTrack, HitcountsMapObserver, TimeObserver}, + schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, + stages::mutational::StdMutationalStage, + state::{HasCorpus, StdState}, + Error, HasMetadata, +}; +use libafl_bolts::{ + core_affinity::Cores, + rands::StdRand, + shmem::{ShMemProvider, StdShMemProvider}, + tuples::{tuple_list, Merge}, + AsSlice, +}; +use libafl_targets::{libfuzzer_initialize, libfuzzer_test_one_input, std_edges_map_observer}; +use mimalloc::MiMalloc; + +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; + +/// Parse a millis string to a [`Duration`]. Used for arg parsing. +fn timeout_from_millis_str(time: &str) -> Result { + Ok(Duration::from_millis(time.parse()?)) +} + +/// The commandline args this fuzzer accepts +#[derive(Debug, Parser)] +#[command( + name = "libfuzzer_libpng_launcher", + about = "A libfuzzer-like fuzzer for libpng with llmp-multithreading support and a launcher", + author = "Andrea Fioraldi , Dominik Maier " +)] +struct Opt { + #[arg( + short, + long, + value_parser = Cores::from_cmdline, + help = "Spawn a client in each of the provided cores. Broker runs in the 0th core. 'all' to select all available cores. 'none' to run a client without binding to any core. 
eg: '1,2-4,6' selects the cores 1,2,3,4,6.", + name = "CORES" + )] + cores: Cores, + + #[arg( + short = 'p', + long, + help = "Choose the broker TCP port, default is 1337", + name = "PORT", + default_value = "1337" + )] + broker_port: u16, + + #[arg(short = 'a', long, help = "Specify a remote broker", name = "REMOTE")] + remote_broker_addr: Option<SocketAddr>, + + #[arg( + short, + long, + help = "Set an initial corpus directory", + name = "INPUT", + required = true + )] + input: Vec<PathBuf>, + + #[arg( + short, + long, + help = "Set the output directory, default is ./out", + name = "OUTPUT", + default_value = "./out" + )] + output: PathBuf, + + #[arg( + value_parser = timeout_from_millis_str, + short, + long, + help = "Set the execution timeout in milliseconds, default is 10000", + name = "TIMEOUT", + default_value = "10000" + )] + timeout: Duration, + /* + /// This fuzzer has hard-coded tokens + #[arg( + + short = "x", + long, + help = "Feed the fuzzer with a user-specified list of tokens (often called \"dictionary\")", + name = "TOKENS", + multiple = true + )] + tokens: Vec, + */ +} + +/// The main fn, `no_mangle` as it is a C symbol +#[no_mangle] +pub extern "C" fn libafl_main() { + env_logger::init(); + + // Register the metadata types used in this fuzzer + // Needed only on no_std + // unsafe { RegistryBuilder::register::(); } + let opt = Opt::parse(); + + let broker_port = opt.broker_port; + let cores = opt.cores; + + println!( + "Workdir: {:?}", + env::current_dir().unwrap().to_string_lossy().to_string() + ); + + let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory"); + + let monitor = MultiMonitor::new(|s| println!("{s}")); + + let mut secondary_run_client = + |state: Option<_>, + mut mgr: CentralizedEventManager<_, _, _, _>, + _client_description: ClientDescription| { + // Create an observation channel using the coverage map + let edges_observer = + HitcountsMapObserver::new(unsafe { std_edges_map_observer("edges") }) + .track_indices(); + + // Create an observation channel to keep track of the execution time + let time_observer = TimeObserver::new("time"); + + // Feedback to rate the interestingness of an input + // This one is composed of two Feedbacks in OR + let mut feedback = feedback_or!( + // New maximization map feedback linked to the edges observer and the feedback state + MaxMapFeedback::new(&edges_observer), + // Time feedback, this one does not need a feedback state + TimeFeedback::new(&time_observer) + ); + + // A feedback to choose if an input is a solution or not + let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new()); + + // If not restarting, create a State from scratch + let mut state = state.unwrap_or_else(|| { + StdState::new( + // RNG + StdRand::new(), + // Corpus that will be evolved, we keep it in memory for performance + InMemoryCorpus::new(), + // Corpus in which we store solutions (crashes in this example), + // on disk so the user can get them after stopping the fuzzer + OnDiskCorpus::new(&opt.output).unwrap(), + // States of the feedbacks. + // The feedbacks can report the data that should persist in the State.
+ &mut feedback, + // Same for objective feedbacks + &mut objective, + ) + .unwrap() + }); + + println!("We're a client, let's fuzz :)"); + + // Create a PNG dictionary if not existing + if state.metadata_map().get::().is_none() { + state.add_metadata(Tokens::from([ + vec![137, 80, 78, 71, 13, 10, 26, 10], // PNG header + "IHDR".as_bytes().to_vec(), + "IDAT".as_bytes().to_vec(), + "PLTE".as_bytes().to_vec(), + "IEND".as_bytes().to_vec(), + ])); + } + + // Setup a basic mutator with a mutational stage + let mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); + let mut stages = tuple_list!(StdMutationalStage::new(mutator)); + + // A minimization+queue policy to get testcasess from the corpus + let scheduler = + IndexesLenTimeMinimizerScheduler::new(&edges_observer, QueueScheduler::new()); + + // A fuzzer with feedbacks and a corpus scheduler + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + // The wrapped harness function, calling out to the LLVM-style harness + let mut harness = |input: &BytesInput| { + let target = input.target_bytes(); + let buf = target.as_slice(); + unsafe { + libfuzzer_test_one_input(buf); + } + ExitKind::Ok + }; + + // Create the executor for an in-process function with one observer for edge coverage and one for the execution time + #[cfg(target_os = "linux")] + let mut executor = InProcessExecutor::batched_timeout( + &mut harness, + tuple_list!(edges_observer, time_observer), + &mut fuzzer, + &mut state, + &mut mgr, + opt.timeout, + )?; + + #[cfg(not(target_os = "linux"))] + let mut executor = InProcessExecutor::with_timeout( + &mut harness, + tuple_list!(edges_observer, time_observer), + &mut fuzzer, + &mut state, + &mut mgr, + opt.timeout, + )?; + + // The actual target run starts here. + // Call LLVMFUzzerInitialize() if present. + let args: Vec = env::args().collect(); + if unsafe { libfuzzer_initialize(&args) } == -1 { + println!("Warning: LLVMFuzzerInitialize failed with -1"); + } + + // In case the corpus is empty (on first run), reset + if state.must_load_initial_inputs() { + state + .load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &opt.input) + .unwrap_or_else(|_| { + panic!("Failed to load initial corpus at {:?}", &opt.input) + }); + println!("We imported {} inputs from disk.", state.corpus().count()); + } + if !mgr.is_main() { + fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; + } else { + let mut empty_stages = tuple_list!(); + fuzzer.fuzz_loop(&mut empty_stages, &mut executor, &mut state, &mut mgr)?; + } + Ok(()) + }; + + let mut main_run_client = secondary_run_client.clone(); // clone it just for borrow checker + + match CentralizedLauncher::builder() + .shmem_provider(shmem_provider) + .configuration(EventConfig::from_name("default")) + .monitor(monitor) + .secondary_run_client(&mut secondary_run_client) + .main_run_client(&mut main_run_client) + .cores(&cores) + .broker_port(broker_port) + .remote_broker_addr(opt.remote_broker_addr) + .stdout_file(Some("/dev/null")) + .build() + .launch() + { + Ok(()) => (), + Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. 
Good bye."), + Err(err) => panic!("Failed to run launcher: {err:?}"), + } +} diff --git a/fuzzers/fuzzbench_qemu/.gitignore b/fuzzers/inprocess/libfuzzer_libpng_cmin/.gitignore similarity index 100% rename from fuzzers/fuzzbench_qemu/.gitignore rename to fuzzers/inprocess/libfuzzer_libpng_cmin/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_libpng_cmin/Cargo.toml b/fuzzers/inprocess/libfuzzer_libpng_cmin/Cargo.toml new file mode 100644 index 0000000000..6e50c0f896 --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_libpng_cmin/Cargo.toml @@ -0,0 +1,44 @@ +[package] +name = "libfuzzer_libpng_cmin" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", + "Addison Crump ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] +# Forces a crash +crash = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl", features = ["default", "cmin"] } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", + "sancov_cmplog", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } + +env_logger = "0.11.5" +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +[lib] +name = "libfuzzer_libpng" +crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_cmin/Makefile.toml b/fuzzers/inprocess/libfuzzer_libpng_cmin/Makefile.toml similarity index 70% rename from fuzzers/libfuzzer_libpng_cmin/Makefile.toml rename to fuzzers/inprocess/libfuzzer_libpng_cmin/Makefile.toml index b590ba024b..d1bf2aaec3 100644 --- a/fuzzers/libfuzzer_libpng_cmin/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_libpng_cmin/Makefile.toml @@ -1,17 +1,21 @@ # Variables [env] -FUZZER_NAME='fuzzer_libpng' +FUZZER_NAME = 'fuzzer_libpng' PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE}/libafl_cxx' FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -22,9 +26,9 @@ mac_alias = "libpng_unix" windows_alias = "unsupported" [tasks.libpng_unix] -condition = { files_not_exist = ["./libpng-1.6.37"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = ["./libpng-1.6.37"] } +script_runner = "@shell" +script = ''' wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz tar -xvf v1.6.37.tar.gz ''' @@ -37,7 +41,7 @@ windows_alias = "unsupported" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = 
["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -46,7 +50,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.crash_cxx] linux_alias = "crash_cxx_unix" @@ -55,7 +59,7 @@ windows_alias = "unsupported" [tasks.crash_cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--features=crash"] +args = ["build", "--profile", "${PROFILE}", "--features=crash"] [tasks.crash_cc] linux_alias = "crash_cc_unix" @@ -64,7 +68,7 @@ windows_alias = "unsupported" [tasks.crash_cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--features=crash"] +args = ["build", "--profile", "${PROFILE}", "--features=crash"] # Library [tasks.lib] @@ -73,13 +77,13 @@ mac_alias = "lib_unix" windows_alias = "unsupported" [tasks.lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ''' -dependencies = [ "libpng", "cxx", "cc" ] +dependencies = ["libpng", "cxx", "cc"] # Library [tasks.crash_lib] @@ -88,13 +92,13 @@ mac_alias = "crash_lib_unix" windows_alias = "unsupported" [tasks.crash_lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ''' -dependencies = [ "libpng", "crash_cxx", "crash_cc" ] +dependencies = ["libpng", "crash_cxx", "crash_cc"] # Harness [tasks.fuzzer] @@ -104,8 +108,18 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}", "-lm", "-lz", "-lz3"] -dependencies = [ "lib", "cxx", "cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}", + "-lm", + "-lz", + "-lz3", +] +dependencies = ["lib", "cxx", "cc"] # Crashing Harness [tasks.fuzzer_crash] @@ -115,8 +129,18 @@ windows_alias = "unsupported" [tasks.fuzzer_crash_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}_crash", "-lm", "-lz", "-lz3"] -dependencies = [ "crash_lib", "crash_cxx", "crash_cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}_crash", + "-lm", + "-lz", + "-lz3", +] +dependencies = ["crash_lib", "crash_cxx", "crash_cc"] # Run the fuzzer [tasks.run] @@ -126,12 +150,12 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} & sleep 0.2 ./${FUZZER_NAME} 2>/dev/null ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Run the fuzzer with a crash @@ -142,13 +166,12 @@ windows_alias = "unsupported" [tasks.crash_unix] script_runner = "@shell" -script=''' +script = ''' 
./${FUZZER_NAME}_crash & sleep 0.2 ./${FUZZER_NAME}_crash 2>/dev/null ''' -dependencies = [ "fuzzer_crash" ] - +dependencies = ["fuzzer_crash"] # Test @@ -159,7 +182,7 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 31s ./${FUZZER_NAME} | tee fuzz_stdout.log & sleep 0.2 @@ -171,17 +194,17 @@ else exit 1 fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test_mac] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 31s ./${FUZZER_NAME} | tee fuzz_stdout.log & sleep 0.2 timeout 30s ./${FUZZER_NAME} >/dev/null 2>/dev/null || true ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] @@ -192,8 +215,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} make -C libpng-1.6.37 clean cargo clean diff --git a/fuzzers/libfuzzer_libpng_cmin/README.md b/fuzzers/inprocess/libfuzzer_libpng_cmin/README.md similarity index 95% rename from fuzzers/libfuzzer_libpng_cmin/README.md rename to fuzzers/inprocess/libfuzzer_libpng_cmin/README.md index ccf6ed3ab4..ea092ac70d 100644 --- a/fuzzers/libfuzzer_libpng_cmin/README.md +++ b/fuzzers/inprocess/libfuzzer_libpng_cmin/README.md @@ -22,7 +22,7 @@ cargo build --release This will build the library with the fuzzer (src/lib.rs) with the libfuzzer compatibility layer and the SanitizerCoverage runtime functions for coverage feedback. In addition, it will also build two C and C++ compiler wrappers (bin/libafl_c(libafl_c/xx).rs) that you must use to compile the target. -The compiler wrappers, `libafl_cc` and libafl_cxx`, will end up in `./target/release/` (or `./target/debug`, in case you did not build with the `--release` flag). +The compiler wrappers, `libafl_cc` and `libafl_cxx`, will end up in `./target/release/` (or `./target/debug`, in case you did not build with the `--release` flag). 
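To make the "libfuzzer compatibility layer" wording in the README hunk above concrete: the target code compiled with the wrappers exposes the usual libFuzzer entry point, while the Rust staticlib exposes `libafl_main`, and the wrapper links the two together. The snippet below is a hedged, self-contained illustration of those two C-ABI symbols (the names follow the libFuzzer/LibAFL convention and `libafl_main` appears in the fuzzer sources in this diff); the bodies are placeholders, not the actual `libafl_targets` runtime code.

```rust
// Hedged illustration (not the actual libafl_targets runtime): the two C-ABI
// symbols that the libafl_cc/libafl_cxx wrappers tie together at link time.

/// Provided by the harness (normally C/C++, e.g. harness.cc): handles one input.
#[allow(non_snake_case)]
#[no_mangle]
pub extern "C" fn LLVMFuzzerTestOneInput(data: *const u8, size: usize) -> i32 {
    if data.is_null() || size == 0 {
        return 0;
    }
    // Safety: libFuzzer-style callers pass a valid buffer of `size` bytes.
    let input = unsafe { core::slice::from_raw_parts(data, size) };
    // ... hand `input` to the library under test; here we only peek at the magic ...
    let _looks_like_png = input.starts_with(&[0x89, b'P', b'N', b'G']);
    0
}

/// Provided by the Rust staticlib built above; the wrapper-linked binary runs
/// this instead of libFuzzer's own driver, and it starts the LibAFL fuzzing loop.
#[no_mangle]
pub extern "C" fn libafl_main() {
    // ... set up observers, feedbacks, executor, and call the fuzz loop ...
}

fn main() {
    // Tiny smoke test of the harness entry point with a PNG-magic prefix.
    let sample = [0x89u8, b'P', b'N', b'G', 13, 10, 26, 10];
    let rc = LLVMFuzzerTestOneInput(sample.as_ptr(), sample.len());
    println!("harness returned {rc}");
}
```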
Then download libpng, and unpack the archive: ```bash diff --git a/fuzzers/libfuzzer_libpng_norestart/seeds/not_kitty.png b/fuzzers/inprocess/libfuzzer_libpng_cmin/corpus/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_libpng_norestart/seeds/not_kitty.png rename to fuzzers/inprocess/libfuzzer_libpng_cmin/corpus/not_kitty.png diff --git a/fuzzers/libfuzzer_libpng_norestart/seeds/not_kitty_alpha.png b/fuzzers/inprocess/libfuzzer_libpng_cmin/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/libfuzzer_libpng_norestart/seeds/not_kitty_alpha.png rename to fuzzers/inprocess/libfuzzer_libpng_cmin/corpus/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_libpng_norestart/seeds/not_kitty_gamma.png b/fuzzers/inprocess/libfuzzer_libpng_cmin/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_libpng_norestart/seeds/not_kitty_gamma.png rename to fuzzers/inprocess/libfuzzer_libpng_cmin/corpus/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_libpng_norestart/seeds/not_kitty_icc.png b/fuzzers/inprocess/libfuzzer_libpng_cmin/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_libpng_norestart/seeds/not_kitty_icc.png rename to fuzzers/inprocess/libfuzzer_libpng_cmin/corpus/not_kitty_icc.png diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/harness.cc b/fuzzers/inprocess/libfuzzer_libpng_cmin/harness.cc similarity index 100% rename from fuzzers/libfuzzer_libpng_aflpp_ui/harness.cc rename to fuzzers/inprocess/libfuzzer_libpng_cmin/harness.cc diff --git a/fuzzers/libfuzzer_libpng_cmin/src/bin/libafl_cc.rs b/fuzzers/inprocess/libfuzzer_libpng_cmin/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_cmin/src/bin/libafl_cc.rs rename to fuzzers/inprocess/libfuzzer_libpng_cmin/src/bin/libafl_cc.rs diff --git a/fuzzers/libfuzzer_libpng_norestart/src/bin/libafl_cxx.rs b/fuzzers/inprocess/libfuzzer_libpng_cmin/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_norestart/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/libfuzzer_libpng_cmin/src/bin/libafl_cxx.rs diff --git a/fuzzers/libfuzzer_libpng_cmin/src/lib.rs b/fuzzers/inprocess/libfuzzer_libpng_cmin/src/lib.rs similarity index 93% rename from fuzzers/libfuzzer_libpng_cmin/src/lib.rs rename to fuzzers/inprocess/libfuzzer_libpng_cmin/src/lib.rs index e024dcdbe8..823b04d79d 100644 --- a/fuzzers/libfuzzer_libpng_cmin/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_libpng_cmin/src/lib.rs @@ -6,10 +6,7 @@ use std::ptr; use std::{env, path::PathBuf}; use libafl::{ - corpus::{ - minimizer::{CorpusMinimizer, StdCorpusMinimizer}, - Corpus, InMemoryCorpus, OnDiskCorpus, - }, + corpus::{minimizer::StdCorpusMinimizer, Corpus, InMemoryCorpus, OnDiskCorpus}, events::{setup_restarting_mgr_std, EventConfig, EventFirer, EventRestarter, LogSeverity}, executors::{inprocess::InProcessExecutor, ExitKind}, feedback_or, feedback_or_fast, @@ -18,7 +15,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::Tokens, }, observers::{CanTrack, HitcountsMapObserver, TimeObserver}, @@ -142,14 +140,19 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); - let power = StdPowerMutationalStage::new(mutator); + let power: 
StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); let mut stages = tuple_list!(calibration, power); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( &edges_observer, - StdWeightedScheduler::with_schedule(&mut state, &edges_observer, Some(PowerSchedule::FAST)), + StdWeightedScheduler::with_schedule( + &mut state, + &edges_observer, + Some(PowerSchedule::fast()), + ), ); // A fuzzer with feedbacks and a corpus scheduler @@ -167,7 +170,9 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re *addr = 1; } } - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -184,7 +189,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } diff --git a/fuzzers/libfuzzer_libpng_launcher/.gitignore b/fuzzers/inprocess/libfuzzer_libpng_launcher/.gitignore similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/.gitignore rename to fuzzers/inprocess/libfuzzer_libpng_launcher/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_libpng_launcher/Cargo.toml b/fuzzers/inprocess/libfuzzer_libpng_launcher/Cargo.toml new file mode 100644 index 0000000000..206866004c --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_libpng_launcher/Cargo.toml @@ -0,0 +1,45 @@ +[package] +name = "libfuzzer_libpng_launcher" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl", features = [ + "std", + "derive", + "llmp_compression", + "introspection", +] } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } + +clap = { version = "4.5.18", features = ["derive"] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +[lib] +name = "libfuzzer_libpng" +crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_launcher/Makefile.toml b/fuzzers/inprocess/libfuzzer_libpng_launcher/Makefile.toml similarity index 72% rename from fuzzers/libfuzzer_libpng_launcher/Makefile.toml rename to fuzzers/inprocess/libfuzzer_libpng_launcher/Makefile.toml index 86f63f1613..3dc75f27f3 100644 --- a/fuzzers/libfuzzer_libpng_launcher/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_libpng_launcher/Makefile.toml @@ -1,9 +1,13 @@ # Variables [env] -FUZZER_NAME='fuzzer_libpng_launcher' -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +FUZZER_NAME = 'fuzzer_libpng_launcher' +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + 
"CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE}/libafl_cxx' LIBAFL_LIBTOOL = '${CARGO_TARGET_DIR}/${PROFILE}/libafl_libtool' @@ -11,8 +15,8 @@ FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' PROJECT_DIR = { script = ["pwd"] } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this platform" ''' @@ -23,9 +27,9 @@ mac_alias = "libpng_unix" windows_alias = "unsupported" [tasks.libpng_unix] -condition = { files_not_exist = ["./libpng-1.6.37"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = ["./libpng-1.6.37"] } +script_runner = "@shell" +script = ''' wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz tar -xvf v1.6.37.tar.gz ''' @@ -38,7 +42,7 @@ windows_alias = "unsupported" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -47,7 +51,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] # Library [tasks.lib] @@ -56,13 +60,13 @@ mac_alias = "lib_unix" windows_alias = "unsupported" [tasks.lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" LIBTOOL=${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_libtool ''' -dependencies = [ "libpng", "cxx", "cc" ] +dependencies = ["libpng", "cxx", "cc"] # Harness @@ -73,8 +77,17 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}", "-lm", "-lz"] -dependencies = [ "lib", "cxx", "cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}", + "-lm", + "-lz", +] +dependencies = ["lib", "cxx", "cc"] # Run the fuzzer [tasks.run] @@ -84,10 +97,10 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME}.coverage --broker-port 21337 --cores 0 --input ./corpus ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Test [tasks.test] @@ -97,9 +110,9 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true -timeout 31s ./${FUZZER_NAME}.coverage --broker-port 21337 --cores 0 --input ./corpus 2>/dev/null | tee fuzz_stdout.log || true +timeout 31s ./${FUZZER_NAME}.coverage --broker-port 21337 --cores 0 --input ./corpus | tee fuzz_stdout.log || true if grep -qa "corpus: 30" fuzz_stdout.log; then echo "Fuzzer is working" else @@ -107,15 +120,15 @@ else exit 1 fi ''' 
-dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test_mac] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 31s ./${FUZZER_NAME} --cores 0 --input ./corpus 2>/dev/null | tee fuzz_stdout.log || true ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] @@ -126,8 +139,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} make -C libpng-1.6.37 clean cargo clean diff --git a/fuzzers/libfuzzer_libpng_launcher/README.md b/fuzzers/inprocess/libfuzzer_libpng_launcher/README.md similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/README.md rename to fuzzers/inprocess/libfuzzer_libpng_launcher/README.md diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/corpus/not_kitty.png b/fuzzers/inprocess/libfuzzer_libpng_launcher/corpus/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_libpng_tcp_manager/corpus/not_kitty.png rename to fuzzers/inprocess/libfuzzer_libpng_launcher/corpus/not_kitty.png diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/corpus/not_kitty_alpha.png b/fuzzers/inprocess/libfuzzer_libpng_launcher/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/libfuzzer_libpng_tcp_manager/corpus/not_kitty_alpha.png rename to fuzzers/inprocess/libfuzzer_libpng_launcher/corpus/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/corpus/not_kitty_gamma.png b/fuzzers/inprocess/libfuzzer_libpng_launcher/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_libpng_tcp_manager/corpus/not_kitty_gamma.png rename to fuzzers/inprocess/libfuzzer_libpng_launcher/corpus/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/corpus/not_kitty_icc.png b/fuzzers/inprocess/libfuzzer_libpng_launcher/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_libpng_tcp_manager/corpus/not_kitty_icc.png rename to fuzzers/inprocess/libfuzzer_libpng_launcher/corpus/not_kitty_icc.png diff --git a/fuzzers/libfuzzer_libpng_launcher/harness.cc b/fuzzers/inprocess/libfuzzer_libpng_launcher/harness.cc similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/harness.cc rename to fuzzers/inprocess/libfuzzer_libpng_launcher/harness.cc diff --git a/fuzzers/libfuzzer_libpng_launcher/src/bin/libafl_ar.rs b/fuzzers/inprocess/libfuzzer_libpng_launcher/src/bin/libafl_ar.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/src/bin/libafl_ar.rs rename to fuzzers/inprocess/libfuzzer_libpng_launcher/src/bin/libafl_ar.rs diff --git a/fuzzers/libfuzzer_libpng_launcher/src/bin/libafl_cc.rs b/fuzzers/inprocess/libfuzzer_libpng_launcher/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/src/bin/libafl_cc.rs rename to fuzzers/inprocess/libfuzzer_libpng_launcher/src/bin/libafl_cc.rs diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/src/bin/libafl_cxx.rs b/fuzzers/inprocess/libfuzzer_libpng_launcher/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_tcp_manager/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/libfuzzer_libpng_launcher/src/bin/libafl_cxx.rs diff --git a/fuzzers/libfuzzer_libpng_launcher/src/bin/libafl_libtool.rs b/fuzzers/inprocess/libfuzzer_libpng_launcher/src/bin/libafl_libtool.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_launcher/src/bin/libafl_libtool.rs 
rename to fuzzers/inprocess/libfuzzer_libpng_launcher/src/bin/libafl_libtool.rs diff --git a/fuzzers/libfuzzer_libpng_launcher/src/lib.rs b/fuzzers/inprocess/libfuzzer_libpng_launcher/src/lib.rs similarity index 91% rename from fuzzers/libfuzzer_libpng_launcher/src/lib.rs rename to fuzzers/inprocess/libfuzzer_libpng_launcher/src/lib.rs index e9def09841..0c512e87de 100644 --- a/fuzzers/libfuzzer_libpng_launcher/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_libpng_launcher/src/lib.rs @@ -14,9 +14,10 @@ use libafl::{ feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, fuzzer::{Fuzzer, StdFuzzer}, inputs::{BytesInput, HasTargetBytes}, - monitors::{MultiMonitor, OnDiskTOMLMonitor}, + monitors::{MultiMonitor, OnDiskTomlMonitor}, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::Tokens, }, observers::{CanTrack, HitcountsMapObserver, TimeObserver}, @@ -55,11 +56,19 @@ struct Opt { short, long, value_parser = Cores::from_cmdline, - help = "Spawn a client in each of the provided cores. Broker runs in the 0th core. 'all' to select all available cores. 'none' to run a client without binding to any core. eg: '1,2-4,6' selects the cores 1,2,3,4,6.", + help = "Spawn clients in each of the provided cores. Broker runs in the 0th core. 'all' to select all available cores. 'none' to run a client without binding to any core. eg: '1,2-4,6' selects the cores 1,2,3,4,6.", name = "CORES" )] cores: Cores, + #[arg( + long, + help = "Spawn n clients on each core, this is useful if clients don't fully load a client, e.g. because they `sleep` often.", + name = "OVERCOMMIT", + default_value = "1" + )] + overcommit: usize, + #[arg( short = 'p', long, @@ -131,12 +140,12 @@ pub extern "C" fn libafl_main() { let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory"); - let monitor = OnDiskTOMLMonitor::new( + let monitor = OnDiskTomlMonitor::new( "./fuzzer_stats.toml", MultiMonitor::new(|s| println!("{s}")), ); - let mut run_client = |state: Option<_>, mut restarting_mgr, _core_id| { + let mut run_client = |state: Option<_>, mut restarting_mgr, _client_description| { // Create an observation channel using the coverage map let edges_observer = HitcountsMapObserver::new(unsafe { std_edges_map_observer("edges") }).track_indices(); @@ -203,7 +212,9 @@ pub extern "C" fn libafl_main() { let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -231,7 +242,7 @@ pub extern "C" fn libafl_main() { // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. 
let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -253,6 +264,7 @@ pub extern "C" fn libafl_main() { .monitor(monitor) .run_client(&mut run_client) .cores(&cores) + .overcommit(opt.overcommit) .broker_port(broker_port) .remote_broker_addr(opt.remote_broker_addr) .stdout_file(Some("/dev/null")) diff --git a/fuzzers/libfuzzer_libpng_norestart/.gitignore b/fuzzers/inprocess/libfuzzer_libpng_norestart/.gitignore similarity index 100% rename from fuzzers/libfuzzer_libpng_norestart/.gitignore rename to fuzzers/inprocess/libfuzzer_libpng_norestart/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_libpng_norestart/Cargo.toml b/fuzzers/inprocess/libfuzzer_libpng_norestart/Cargo.toml new file mode 100644 index 0000000000..f278474d4e --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_libpng_norestart/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "libfuzzer_libpng_launcher_norestart" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +env_logger = "0.11.5" +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts", features = [ + "errors_backtrace", +] } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } + +clap = { version = "4.5.18", features = ["derive"] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +[lib] +name = "libfuzzer_libpng" +crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_norestart/Makefile.toml b/fuzzers/inprocess/libfuzzer_libpng_norestart/Makefile.toml similarity index 72% rename from fuzzers/libfuzzer_libpng_norestart/Makefile.toml rename to fuzzers/inprocess/libfuzzer_libpng_norestart/Makefile.toml index e63e7bc674..5769da2f63 100644 --- a/fuzzers/libfuzzer_libpng_norestart/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_libpng_norestart/Makefile.toml @@ -1,17 +1,21 @@ # Variables [env] -FUZZER_NAME='fuzzer_libpng_launcher' -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +FUZZER_NAME = 'fuzzer_libpng_launcher' +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE}/libafl_cxx' FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' PROJECT_DIR = { script = ["pwd"] } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this platform" ''' @@ -22,9 +26,9 @@ mac_alias = "libpng_unix" 
windows_alias = "unsupported" [tasks.libpng_unix] -condition = { files_not_exist = ["./libpng-1.6.37"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = ["./libpng-1.6.37"] } +script_runner = "@shell" +script = ''' wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz tar -xvf v1.6.37.tar.gz ''' @@ -36,7 +40,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] # Library [tasks.lib] @@ -45,13 +49,13 @@ mac_alias = "lib_unix" windows_alias = "unsupported" [tasks.lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ''' -dependencies = [ "libpng", "cc" ] +dependencies = ["libpng", "cc"] # Harness @@ -62,8 +66,17 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}", "-lm", "-lz"] -dependencies = [ "lib", "cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}", + "-lm", + "-lz", +] +dependencies = ["lib", "cc"] # Run the fuzzer [tasks.run] @@ -73,13 +86,13 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf corpus/ || true mkdir corpus/ || true cp seeds/* corpus/ || true ./${FUZZER_NAME} --cores 0 --input ./corpus ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Test [tasks.test] @@ -89,7 +102,7 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true rm -rf corpus/ || true mkdir corpus/ || true @@ -102,7 +115,7 @@ else exit 1 fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] @@ -113,8 +126,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -rf corpus/ || true rm -f ./${FUZZER_NAME} make -C libpng-1.6.37 clean diff --git a/fuzzers/libfuzzer_libpng_norestart/README.md b/fuzzers/inprocess/libfuzzer_libpng_norestart/README.md similarity index 100% rename from fuzzers/libfuzzer_libpng_norestart/README.md rename to fuzzers/inprocess/libfuzzer_libpng_norestart/README.md diff --git a/fuzzers/libfuzzer_libpng_norestart/harness.cc b/fuzzers/inprocess/libfuzzer_libpng_norestart/harness.cc similarity index 100% rename from fuzzers/libfuzzer_libpng_norestart/harness.cc rename to fuzzers/inprocess/libfuzzer_libpng_norestart/harness.cc diff --git a/fuzzers/libfuzzer_stb_image/corpus/not_kitty.png b/fuzzers/inprocess/libfuzzer_libpng_norestart/seeds/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_stb_image/corpus/not_kitty.png rename to fuzzers/inprocess/libfuzzer_libpng_norestart/seeds/not_kitty.png diff --git a/fuzzers/libfuzzer_stb_image/corpus/not_kitty_alpha.png b/fuzzers/inprocess/libfuzzer_libpng_norestart/seeds/not_kitty_alpha.png similarity index 100% rename from fuzzers/libfuzzer_stb_image/corpus/not_kitty_alpha.png rename to 
fuzzers/inprocess/libfuzzer_libpng_norestart/seeds/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_stb_image/corpus/not_kitty_gamma.png b/fuzzers/inprocess/libfuzzer_libpng_norestart/seeds/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_stb_image/corpus/not_kitty_gamma.png rename to fuzzers/inprocess/libfuzzer_libpng_norestart/seeds/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_stb_image/corpus/not_kitty_icc.png b/fuzzers/inprocess/libfuzzer_libpng_norestart/seeds/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_stb_image/corpus/not_kitty_icc.png rename to fuzzers/inprocess/libfuzzer_libpng_norestart/seeds/not_kitty_icc.png diff --git a/fuzzers/libfuzzer_libpng_centralized/src/bin/libafl_cc.rs b/fuzzers/inprocess/libfuzzer_libpng_norestart/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_centralized/src/bin/libafl_cc.rs rename to fuzzers/inprocess/libfuzzer_libpng_norestart/src/bin/libafl_cc.rs diff --git a/fuzzers/nautilus_sync/src/bin/libafl_cxx.rs b/fuzzers/inprocess/libfuzzer_libpng_norestart/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/nautilus_sync/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/libfuzzer_libpng_norestart/src/bin/libafl_cxx.rs diff --git a/fuzzers/libfuzzer_libpng_norestart/src/lib.rs b/fuzzers/inprocess/libfuzzer_libpng_norestart/src/lib.rs similarity index 94% rename from fuzzers/libfuzzer_libpng_norestart/src/lib.rs rename to fuzzers/inprocess/libfuzzer_libpng_norestart/src/lib.rs index a792467aaf..5acd766852 100644 --- a/fuzzers/libfuzzer_libpng_norestart/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_libpng_norestart/src/lib.rs @@ -10,17 +10,19 @@ use clap::Parser; use libafl::{ corpus::{Corpus, InMemoryOnDiskCorpus, OnDiskCorpus}, events::{ - launcher::Launcher, llmp::LlmpShouldSaveState, EventConfig, EventRestarter, - LlmpRestartingEventManager, + launcher::{ClientDescription, Launcher}, + llmp::LlmpShouldSaveState, + EventConfig, EventRestarter, LlmpRestartingEventManager, }, executors::{inprocess::InProcessExecutor, ExitKind}, feedback_or, feedback_or_fast, feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, fuzzer::{Fuzzer, StdFuzzer}, inputs::{BytesInput, HasTargetBytes}, - monitors::{MultiMonitor, OnDiskTOMLMonitor}, + monitors::{MultiMonitor, OnDiskTomlMonitor}, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::Tokens, }, observers::{CanTrack, HitcountsMapObserver, TimeObserver}, @@ -154,14 +156,14 @@ pub extern "C" fn libafl_main() { let shmem_provider = MmapShMemProvider::new().expect("Failed to init shared memory"); - let monitor = OnDiskTOMLMonitor::new( + let monitor = OnDiskTomlMonitor::new( "./fuzzer_stats.toml", MultiMonitor::new(|s| println!("{s}")), ); let mut run_client = |state: Option<_>, mut restarting_mgr: LlmpRestartingEventManager<_, _, _>, - core_id| { + client_description: ClientDescription| { // Create an observation channel using the coverage map let edges_observer = HitcountsMapObserver::new(unsafe { std_edges_map_observer("edges") }).track_indices(); @@ -228,7 +230,9 @@ pub extern "C" fn libafl_main() { let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -244,7 +248,7 @@ pub extern "C" fn libafl_main() { // The actual 
target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -256,7 +260,7 @@ pub extern "C" fn libafl_main() { &mut executor, &mut restarting_mgr, &opt.input, - &core_id, + &client_description.core_id(), &cores, ) .unwrap_or_else(|_| panic!("Failed to load initial corpus at {:?}", &opt.input)); diff --git a/fuzzers/libfuzzer_libpng/.gitignore b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/.gitignore similarity index 100% rename from fuzzers/libfuzzer_libpng/.gitignore rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/Cargo.toml b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/Cargo.toml new file mode 100644 index 0000000000..190ac85c19 --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "libfuzzer_libpng_tcp_manager" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] +# Forces a crash +crash = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +libafl = { path = "../../../libafl", features = ["default", "tcp_manager"] } +# libafl = { path = "../../../libafl/", features = ["default"] } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", + "sancov_cmplog", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } + +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +[lib] +name = "libfuzzer_libpng" +crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/Makefile.toml b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/Makefile.toml similarity index 71% rename from fuzzers/libfuzzer_libpng_tcp_manager/Makefile.toml rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/Makefile.toml index fb1dc6cfd2..7b0c8d0213 100644 --- a/fuzzers/libfuzzer_libpng_tcp_manager/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/Makefile.toml @@ -1,17 +1,21 @@ # Variables [env] -FUZZER_NAME='fuzzer_libpng' +FUZZER_NAME = 'fuzzer_libpng' PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE}/libafl_cxx' FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated 
yet on this" ''' @@ -22,9 +26,9 @@ mac_alias = "libpng_unix" windows_alias = "unsupported" [tasks.libpng_unix] -condition = { files_not_exist = ["./libpng-1.6.37"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = ["./libpng-1.6.37"] } +script_runner = "@shell" +script = ''' wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz tar -xvf v1.6.37.tar.gz ''' @@ -37,7 +41,7 @@ windows_alias = "unsupported" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -46,7 +50,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.crash_cxx] linux_alias = "crash_cxx_unix" @@ -55,7 +59,7 @@ windows_alias = "unsupported" [tasks.crash_cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--features=crash"] +args = ["build", "--profile", "${PROFILE}", "--features=crash"] [tasks.crash_cc] linux_alias = "crash_cc_unix" @@ -64,7 +68,7 @@ windows_alias = "unsupported" [tasks.crash_cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--features=crash"] +args = ["build", "--profile", "${PROFILE}", "--features=crash"] # Library [tasks.lib] @@ -73,13 +77,13 @@ mac_alias = "lib_unix" windows_alias = "unsupported" [tasks.lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ''' -dependencies = [ "libpng", "cxx", "cc" ] +dependencies = ["libpng", "cxx", "cc"] # Library [tasks.crash_lib] @@ -88,13 +92,13 @@ mac_alias = "crash_lib_unix" windows_alias = "unsupported" [tasks.crash_lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ''' -dependencies = [ "libpng", "crash_cxx", "crash_cc" ] +dependencies = ["libpng", "crash_cxx", "crash_cc"] # Harness [tasks.fuzzer] @@ -104,8 +108,17 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}", "-lm", "-lz"] -dependencies = [ "lib", "cxx", "cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}", + "-lm", + "-lz", +] +dependencies = ["lib", "cxx", "cc"] # Crashing Harness [tasks.fuzzer_crash] @@ -115,8 +128,17 @@ windows_alias = "unsupported" [tasks.fuzzer_crash_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}_crash", "-lm", "-lz"] -dependencies = [ "crash_lib", "crash_cxx", "crash_cc" ] +args = [ + "${PROJECT_DIR}/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}_crash", + "-lm", + 
"-lz", +] +dependencies = ["crash_lib", "crash_cxx", "crash_cc"] # Run the fuzzer [tasks.run] @@ -126,12 +148,12 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} & sleep 0.2 ./${FUZZER_NAME} 2>/dev/null ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Run the fuzzer with a crash @@ -142,13 +164,12 @@ windows_alias = "unsupported" [tasks.crash_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME}_crash & sleep 0.2 ./${FUZZER_NAME}_crash 2>/dev/null ''' -dependencies = [ "fuzzer_crash" ] - +dependencies = ["fuzzer_crash"] # Test @@ -159,7 +180,7 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true (timeout 31s ./${FUZZER_NAME} | tee fuzz_stdout.log 2>/dev/null || true) & sleep 0.2 @@ -171,17 +192,17 @@ else exit 1 fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test_mac] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true (timeout 31s ./${FUZZER_NAME} | tee fuzz_stdout.log 2>/dev/null || true) & sleep 0.2 timeout 30s ./${FUZZER_NAME} >/dev/null 2>/dev/null || true ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] @@ -192,8 +213,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} make -C libpng-1.6.37 clean cargo clean diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/README.md b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/README.md similarity index 100% rename from fuzzers/libfuzzer_libpng_tcp_manager/README.md rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/README.md diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty.png b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/corpus/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty.png rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/corpus/not_kitty.png diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_alpha.png b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_alpha.png rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/corpus/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_gamma.png b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_gamma.png rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/corpus/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_icc.png b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_icc.png rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/corpus/not_kitty_icc.png diff --git a/fuzzers/libfuzzer_libpng_cmin/harness.cc b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/harness.cc similarity index 100% rename from fuzzers/libfuzzer_libpng_cmin/harness.cc rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/harness.cc diff --git a/fuzzers/libfuzzer_libpng_norestart/src/bin/libafl_cc.rs 
b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_norestart/src/bin/libafl_cc.rs rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/src/bin/libafl_cc.rs diff --git a/fuzzers/nyx_libxml2_parallel/src/bin/libafl_cxx.rs b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/nyx_libxml2_parallel/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/src/bin/libafl_cxx.rs diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/src/lib.rs b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/src/lib.rs similarity index 93% rename from fuzzers/libfuzzer_libpng_tcp_manager/src/lib.rs rename to fuzzers/inprocess/libfuzzer_libpng_tcp_manager/src/lib.rs index 60dab533fa..e2b23d599e 100644 --- a/fuzzers/libfuzzer_libpng_tcp_manager/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_libpng_tcp_manager/src/lib.rs @@ -15,7 +15,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::Tokens, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, @@ -140,14 +141,19 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); let mut stages = tuple_list!(calibration, power); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( &edges_observer, - StdWeightedScheduler::with_schedule(&mut state, &edges_observer, Some(PowerSchedule::FAST)), + StdWeightedScheduler::with_schedule( + &mut state, + &edges_observer, + Some(PowerSchedule::fast()), + ), ); // A fuzzer with feedbacks and a corpus scheduler @@ -165,7 +171,9 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re *addr = 1; } } - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -182,7 +190,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. 
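(As the hunk above shows, calls into the `libafl_targets` shims — `libfuzzer_test_one_input` here and, just below, `libfuzzer_initialize` — are now wrapped in `unsafe` blocks. The same mechanical change recurs in every fuzzer touched by this diff, presumably because these shims are now declared `unsafe` in this release.)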
let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } diff --git a/fuzzers/libfuzzer_stb_image/.gitignore b/fuzzers/inprocess/libfuzzer_stb_image/.gitignore similarity index 100% rename from fuzzers/libfuzzer_stb_image/.gitignore rename to fuzzers/inprocess/libfuzzer_stb_image/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_stb_image/Cargo.toml b/fuzzers/inprocess/libfuzzer_stb_image/Cargo.toml new file mode 100644 index 0000000000..ab01b881e0 --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_stb_image/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "libfuzzer_stb_image" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" +build = "build.rs" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_edges", + "sancov_cmplog", + "libfuzzer", + "libfuzzer_no_link_main", +] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } diff --git a/fuzzers/libfuzzer_stb_image/Makefile.toml b/fuzzers/inprocess/libfuzzer_stb_image/Makefile.toml similarity index 75% rename from fuzzers/libfuzzer_stb_image/Makefile.toml rename to fuzzers/inprocess/libfuzzer_stb_image/Makefile.toml index 9c17671641..39a7908a60 100644 --- a/fuzzers/libfuzzer_stb_image/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_stb_image/Makefile.toml @@ -1,30 +1,32 @@ # Variables [env] -FUZZER_NAME='libfuzzer_stb_image' +FUZZER_NAME = 'libfuzzer_stb_image' PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } PROFILE = { value = "release" } -PROFILE_DIR = {value = "release" } -LIBAFL_CC = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc', mapping = {"windows" = '.\\target\\${PROFILE_DIR}\\libafl_cc.exe'} } -LIBAFL_CXX = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx', mapping = {"windows" = '.\\target\\${PROFILE_DIR}\\libafl_cxx.exe'} } -FUZZER = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libfuzzer_stb_image', mapping = {"windows" = '.\\target\\${PROFILE_DIR}\\libfuzzer_stb_image.exe'} } +PROFILE_DIR = { value = "release" } +LIBAFL_CC = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc', mapping = { "windows" = '.\\target\\${PROFILE_DIR}\\libafl_cc.exe' } } +LIBAFL_CXX = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx', mapping = { "windows" = '.\\target\\${PROFILE_DIR}\\libafl_cxx.exe' } } +FUZZER = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libfuzzer_stb_image', mapping = { "windows" = '.\\target\\${PROFILE_DIR}\\libfuzzer_stb_image.exe' } } # Compilers [tasks.cxx] condition = { files_not_exist = ["${LIBAFL_CXX}"] } command 
= "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] condition = { files_not_exist = ["${LIBAFL_CC}"] } command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] # Build the fuzzer [tasks.fuzzer] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cargo build --profile ${PROFILE} cp ${FUZZER} . ''' @@ -37,20 +39,20 @@ windows_alias = "run_windows" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} & sleep 0.2 ./${FUZZER_NAME} ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.run_windows] # Do nothing script_runner = "@shell" -script=''' +script = ''' echo "Not integrated into cargo-make yet." ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test] @@ -60,7 +62,7 @@ windows_alias = "test_windows" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true (timeout 31s ./${FUZZER_NAME} | tee fuzz_stdout.log 2>/dev/null || true) & sleep 0.2 @@ -72,33 +74,33 @@ else exit 1 fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test_mac] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true (timeout 31s ./${FUZZER_NAME} | tee fuzz_stdout.log 2>/dev/null || true) & sleep 0.2 timeout 30s ./${FUZZER_NAME} >/dev/null 2>/dev/null || true ''' -dependencies = [ "fuzzer"] +dependencies = ["fuzzer"] [tasks.test_windows] # Do nothing script_runner = "@shell" -script=''' +script = ''' echo "Not integrated into cargo-make yet." ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} cargo clean ''' diff --git a/fuzzers/libfuzzer_stb_image/README.md b/fuzzers/inprocess/libfuzzer_stb_image/README.md similarity index 100% rename from fuzzers/libfuzzer_stb_image/README.md rename to fuzzers/inprocess/libfuzzer_stb_image/README.md diff --git a/fuzzers/libfuzzer_stb_image/build.rs b/fuzzers/inprocess/libfuzzer_stb_image/build.rs similarity index 100% rename from fuzzers/libfuzzer_stb_image/build.rs rename to fuzzers/inprocess/libfuzzer_stb_image/build.rs diff --git a/fuzzers/libfuzzer_stb_image_sugar/corpus/not_kitty.png b/fuzzers/inprocess/libfuzzer_stb_image/corpus/not_kitty.png similarity index 100% rename from fuzzers/libfuzzer_stb_image_sugar/corpus/not_kitty.png rename to fuzzers/inprocess/libfuzzer_stb_image/corpus/not_kitty.png diff --git a/fuzzers/libfuzzer_stb_image_sugar/corpus/not_kitty_alpha.png b/fuzzers/inprocess/libfuzzer_stb_image/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/libfuzzer_stb_image_sugar/corpus/not_kitty_alpha.png rename to fuzzers/inprocess/libfuzzer_stb_image/corpus/not_kitty_alpha.png diff --git a/fuzzers/libfuzzer_stb_image_sugar/corpus/not_kitty_gamma.png b/fuzzers/inprocess/libfuzzer_stb_image/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/libfuzzer_stb_image_sugar/corpus/not_kitty_gamma.png rename to fuzzers/inprocess/libfuzzer_stb_image/corpus/not_kitty_gamma.png diff --git a/fuzzers/libfuzzer_stb_image_sugar/corpus/not_kitty_icc.png b/fuzzers/inprocess/libfuzzer_stb_image/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/libfuzzer_stb_image_sugar/corpus/not_kitty_icc.png rename to 
fuzzers/inprocess/libfuzzer_stb_image/corpus/not_kitty_icc.png diff --git a/fuzzers/libfuzzer_stb_image/harness.c b/fuzzers/inprocess/libfuzzer_stb_image/harness.c similarity index 100% rename from fuzzers/libfuzzer_stb_image/harness.c rename to fuzzers/inprocess/libfuzzer_stb_image/harness.c diff --git a/fuzzers/libfuzzer_stb_image/src/main.rs b/fuzzers/inprocess/libfuzzer_stb_image/src/main.rs similarity index 96% rename from fuzzers/libfuzzer_stb_image/src/main.rs rename to fuzzers/inprocess/libfuzzer_stb_image/src/main.rs index c59fa70cf7..77e7e06f1d 100644 --- a/fuzzers/libfuzzer_stb_image/src/main.rs +++ b/fuzzers/inprocess/libfuzzer_stb_image/src/main.rs @@ -16,7 +16,7 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator, token_mutations::I2SRandReplace, }, observers::{CanTrack, TimeObserver}, @@ -118,7 +118,9 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -137,7 +139,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } diff --git a/fuzzers/libfuzzer_stb_image/stb_image.h b/fuzzers/inprocess/libfuzzer_stb_image/stb_image.h similarity index 100% rename from fuzzers/libfuzzer_stb_image/stb_image.h rename to fuzzers/inprocess/libfuzzer_stb_image/stb_image.h diff --git a/fuzzers/libfuzzer_stb_image_sugar/.gitignore b/fuzzers/inprocess/libfuzzer_stb_image_sugar/.gitignore similarity index 100% rename from fuzzers/libfuzzer_stb_image_sugar/.gitignore rename to fuzzers/inprocess/libfuzzer_stb_image_sugar/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_stb_image_sugar/Cargo.toml b/fuzzers/inprocess/libfuzzer_stb_image_sugar/Cargo.toml new file mode 100644 index 0000000000..d34dce8319 --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_stb_image_sugar/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "libfuzzer_stb_image_sugar" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" +build = "build.rs" +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_edges", + "sancov_cmplog", + "libfuzzer", +] } +libafl_sugar = { path = "../../../libafl_sugar" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } diff --git a/fuzzers/libfuzzer_stb_image_sugar/Makefile.toml b/fuzzers/inprocess/libfuzzer_stb_image_sugar/Makefile.toml similarity index 65% rename from fuzzers/libfuzzer_stb_image_sugar/Makefile.toml rename to 
fuzzers/inprocess/libfuzzer_stb_image_sugar/Makefile.toml index 357a3865be..e4a2111eb2 100644 --- a/fuzzers/libfuzzer_stb_image_sugar/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_stb_image_sugar/Makefile.toml @@ -1,30 +1,32 @@ # Variables [env] -FUZZER_NAME='libfuzzer_stb_image_sugar' +FUZZER_NAME = 'libfuzzer_stb_image_sugar' PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } PROFILE = { value = "release" } -PROFILE_DIR = {value = "release" } -LIBAFL_CC = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc", mapping = {"windows" = '.\\target\\${PROFILE_DIR}\\libafl_cc.exe'} } -LIBAFL_CXX = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx", mapping = {"windows" = '.\\target\\${PROFILE_DIR}\\libafl_cxx.exe'} } -FUZZER = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libfuzzer_stb_image_sugar", mapping = {"windows" = '.\\target\\${PROFILE_DIR}\\libfuzzer_stb_image_sugar.exe'} } +PROFILE_DIR = { value = "release" } +LIBAFL_CC = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc", mapping = { "windows" = '.\\target\\${PROFILE_DIR}\\libafl_cc.exe' } } +LIBAFL_CXX = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx", mapping = { "windows" = '.\\target\\${PROFILE_DIR}\\libafl_cxx.exe' } } +FUZZER = { source = "${CARGO_MAKE_RUST_TARGET_OS}", default_value = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libfuzzer_stb_image_sugar", mapping = { "windows" = '.\\target\\${PROFILE_DIR}\\libfuzzer_stb_image_sugar.exe' } } # Compilers [tasks.cxx] condition = { files_not_exist = ["${LIBAFL_CXX}"] } command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] condition = { files_not_exist = ["${LIBAFL_CC}"] } command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] # Build the fuzzer [tasks.fuzzer] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cargo build --profile ${PROFILE} cp ${FUZZER} . ''' @@ -37,18 +39,18 @@ windows_alias = "run_windows" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} & ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.run_windows] # Do nothing script_runner = "@shell" -script=''' +script = ''' echo "Not integrated into cargo-make yet." ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test] @@ -58,28 +60,33 @@ windows_alias = "test_windows" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 31s ./${FUZZER_NAME} 2>/dev/null | tee fuzz_stdout.log || true -echo "The test is skipped. See https://github.com/AFLplusplus/LibAFL/issues/1176" +if [ -z "$(grep "corpus: 30" fuzz_stdout.log)" ]; then + echo "Fuzzer does not generate any testcases or any crashes" + exit 1 +else + echo "Fuzzer is working" +fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test_windows] # Do nothing script_runner = "@shell" -script=''' +script = ''' echo "Not integrated into cargo-make yet." 
''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} cargo clean ''' diff --git a/fuzzers/libfuzzer_stb_image_sugar/README.md b/fuzzers/inprocess/libfuzzer_stb_image_sugar/README.md similarity index 100% rename from fuzzers/libfuzzer_stb_image_sugar/README.md rename to fuzzers/inprocess/libfuzzer_stb_image_sugar/README.md diff --git a/fuzzers/libfuzzer_stb_image_sugar/build.rs b/fuzzers/inprocess/libfuzzer_stb_image_sugar/build.rs similarity index 100% rename from fuzzers/libfuzzer_stb_image_sugar/build.rs rename to fuzzers/inprocess/libfuzzer_stb_image_sugar/build.rs diff --git a/fuzzers/qemu_cmin/corpus/not_kitty.png b/fuzzers/inprocess/libfuzzer_stb_image_sugar/corpus/not_kitty.png similarity index 100% rename from fuzzers/qemu_cmin/corpus/not_kitty.png rename to fuzzers/inprocess/libfuzzer_stb_image_sugar/corpus/not_kitty.png diff --git a/fuzzers/qemu_cmin/corpus/not_kitty_alpha.png b/fuzzers/inprocess/libfuzzer_stb_image_sugar/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/qemu_cmin/corpus/not_kitty_alpha.png rename to fuzzers/inprocess/libfuzzer_stb_image_sugar/corpus/not_kitty_alpha.png diff --git a/fuzzers/qemu_cmin/corpus/not_kitty_gamma.png b/fuzzers/inprocess/libfuzzer_stb_image_sugar/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/qemu_cmin/corpus/not_kitty_gamma.png rename to fuzzers/inprocess/libfuzzer_stb_image_sugar/corpus/not_kitty_gamma.png diff --git a/fuzzers/qemu_cmin/corpus/not_kitty_icc.png b/fuzzers/inprocess/libfuzzer_stb_image_sugar/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/qemu_cmin/corpus/not_kitty_icc.png rename to fuzzers/inprocess/libfuzzer_stb_image_sugar/corpus/not_kitty_icc.png diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/harness.c b/fuzzers/inprocess/libfuzzer_stb_image_sugar/harness.c similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/fuzzer/harness.c rename to fuzzers/inprocess/libfuzzer_stb_image_sugar/harness.c diff --git a/fuzzers/libfuzzer_stb_image_sugar/src/main.rs b/fuzzers/inprocess/libfuzzer_stb_image_sugar/src/main.rs similarity index 93% rename from fuzzers/libfuzzer_stb_image_sugar/src/main.rs rename to fuzzers/inprocess/libfuzzer_stb_image_sugar/src/main.rs index ef4f169fd1..70722fdd53 100644 --- a/fuzzers/libfuzzer_stb_image_sugar/src/main.rs +++ b/fuzzers/inprocess/libfuzzer_stb_image_sugar/src/main.rs @@ -31,7 +31,7 @@ pub fn main() { fn fuzz(input_dirs: &[PathBuf], output_dir: PathBuf, cores: &Cores, broker_port: u16) { // Call LLVMFUzzerInitialize() if present. 
let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -40,7 +40,7 @@ fn fuzz(input_dirs: &[PathBuf], output_dir: PathBuf, cores: &Cores, broker_port: .output_dir(output_dir) .cores(cores) .broker_port(broker_port) - .harness(|buf| { + .harness(|buf| unsafe { libfuzzer_test_one_input(buf); }) .build() diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/stb_image.h b/fuzzers/inprocess/libfuzzer_stb_image_sugar/stb_image.h similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/fuzzer/stb_image.h rename to fuzzers/inprocess/libfuzzer_stb_image_sugar/stb_image.h diff --git a/fuzzers/libfuzzer_windows_asan/.gitignore b/fuzzers/inprocess/libfuzzer_windows_asan/.gitignore similarity index 100% rename from fuzzers/libfuzzer_windows_asan/.gitignore rename to fuzzers/inprocess/libfuzzer_windows_asan/.gitignore diff --git a/fuzzers/inprocess/libfuzzer_windows_asan/Cargo.toml b/fuzzers/inprocess/libfuzzer_windows_asan/Cargo.toml new file mode 100644 index 0000000000..cecbc2b545 --- /dev/null +++ b/fuzzers/inprocess/libfuzzer_windows_asan/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "libfuzzer_windows_asan" +version = "0.14.1" +authors = ["Max Ammann "] +edition = "2021" +categories = ["development-tools::testing"] + +[features] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "libfuzzer", + "sancov_pcguard_edges", +] } +libafl_cc = { path = "../../../libafl_cc" } +log = { version = "0.4.22", features = ["release_max_level_info"] } + +[build-dependencies] +cc = { version = "1.1.21" } + +[lib] +name = "libfuzzer_windows_asan" +crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_windows_asan/Makefile.toml b/fuzzers/inprocess/libfuzzer_windows_asan/Makefile.toml similarity index 73% rename from fuzzers/libfuzzer_windows_asan/Makefile.toml rename to fuzzers/inprocess/libfuzzer_windows_asan/Makefile.toml index 16112f488d..1c330a0bd2 100644 --- a/fuzzers/libfuzzer_windows_asan/Makefile.toml +++ b/fuzzers/inprocess/libfuzzer_windows_asan/Makefile.toml @@ -1,13 +1,17 @@ # Variables [env] -FUZZER_NAME='libfuzzer_windows_asan' -CARGO_TARGET_DIR = { value = "./target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +FUZZER_NAME = 'libfuzzer_windows_asan' +CARGO_TARGET_DIR = { value = "./target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on this" ''' @@ -19,7 +23,7 @@ windows_alias = "cxx_unix" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -28,7 +32,7 @@ windows_alias = "cc_unix" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", 
"--profile", "${PROFILE}"] [tasks.crash_cxx] linux_alias = "unsupported" @@ -47,7 +51,7 @@ mac_alias = "unsupported" windows_alias = "lib_unix" [tasks.lib_unix] -dependencies = [ "cxx", "cc" ] +dependencies = ["cxx", "cc"] # Harness [tasks.fuzzer] @@ -58,7 +62,7 @@ windows_alias = "fuzzer_windows" [tasks.fuzzer_windows] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" args = ["./harness.cpp", "-o", "${FUZZER_NAME}.exe"] -dependencies = [ "lib", "cxx", "cc" ] +dependencies = ["lib", "cxx", "cc"] # Run the fuzzer [tasks.run] @@ -68,9 +72,9 @@ windows_alias = "run_windows" # TODO [tasks.run_windows] script_runner = "@shell" -script=''' +script = ''' ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Test [tasks.test] @@ -80,14 +84,14 @@ windows_alias = "test_windows" # TODO [tasks.test_windows] script_runner = "@shell" -script=''' +script = ''' start "" "${FUZZER_NAME}.exe" start "" "${FUZZER_NAME}.exe" #ping is for timeout ping -n 10 127.0.0.1>NUL && taskkill /im ${FUZZER_NAME}.exe /F >nul 2>nul dir /a-d "crashes\*" && (echo Files exist) || (exit /b 1337) ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] @@ -98,8 +102,8 @@ windows_alias = "clean_windows" [tasks.clean_windows] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' del ./${FUZZER_NAME} cargo clean -''' \ No newline at end of file +''' diff --git a/fuzzers/libfuzzer_windows_asan/README.md b/fuzzers/inprocess/libfuzzer_windows_asan/README.md similarity index 100% rename from fuzzers/libfuzzer_windows_asan/README.md rename to fuzzers/inprocess/libfuzzer_windows_asan/README.md diff --git a/fuzzers/libfuzzer_windows_asan/corpus/hello_world b/fuzzers/inprocess/libfuzzer_windows_asan/corpus/hello_world similarity index 100% rename from fuzzers/libfuzzer_windows_asan/corpus/hello_world rename to fuzzers/inprocess/libfuzzer_windows_asan/corpus/hello_world diff --git a/fuzzers/libfuzzer_windows_asan/harness.cpp b/fuzzers/inprocess/libfuzzer_windows_asan/harness.cpp similarity index 100% rename from fuzzers/libfuzzer_windows_asan/harness.cpp rename to fuzzers/inprocess/libfuzzer_windows_asan/harness.cpp diff --git a/fuzzers/libfuzzer_windows_asan/src/bin/libafl_cc.rs b/fuzzers/inprocess/libfuzzer_windows_asan/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/libfuzzer_windows_asan/src/bin/libafl_cc.rs rename to fuzzers/inprocess/libfuzzer_windows_asan/src/bin/libafl_cc.rs diff --git a/fuzzers/libfuzzer_windows_asan/src/bin/libafl_cxx.rs b/fuzzers/inprocess/libfuzzer_windows_asan/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/libfuzzer_windows_asan/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/libfuzzer_windows_asan/src/bin/libafl_cxx.rs diff --git a/fuzzers/libfuzzer_windows_asan/src/lib.rs b/fuzzers/inprocess/libfuzzer_windows_asan/src/lib.rs similarity index 92% rename from fuzzers/libfuzzer_windows_asan/src/lib.rs rename to fuzzers/inprocess/libfuzzer_windows_asan/src/lib.rs index 0d1401a8d5..3ae7f82b15 100644 --- a/fuzzers/libfuzzer_windows_asan/src/lib.rs +++ b/fuzzers/inprocess/libfuzzer_windows_asan/src/lib.rs @@ -10,7 +10,10 @@ use libafl::{ fuzzer::{Fuzzer, StdFuzzer}, inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, - mutators::scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + mutators::{ + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, + }, observers::{CanTrack, HitcountsMapObserver, 
TimeObserver}, schedulers::{ powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, StdWeightedScheduler, @@ -107,14 +110,19 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); - let power = StdPowerMutationalStage::new(mutator); + let power: StdPowerMutationalStage<_, _, BytesInput, _, _> = + StdPowerMutationalStage::new(mutator); let mut stages = tuple_list!(calibration, power); // A minimization+queue policy to get testcasess from the corpus let scheduler = IndexesLenTimeMinimizerScheduler::new( &edges_observer, - StdWeightedScheduler::with_schedule(&mut state, &edges_observer, Some(PowerSchedule::FAST)), + StdWeightedScheduler::with_schedule( + &mut state, + &edges_observer, + Some(PowerSchedule::fast()), + ), ); // A fuzzer with feedbacks and a corpus scheduler @@ -124,7 +132,9 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -147,7 +157,7 @@ fn fuzz(corpus_dirs: &[PathBuf], objective_dir: PathBuf, broker_port: u16) -> Re // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } diff --git a/fuzzers/inprocess/sqlite_centralized_multi_machine/Cargo.toml b/fuzzers/inprocess/sqlite_centralized_multi_machine/Cargo.toml new file mode 100644 index 0000000000..526258bc11 --- /dev/null +++ b/fuzzers/inprocess/sqlite_centralized_multi_machine/Cargo.toml @@ -0,0 +1,65 @@ +[package] +name = "libfuzzer_libpng_launcher_centralized_multi_machine" +version = "0.14.1" +authors = [ + "Romain Malmain ", + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[build-dependencies] +cc = { version = "1.1.21", features = ["parallel"] } +which = "6.0.3" + +[dependencies] +# no llmp compression for now, better perfs. 
+libafl = { path = "../../../libafl", default-features = false, features = [ + "std", + "derive", + "llmp_small_maps", + "llmp_broker_timeouts", + "rand_trait", + "fork", + "prelude", + "gzip", + "regex", + "serdeany_autoreg", + "tui_monitor", + "std", + "derive", + "rand_trait", + "fork", + "prelude", + "gzip", + "regex", + "scalability_introspection", + "multi_machine", + "errors_backtrace", +] } +libafl_bolts = { path = "../../../libafl_bolts", features = ["xxh3"] } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } + +clap = { version = "4.5.18", features = ["derive"] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } +env_logger = "0.11.5" + +[lib] +name = "libfuzzer_libpng" +crate-type = ["staticlib"] diff --git a/fuzzers/inprocess/sqlite_centralized_multi_machine/README.md b/fuzzers/inprocess/sqlite_centralized_multi_machine/README.md new file mode 100644 index 0000000000..b9ffdecb35 --- /dev/null +++ b/fuzzers/inprocess/sqlite_centralized_multi_machine/README.md @@ -0,0 +1,47 @@ +# Libfuzzer for libpng, with launcher + +This folder contains an example fuzzer for libpng, using LLMP for fast multi-process fuzzing and crash detection. +To show off crash detection, we added a `ud2` instruction to the harness, edit harness.cc if you want a non-crashing example. +It has been tested on Linux. + +In contrast to the normal libfuzzer libpng example, this uses the `launcher` feature, that automatically spawns `n` child processes, and binds them to a free core. + +## Build + +To build this example, run + +```bash +cargo build --release +``` + +This will build the library with the fuzzer (src/lib.rs) with the libfuzzer compatibility layer and the SanitizerCoverage runtime functions for coverage feedback. +In addition, it will also build two C and C++ compiler wrappers (bin/libafl_c(libafl_c/xx).rs) that you must use to compile the target. + +Then download libpng, and unpack the archive: +```bash +wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz +tar -xvf v1.6.37.tar.gz +``` + +Now compile libpng, using the libafl_cc compiler wrapper: + +```bash +cd libpng-1.6.37 +./configure +make CC=../target/release/libafl_cc CXX=../target/release/libafl_cxx -j `nproc` +``` + +You can find the static lib at `libpng-1.6.37/.libs/libpng16.a`. + +Now, we have to build the libfuzzer harness and link all together to create our fuzzer binary. + +``` +cd .. +./target/release/libafl_cxx ./harness.cc libpng-1.6.37/.libs/libpng16.a -I libpng-1.6.37/ -o fuzzer_libpng -lz -lm +``` + +Afterwards, the fuzzer will be ready to run. + +## Run + +Just run once, the launcher feature should do the rest. \ No newline at end of file diff --git a/fuzzers/inprocess/sqlite_centralized_multi_machine/build.sh b/fuzzers/inprocess/sqlite_centralized_multi_machine/build.sh new file mode 100755 index 0000000000..7ca12f128f --- /dev/null +++ b/fuzzers/inprocess/sqlite_centralized_multi_machine/build.sh @@ -0,0 +1,42 @@ +#!/bin/bash + +if [ ! 
-d "sqlite3" ]; then + curl 'https://sqlite.org/src/tarball/sqlite.tar.gz?r=c78cbf2e86850cc6' -o sqlite3.tar.gz && mkdir sqlite3 && pushd sqlite3 && tar xzf ../sqlite3.tar.gz --strip-components 1 && popd + mkdir corpus + find ./sqlite3 -name "*.test" -exec cp {} corpus/ \; +fi + +if [ "$1" = "d" ]; then + cargo build +else + cargo build --release +fi + +export CC=`pwd`/target/release/libafl_cc +export CXX=`pwd`/target/release/libafl_cxx +export CFLAGS='--libafl' +export CXXFLAGS='--libafl' +export CFLAGS="$CFLAGS -DSQLITE_MAX_LENGTH=128000000 \ + -DSQLITE_MAX_SQL_LENGTH=128000000 \ + -DSQLITE_MAX_MEMORY=25000000 \ + -DSQLITE_PRINTF_PRECISION_LIMIT=1048576 \ + -DSQLITE_DEBUG=1 \ + -DSQLITE_MAX_PAGE_COUNT=16384" +pushd sqlite3 + +if [ ! -f "Makefile" ]; then + echo "Run configure..." + ./configure +fi +make -j$(nproc) +make sqlite3.c +popd + +if [ "$1" = "release" ]; then + ./target/release/libafl_cc --libafl -I ./sqlite3 -c ./sqlite3/test/ossfuzz.c -o ./sqlite3/test/ossfuzz.o + ./target/release/libafl_cxx --libafl -o ossfuzz ./sqlite3/test/ossfuzz.o ./sqlite3/sqlite3.o -pthread -ldl -lz +else + ./target/debug/libafl_cc --libafl -I ./sqlite3 -c ./sqlite3/test/ossfuzz.c -o ./sqlite3/test/ossfuzz.o + ./target/debug/libafl_cxx --libafl -o ossfuzz ./sqlite3/test/ossfuzz.o ./sqlite3/sqlite3.o -pthread -ldl -lz +fi + diff --git a/fuzzers/inprocess/sqlite_centralized_multi_machine/run_child.sh b/fuzzers/inprocess/sqlite_centralized_multi_machine/run_child.sh new file mode 100755 index 0000000000..5ee94ffb0c --- /dev/null +++ b/fuzzers/inprocess/sqlite_centralized_multi_machine/run_child.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +./ossfuzz --cores 0-3 --input ./corpus --parent-addr 0.0.0.0:50000 --broker-port 3000 diff --git a/fuzzers/inprocess/sqlite_centralized_multi_machine/run_parent.sh b/fuzzers/inprocess/sqlite_centralized_multi_machine/run_parent.sh new file mode 100755 index 0000000000..7ef7721d0f --- /dev/null +++ b/fuzzers/inprocess/sqlite_centralized_multi_machine/run_parent.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +./ossfuzz --cores 0-1 --input ./corpus diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/src/bin/libafl_cc.rs b/fuzzers/inprocess/sqlite_centralized_multi_machine/src/bin/libafl_cc.rs similarity index 100% rename from fuzzers/libfuzzer_libpng_tcp_manager/src/bin/libafl_cc.rs rename to fuzzers/inprocess/sqlite_centralized_multi_machine/src/bin/libafl_cc.rs diff --git a/fuzzers/nyx_libxml2_standalone/src/bin/libafl_cxx.rs b/fuzzers/inprocess/sqlite_centralized_multi_machine/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/nyx_libxml2_standalone/src/bin/libafl_cxx.rs rename to fuzzers/inprocess/sqlite_centralized_multi_machine/src/bin/libafl_cxx.rs diff --git a/fuzzers/inprocess/sqlite_centralized_multi_machine/src/lib.rs b/fuzzers/inprocess/sqlite_centralized_multi_machine/src/lib.rs new file mode 100644 index 0000000000..66d9963f07 --- /dev/null +++ b/fuzzers/inprocess/sqlite_centralized_multi_machine/src/lib.rs @@ -0,0 +1,312 @@ +//! A libfuzzer-like fuzzer with llmp-multithreading support and restarts +//! The example harness is built for libpng. +//! In this example, you will see the use of the `launcher` feature. +//! The `launcher` will spawn new processes for each cpu core. 
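The module comment above, together with the `run_parent.sh` and `run_child.sh` scripts added earlier, only hints at how a multi-machine run is started. A hedged usage sketch, mirroring those scripts and assuming the `ossfuzz` binary produced by `build.sh`; the addresses, ports, and core ranges are illustrative:

```bash
# On the parent machine: fuzz on cores 0-1; child nodes will connect to this process
# (the node listening port stays at its builder default; --node-listening-port overrides it).
./ossfuzz --cores 0-1 --input ./corpus

# On each child machine: bind cores 0-3, point --parent-addr at the parent
# (replace 0.0.0.0 with the parent's reachable address when the hosts differ),
# and give it its own broker port (the parent keeps the default, 1337).
./ossfuzz --cores 0-3 --input ./corpus --parent-addr 0.0.0.0:50000 --broker-port 3000
```

The flag names come from the `Opt` struct below; `--parent-addr` and `--node-listening-port` feed the `NodeDescriptor` that the `CentralizedLauncher` uses to build the parent/child topology.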
+use core::time::Duration; +use std::{env, net::SocketAddr, path::PathBuf, str::FromStr}; + +use clap::{self, Parser}; +use libafl::{ + corpus::{Corpus, InMemoryCorpus, OnDiskCorpus}, + events::{ + centralized::CentralizedEventManager, launcher::CentralizedLauncher, + multi_machine::NodeDescriptor, ClientDescription, EventConfig, + }, + executors::{inprocess::InProcessExecutor, ExitKind}, + feedback_or, feedback_or_fast, + feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, + fuzzer::{Fuzzer, StdFuzzer}, + inputs::{BytesInput, HasTargetBytes}, + monitors::MultiMonitor, + mutators::{ + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, + token_mutations::Tokens, + }, + observers::{CanTrack, HitcountsMapObserver, TimeObserver}, + schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, + stages::mutational::StdMutationalStage, + state::{HasCorpus, StdState}, + Error, HasMetadata, +}; +use libafl_bolts::{ + core_affinity::Cores, + rands::StdRand, + shmem::{ShMemProvider, StdShMemProvider}, + tuples::{tuple_list, Merge}, + AsSlice, +}; +use libafl_targets::{libfuzzer_initialize, libfuzzer_test_one_input, std_edges_map_observer}; +use mimalloc::MiMalloc; + +#[global_allocator] +static GLOBAL: MiMalloc = MiMalloc; + +/// Parse a millis string to a [`Duration`]. Used for arg parsing. +fn timeout_from_millis_str(time: &str) -> Result { + Ok(Duration::from_millis(time.parse()?)) +} + +/// The commandline args this fuzzer accepts +#[derive(Debug, Parser)] +#[command( + name = "libfuzzer_libpng_launcher", + about = "A libfuzzer-like fuzzer for libpng with llmp-multithreading support and a launcher", + author = "Andrea Fioraldi , Dominik Maier " +)] +struct Opt { + #[arg( + short, + long, + value_parser = Cores::from_cmdline, + help = "Spawn a client in each of the provided cores. Broker runs in the 0th core. 'all' to select all available cores. 'none' to run a client without binding to any core. 
eg: '1,2-4,6' selects the cores 1,2,3,4,6.", + name = "CORES" + )] + cores: Cores, + + #[arg( + short = 'p', + long, + help = "Choose the broker TCP port, default is 1337", + name = "PORT", + default_value = "1337" + )] + broker_port: u16, + + #[arg(short = 'a', long, help = "Specify a remote broker", name = "REMOTE")] + remote_broker_addr: Option, + + #[arg( + short, + long, + help = "Set an initial corpus directory", + name = "INPUT", + required = true + )] + input: Vec, + + #[arg( + short, + long, + help = "Set the output directory, default is ./out", + name = "OUTPUT", + default_value = "./out" + )] + output: PathBuf, + + #[arg( + value_parser = timeout_from_millis_str, + short, + long, + help = "Set the exeucution timeout in milliseconds, default is 10000", + name = "TIMEOUT", + default_value = "10000" + )] + timeout: Duration, + + /* + /// This fuzzer has hard-coded tokens + #[arg( + + short = "x", + long, + help = "Feed the fuzzer with an user-specified list of tokens (often called \"dictionary\"", + name = "TOKENS", + multiple = true + )] + tokens: Vec, + */ + #[arg( + long, + help = "The address of the parent node to connect to, if any", + name = "PARENT_ADDR", + default_value = None + )] + parent_addr: Option, + + #[arg( + long, + help = "The port on which the node will listen on, if children are to be expected", + name = "NODE_LISTENING_PORT", + default_value = None + )] + node_listening_port: Option, +} + +/// The main fn, `no_mangle` as it is a C symbol +#[no_mangle] +pub extern "C" fn libafl_main() { + env_logger::init(); + + // Registry the metadata types used in this fuzzer + // Needed only on no_std + // unsafe { RegistryBuilder::register::(); } + let opt = Opt::parse(); + + let broker_port = opt.broker_port; + let cores = opt.cores; + + println!( + "Workdir: {:?}", + env::current_dir().unwrap().to_string_lossy().to_string() + ); + + let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory"); + + let monitor = MultiMonitor::new(|s| println!("{s}")); + + let mut secondary_run_client = + |state: Option<_>, + mut mgr: CentralizedEventManager<_, _, _, _>, + _client_description: ClientDescription| { + // Create an observation channel using the coverage map + let edges_observer = + HitcountsMapObserver::new(unsafe { std_edges_map_observer("edges") }) + .track_indices(); + + // Create an observation channel to keep track of the execution time + let time_observer = TimeObserver::new("time"); + + // Feedback to rate the interestingness of an input + // This one is composed by two Feedbacks in OR + let mut feedback = feedback_or!( + // New maximization map feedback linked to the edges observer and the feedback state + MaxMapFeedback::new(&edges_observer), + // Time feedback, this one does not need a feedback state + TimeFeedback::new(&time_observer) + ); + + // A feedback to choose if an input is a solution or not + let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new()); + + // If not restarting, create a State from scratch + let mut state = state.unwrap_or_else(|| { + StdState::new( + // RNG + StdRand::new(), + // Corpus that will be evolved, we keep it in memory for performance + InMemoryCorpus::new(), + // Corpus in which we store solutions (crashes in this example), + // on disk so the user can get them after stopping the fuzzer + OnDiskCorpus::new(&opt.output).unwrap(), + // States of the feedbacks. + // The feedbacks can report the data that should persist in the State. 
+ &mut feedback, + // Same for objective feedbacks + &mut objective, + ) + .unwrap() + }); + + println!("We're a client, let's fuzz :)"); + + // Create a PNG dictionary if not existing + if state.metadata_map().get::().is_none() { + state.add_metadata(Tokens::from([ + vec![137, 80, 78, 71, 13, 10, 26, 10], // PNG header + "IHDR".as_bytes().to_vec(), + "IDAT".as_bytes().to_vec(), + "PLTE".as_bytes().to_vec(), + "IEND".as_bytes().to_vec(), + ])); + } + + // Setup a basic mutator with a mutational stage + let mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); + let mut stages = tuple_list!(StdMutationalStage::new(mutator)); + + // A minimization+queue policy to get testcasess from the corpus + let scheduler = + IndexesLenTimeMinimizerScheduler::new(&edges_observer, QueueScheduler::new()); + + // A fuzzer with feedbacks and a corpus scheduler + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + // The wrapped harness function, calling out to the LLVM-style harness + let mut harness = |input: &BytesInput| { + let target = input.target_bytes(); + let buf = target.as_slice(); + unsafe { + libfuzzer_test_one_input(buf); + } + ExitKind::Ok + }; + + // Create the executor for an in-process function with one observer for edge coverage and one for the execution time + #[cfg(target_os = "linux")] + let mut executor = InProcessExecutor::batched_timeout( + &mut harness, + tuple_list!(edges_observer, time_observer), + &mut fuzzer, + &mut state, + &mut mgr, + opt.timeout, + )?; + + #[cfg(not(target_os = "linux"))] + let mut executor = InProcessExecutor::with_timeout( + &mut harness, + tuple_list!(edges_observer, time_observer), + &mut fuzzer, + &mut state, + &mut mgr, + opt.timeout, + )?; + + // The actual target run starts here. + // Call LLVMFUzzerInitialize() if present. 
+ let args: Vec = env::args().collect(); + if unsafe { libfuzzer_initialize(&args) } == -1 { + println!("Warning: LLVMFuzzerInitialize failed with -1"); + } + + // In case the corpus is empty (on first run), reset + if state.must_load_initial_inputs() { + state + .load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &opt.input) + .unwrap_or_else(|_| { + panic!("Failed to load initial corpus at {:?}", &opt.input) + }); + println!("We imported {} inputs from disk.", state.corpus().count()); + } + if !mgr.is_main() { + fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; + } else { + let mut empty_stages = tuple_list!(); + fuzzer.fuzz_loop(&mut empty_stages, &mut executor, &mut state, &mut mgr)?; + } + Ok(()) + }; + + let mut main_run_client = secondary_run_client; // clone it just for borrow checker + + let parent_addr: Option = opt + .parent_addr + .map(|parent_str| SocketAddr::from_str(parent_str.as_str()).expect("Wrong parent address")); + + let mut node_description = NodeDescriptor::builder().parent_addr(parent_addr).build(); + + if opt.node_listening_port.is_some() { + node_description.node_listening_port = opt.node_listening_port; + } + + match CentralizedLauncher::builder() + .shmem_provider(shmem_provider) + .configuration(EventConfig::from_name("default")) + .monitor(monitor) + .secondary_run_client(&mut secondary_run_client) + .main_run_client(&mut main_run_client) + .cores(&cores) + .broker_port(broker_port) + .centralized_broker_port(broker_port + 1) + .remote_broker_addr(opt.remote_broker_addr) + .multi_machine_node_descriptor(node_description) + // .stdout_file(Some("/dev/null")) + .build() + .launch() + { + Ok(()) => (), + Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. Good bye."), + Err(err) => panic!("Failed to run launcher: {err:?}"), + } +} diff --git a/fuzzers/libafl_atheris/Cargo.toml b/fuzzers/libafl_atheris/Cargo.toml deleted file mode 100644 index 349ebdb01e..0000000000 --- a/fuzzers/libafl_atheris/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "libafl_atheris" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "4.4" - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["pointer_maps", "sancov_cmplog", "libfuzzer", "sancov_8bit"] } -clap = { version = "4.0", features = ["default"] } - -[lib] -name = "afl_atheris" -crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libmozjpeg/Cargo.toml b/fuzzers/libfuzzer_libmozjpeg/Cargo.toml deleted file mode 100644 index b8b0f67929..0000000000 --- a/fuzzers/libfuzzer_libmozjpeg/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "libfuzzer_libmozjpeg" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_edges", "sancov_value_profile", "libfuzzer"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -mimalloc = { version = "*", default-features 
= false } - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } - -[lib] -name = "libfuzzer_libmozjpeg" -crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng/Cargo.toml b/fuzzers/libfuzzer_libpng/Cargo.toml deleted file mode 100644 index 4dedd0571b..0000000000 --- a/fuzzers/libfuzzer_libpng/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "libfuzzer_libpng" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] -# Forces a crash -crash = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "4.4" - -[dependencies] -libafl = { path = "../../libafl/", features = ["default"] } -# libafl = { path = "../../libafl/", features = ["default"] } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer", "sancov_cmplog"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "libfuzzer_libpng" -crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_accounting/Cargo.toml b/fuzzers/libfuzzer_libpng_accounting/Cargo.toml deleted file mode 100644 index cfb81b5292..0000000000 --- a/fuzzers/libfuzzer_libpng_accounting/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "libfuzzer_libpng_accounting" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "4.4" - -[dependencies] -libafl = { path = "../../libafl/", features = ["std", "derive", "llmp_compression", "introspection"] } -libafl_bolts = { path = "../../libafl_bolts/", features = ["std", "derive", "llmp_compression"] } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -clap = { version = "4.0", features = ["derive"] } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "libfuzzer_libpng" -crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/Cargo.toml b/fuzzers/libfuzzer_libpng_aflpp_ui/Cargo.toml deleted file mode 100644 index 7204a832d2..0000000000 --- a/fuzzers/libfuzzer_libpng_aflpp_ui/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "libfuzzer_libpng_AFLStyle_UI" -version = "0.0.1" -authors = ["Heng Zhang ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] -# Forces a crash -crash = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "4.4" - -[dependencies] -libafl = { path = "../../libafl/", features = ["default"] } -# libafl = { path = "../../libafl/", features = ["default"] } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer", "sancov_cmplog"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "libfuzzer_libpng" 
-crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/Makefile.toml b/fuzzers/libfuzzer_libpng_aflpp_ui/Makefile.toml deleted file mode 100644 index 3624a0b321..0000000000 --- a/fuzzers/libfuzzer_libpng_aflpp_ui/Makefile.toml +++ /dev/null @@ -1,194 +0,0 @@ -# Variables -[env] -FUZZER_NAME='fuzzer_libpng' -PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target"} -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} -LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' -LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE}/libafl_cxx' -FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' - -[tasks.unsupported] -script_runner="@shell" -script=''' -echo "Cargo-make not integrated yet on this" -''' - -# libpng -[tasks.libpng] -linux_alias = "libpng_unix" -mac_alias = "libpng_unix" -windows_alias = "unsupported" - -[tasks.libpng_unix] -condition = { files_not_exist = ["./libpng-1.6.37"]} -script_runner="@shell" -script=''' -wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz -tar -xvf v1.6.37.tar.gz -''' - -# Compilers -[tasks.cxx] -linux_alias = "cxx_unix" -mac_alias = "cxx_unix" -windows_alias = "unsupported" - -[tasks.cxx_unix] -command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] - -[tasks.cc] -linux_alias = "cc_unix" -mac_alias = "cc_unix" -windows_alias = "unsupported" - -[tasks.cc_unix] -command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] - -[tasks.crash_cxx] -linux_alias = "crash_cxx_unix" -mac_alias = "crash_cxx_unix" -windows_alias = "unsupported" - -[tasks.crash_cxx_unix] -command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--features=crash"] - -[tasks.crash_cc] -linux_alias = "crash_cc_unix" -mac_alias = "crash_cc_unix" -windows_alias = "unsupported" - -[tasks.crash_cc_unix] -command = "cargo" -args = ["build" , "--profile", "${PROFILE}", "--features=crash"] - -# Library -[tasks.lib] -linux_alias = "lib_unix" -mac_alias = "lib_unix" -windows_alias = "unsupported" - -[tasks.lib_unix] -script_runner="@shell" -script=''' -cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes -cd "${PROJECT_DIR}" -make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -''' -dependencies = [ "libpng", "cxx", "cc" ] - -# Library -[tasks.crash_lib] -linux_alias = "crash_lib_unix" -mac_alias = "crash_lib_unix" -windows_alias = "unsupported" - -[tasks.crash_lib_unix] -script_runner="@shell" -script=''' -cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes -cd "${PROJECT_DIR}" -make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -''' -dependencies = [ "libpng", "crash_cxx", "crash_cc" ] - -# Harness -[tasks.fuzzer] -linux_alias = "fuzzer_unix" -mac_alias = "fuzzer_unix" -windows_alias = "unsupported" - -[tasks.fuzzer_unix] -command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}", "-lm", "-lz"] -dependencies = [ "lib", "cxx", "cc" ] - -# Crashing Harness -[tasks.fuzzer_crash] -linux_alias = "fuzzer_crash_unix" -mac_alias = "fuzzer_crash_unix" -windows_alias = "unsupported" - -[tasks.fuzzer_crash_unix] -command = 
"${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}_crash", "-lm", "-lz"] -dependencies = [ "crash_lib", "crash_cxx", "crash_cc" ] - -# Run the fuzzer -[tasks.run] -linux_alias = "run_unix" -mac_alias = "run_unix" -windows_alias = "unsupported" - -[tasks.run_unix] -script_runner = "@shell" -script=''' -./${FUZZER_NAME} & -sleep 0.2 -./${FUZZER_NAME} 2>/dev/null -''' -dependencies = [ "fuzzer" ] - - -# Run the fuzzer with a crash -[tasks.crash] -linux_alias = "crash_unix" -mac_alias = "crash_unix" -windows_alias = "unsupported" - -[tasks.crash_unix] -script_runner = "@shell" -script=''' -./${FUZZER_NAME}_crash & -sleep 0.2 -./${FUZZER_NAME}_crash 2>/dev/null -''' -dependencies = [ "fuzzer_crash" ] - - - -# Test -[tasks.test] -linux_alias = "test_unix" -mac_alias = "test_mac" -windows_alias = "unsupported" - -[tasks.test_unix] -script_runner = "@shell" -script=''' -rm -rf libafl_unix_shmem_server || true -(timeout --foreground 11s ./${FUZZER_NAME} >/dev/null 2>/dev/null || true) & -sleep 0.2 -timeout --foreground 10s ./${FUZZER_NAME} >/dev/null 2>/dev/null || true -''' -dependencies = [ "fuzzer" ] - -[tasks.test_mac] -script_runner = "@shell" -script=''' -rm -rf libafl_unix_shmem_server || true -(timeout --foreground 11s ./${FUZZER_NAME} | tee fuzz_stdout.log 2>/dev/null || true) & -sleep 0.2 -timeout --foreground 10s ./${FUZZER_NAME} >/dev/null 2>/dev/null || true -''' -dependencies = [ "fuzzer" ] - -# Clean up -[tasks.clean] -linux_alias = "clean_unix" -mac_alias = "clean_unix" -windows_alias = "unsupported" - -[tasks.clean_unix] -# Disable default `clean` definition -clear = true -script_runner="@shell" -script=''' -rm -f ./${FUZZER_NAME} -make -C libpng-1.6.37 clean -cargo clean -''' diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/README.md b/fuzzers/libfuzzer_libpng_aflpp_ui/README.md deleted file mode 100644 index f87639b920..0000000000 --- a/fuzzers/libfuzzer_libpng_aflpp_ui/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# Libfuzzer for libpng, with AFL-style UI - -This folder contains an example fuzzer for libpng, using LLMP for fast multi-process fuzzing and crash detection. - -In contrast to other fuzzer examples, it keeps track of AFL style metrics and display them in the terminal. - -## Build - -To build this example, run - -```bash -cargo build --release -``` - -This will build the library with the fuzzer (src/lib.rs) with the libfuzzer compatibility layer and the SanitizerCoverage runtime functions for coverage feedback. -In addition, it will also build two C and C++ compiler wrappers (bin/libafl_c(libafl_c/xx).rs) that you must use to compile the target. - -The compiler wrappers, `libafl_cc` and `libafl_cxx`, will end up in `./target/release/` (or `./target/debug`, in case you did not build with the `--release` flag). - -Then download libpng, and unpack the archive: -```bash -wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz -tar -xvf v1.6.37.tar.gz -``` - -Now compile libpng, using the libafl_cc compiler wrapper: - -```bash -cd libpng-1.6.37 -./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes -make CC="$(pwd)/../target/release/libafl_cc" CXX="$(pwd)/../target/release/libafl_cxx" -j `nproc` -``` - -You can find the static lib at `libpng-1.6.37/.libs/libpng16.a`. - -Now, we have to build the libfuzzer harness and link all together to create our fuzzer binary. - -``` -cd .. 
-./target/release/libafl_cxx ./harness.cc libpng-1.6.37/.libs/libpng16.a -I libpng-1.6.37/ -o fuzzer_libpng -lz -lm -``` - -Afterward, the fuzzer will be ready to run. -Note that, unless you use the `launcher`, you will have to run the binary multiple times to actually start the fuzz process, see `Run` in the following. -This allows you to run multiple different builds of the same fuzzer alongside, for example, with and without ASAN (`-fsanitize=address`) or with different mutators. - -## Run - -The first time you run the binary, the broker will open a tcp port (currently on port `1337`), waiting for fuzzer clients to connect. This port is local and only used for the initial handshake. All further communication happens via shared map, to be independent of the kernel. Currently, you must run the clients from the libfuzzer_libpng directory for them to be able to access the PNG corpus. - -``` -./fuzzer_libpng -``` - -And after running the above again in a separate terminal: - -``` -[libafl/src/bolts/llmp.rs:1464] "New connection" = "New connection" -[libafl/src/bolts/llmp.rs:1464] addr = 127.0.0.1:33500 -[libafl/src/bolts/llmp.rs:1464] stream.peer_addr().unwrap() = 127.0.0.1:33500 -[LOG Debug]: Loaded 4 initial testcases. -[New Testcase #2] clients: 3, corpus: 6, objectives: 0, executions: 5, exec/sec: 0 -< fuzzing stats > -``` -You will get an AFL-Style UI in your terminal. - -As this example uses in-process fuzzing, we added a Restarting Event Manager (`setup_restarting_mgr`). -This means each client will start itself again to listen for crashes and timeouts. -By restarting the actual fuzzer, it can recover from these exit conditions. - -In any real-world scenario, you should use `taskset` to pin each client to an empty CPU core, the lib does not pick an empty core automatically (yet). - diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/src/lib.rs b/fuzzers/libfuzzer_libpng_aflpp_ui/src/lib.rs deleted file mode 100644 index 340e71b1fb..0000000000 --- a/fuzzers/libfuzzer_libpng_aflpp_ui/src/lib.rs +++ /dev/null @@ -1,240 +0,0 @@ -//! A libfuzzer-like fuzzer with llmp-multithreading support and restarts -//! The example harness is built for libpng. 
-use core::time::Duration; -#[cfg(feature = "crash")] -use std::ptr; -use std::{env, path::PathBuf}; - -use libafl::{ - corpus::{Corpus, InMemoryOnDiskCorpus, OnDiskCorpus}, - events::{setup_restarting_mgr_std, EventConfig, EventRestarter}, - executors::{inprocess::InProcessExecutor, ExitKind}, - feedback_or, feedback_or_fast, - feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, - fuzzer::{Fuzzer, StdFuzzer}, - inputs::{BytesInput, HasTargetBytes}, - monitors::tui::{ui::TuiUI, TuiMonitor}, - mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, - token_mutations::Tokens, - }, - observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, - schedulers::{ - powersched::PowerSchedule, IndexesLenTimeMinimizerScheduler, StdWeightedScheduler, - }, - stages::{calibrate::CalibrationStage, power::StdPowerMutationalStage, stats::AflStatsStage}, - state::{HasCorpus, StdState}, - Error, HasMetadata, -}; -use libafl_bolts::{ - rands::StdRand, - tuples::{tuple_list, Merge}, - AsSlice, -}; -use libafl_targets::{libfuzzer_initialize, libfuzzer_test_one_input, EDGES_MAP, MAX_EDGES_FOUND}; -use mimalloc::MiMalloc; - -#[global_allocator] -static GLOBAL: MiMalloc = MiMalloc; - -/// The main fn, `no_mangle` as it is a C main -#[cfg(not(test))] -#[no_mangle] -pub extern "C" fn libafl_main() { - // Registry the metadata types used in this fuzzer - // Needed only on no_std - // unsafe { RegistryBuilder::register::(); } - - println!( - "Workdir: {:?}", - env::current_dir().unwrap().to_string_lossy().to_string() - ); - fuzz( - &[PathBuf::from("./corpus")], - PathBuf::from("./out"), - PathBuf::from("./crashes"), - 1337, - ) - .expect("An error occurred while fuzzing"); -} - -/// The actual fuzzer -#[cfg(not(test))] -fn fuzz( - initial_input_dirs: &[PathBuf], - corpus_dir: PathBuf, - objective_dir: PathBuf, - broker_port: u16, -) -> Result<(), Error> { - // 'While the stats are state, they are usually used in the broker - which is likely never restarted - // let monitor = MultiMonitor::new(|s| println!("{s}")); - - //Setup an Monitor with AFL-Style UI to display the stats - let ui = TuiUI::with_version( - String::from("Libfuzzer For Libpng"), - String::from("0.0.1"), - false, - ); - let monitor = TuiMonitor::new(ui); - - // The restarting state will spawn the same process again as child, then restarted it each time it crashes. 
- let (state, mut restarting_mgr) = - match setup_restarting_mgr_std(monitor, broker_port, EventConfig::AlwaysUnique) { - Ok(res) => res, - Err(err) => match err { - Error::ShuttingDown => { - return Ok(()); - } - _ => { - panic!("Failed to setup the restarter: {err}"); - } - }, - }; - - // Create an observation channel using the coverage map - let edges_observer = unsafe { - HitcountsMapObserver::new(StdMapObserver::from_mut_ptr( - "edges", - EDGES_MAP.as_mut_ptr(), - MAX_EDGES_FOUND, - )) - .track_indices() - }; - - // Create an observation channel to keep track of the execution time - let time_observer = TimeObserver::new("time"); - - let map_feedback = MaxMapFeedback::new(&edges_observer); - - let calibration = CalibrationStage::new(&map_feedback); - - // Feedback to rate the interestingness of an input - // This one is composed by two Feedbacks in OR - let mut feedback = feedback_or!( - // New maximization map feedback linked to the edges observer and the feedback state - map_feedback, - // Time feedback, this one does not need a feedback state - TimeFeedback::new(&time_observer) - ); - - // A feedback to choose if an input is a solution or not - let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new()); - - // If not restarting, create a State from scratch - let mut state = state.unwrap_or_else(|| { - StdState::new( - // RNG - StdRand::new(), - // Corpus that will be evolved, we keep it in memory for performance - InMemoryOnDiskCorpus::new(corpus_dir).unwrap(), - // Corpus in which we store solutions (crashes in this example), - // on disk so the user can get them after stopping the fuzzer - OnDiskCorpus::new(objective_dir).unwrap(), - // States of the feedbacks. - // The feedbacks can report the data that should persist in the State. 
- &mut feedback, - // Same for objective feedbacks - &mut objective, - ) - .unwrap() - }); - - println!("We're a client, let's fuzz :)"); - - // Create a PNG dictionary if not existing - if state.metadata_map().get::().is_none() { - state.add_metadata(Tokens::from([ - vec![137, 80, 78, 71, 13, 10, 26, 10], // PNG header - "IHDR".as_bytes().to_vec(), - "IDAT".as_bytes().to_vec(), - "PLTE".as_bytes().to_vec(), - "IEND".as_bytes().to_vec(), - ])); - } - - // Setup a basic mutator with a mutational stage - let mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); - - let power = StdPowerMutationalStage::new(mutator); - - // Setup a stage that can collect and send the AFL-like data - let aflstats = AflStatsStage::new(Duration::from_secs(3)); - - let mut stages = tuple_list!(calibration, power, aflstats); - - // A minimization+queue policy to get testcasess from the corpus - let scheduler = IndexesLenTimeMinimizerScheduler::new( - &edges_observer, - StdWeightedScheduler::with_schedule(&mut state, &edges_observer, Some(PowerSchedule::FAST)), - ); - - // A fuzzer with feedbacks and a corpus scheduler - let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - - // The wrapped harness function, calling out to the LLVM-style harness - let mut harness = |input: &BytesInput| { - let target = input.target_bytes(); - let buf = target.as_slice(); - #[cfg(feature = "crash")] - if buf.len() > 4 && buf[4] == 0 { - unsafe { - eprintln!("Crashing (for testing purposes)"); - let addr = ptr::null_mut(); - *addr = 1; - } - } - libfuzzer_test_one_input(buf); - ExitKind::Ok - }; - - // Create the executor for an in-process function with one observer for edge coverage and one for the execution time - let mut executor = InProcessExecutor::with_timeout( - &mut harness, - tuple_list!(edges_observer, time_observer), - &mut fuzzer, - &mut state, - &mut restarting_mgr, - Duration::new(10, 0), - )?; - - // The actual target run starts here. - // Call LLVMFUzzerInitialize() if present. - let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { - println!("Warning: LLVMFuzzerInitialize failed with -1"); - } - - // In case the corpus is empty (on first run), reset - if state.must_load_initial_inputs() { - state - .load_initial_inputs( - &mut fuzzer, - &mut executor, - &mut restarting_mgr, - initial_input_dirs, - ) - .unwrap_or_else(|_| { - panic!("Failed to load initial corpus at {:?}", &initial_input_dirs) - }); - println!("We imported {} inputs from disk.", state.corpus().count()); - } - - // This fuzzer restarts after 1 mio `fuzz_one` executions. - // Each fuzz_one will internally do many executions of the target. - // If your target is very instable, setting a low count here may help. - // However, you will lose a lot of performance that way. - let iters = 1_000_000; - fuzzer.fuzz_loop_for( - &mut stages, - &mut executor, - &mut state, - &mut restarting_mgr, - iters, - )?; - - // It's important, that we store the state before restarting! - // Else, the parent will not respawn a new child and quit. 
- restarting_mgr.on_restart(&mut state)?; - - Ok(()) -} diff --git a/fuzzers/libfuzzer_libpng_centralized/Cargo.toml b/fuzzers/libfuzzer_libpng_centralized/Cargo.toml deleted file mode 100644 index c2694c62ea..0000000000 --- a/fuzzers/libfuzzer_libpng_centralized/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "libfuzzer_libpng_launcher_centralized" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "4.4" - -[dependencies] -libafl = { path = "../../libafl/", features = ["std", "derive", "rand_trait", "fork", "prelude", "gzip", "regex", "scalability_introspection"] } -libafl_bolts = { path = "../../libafl_bolts/", features = ["errors_backtrace"] } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -clap = { version = "4.0", features = ["derive"] } -mimalloc = { version = "*", default-features = false } -env_logger = "0.10" - -[lib] -name = "libfuzzer_libpng" -crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_centralized/src/lib.rs b/fuzzers/libfuzzer_libpng_centralized/src/lib.rs deleted file mode 100644 index 44393a0fa6..0000000000 --- a/fuzzers/libfuzzer_libpng_centralized/src/lib.rs +++ /dev/null @@ -1,274 +0,0 @@ -//! A libfuzzer-like fuzzer with llmp-multithreading support and restarts -//! The example harness is built for libpng. -//! In this example, you will see the use of the `launcher` feature. -//! The `launcher` will spawn new processes for each cpu core. -use core::time::Duration; -use std::{env, net::SocketAddr, path::PathBuf}; - -use clap::{self, Parser}; -use libafl::{ - corpus::{Corpus, InMemoryCorpus, OnDiskCorpus}, - events::{centralized::CentralizedEventManager, launcher::CentralizedLauncher, EventConfig}, - executors::{inprocess::InProcessExecutor, ExitKind}, - feedback_or, feedback_or_fast, - feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, - fuzzer::{Fuzzer, StdFuzzer}, - inputs::{BytesInput, HasTargetBytes}, - monitors::MultiMonitor, - mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, - token_mutations::Tokens, - }, - observers::{CanTrack, HitcountsMapObserver, TimeObserver}, - schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, - stages::mutational::StdMutationalStage, - state::{HasCorpus, StdState}, - Error, HasMetadata, -}; -use libafl_bolts::{ - core_affinity::{CoreId, Cores}, - rands::StdRand, - shmem::{ShMemProvider, StdShMemProvider}, - tuples::{tuple_list, Merge}, - AsSlice, -}; -use libafl_targets::{libfuzzer_initialize, libfuzzer_test_one_input, std_edges_map_observer}; -use mimalloc::MiMalloc; - -#[global_allocator] -static GLOBAL: MiMalloc = MiMalloc; - -/// Parse a millis string to a [`Duration`]. Used for arg parsing. 
-fn timeout_from_millis_str(time: &str) -> Result { - Ok(Duration::from_millis(time.parse()?)) -} - -/// The commandline args this fuzzer accepts -#[derive(Debug, Parser)] -#[command( - name = "libfuzzer_libpng_launcher", - about = "A libfuzzer-like fuzzer for libpng with llmp-multithreading support and a launcher", - author = "Andrea Fioraldi , Dominik Maier " -)] -struct Opt { - #[arg( - short, - long, - value_parser = Cores::from_cmdline, - help = "Spawn a client in each of the provided cores. Broker runs in the 0th core. 'all' to select all available cores. 'none' to run a client without binding to any core. eg: '1,2-4,6' selects the cores 1,2,3,4,6.", - name = "CORES" - )] - cores: Cores, - - #[arg( - short = 'p', - long, - help = "Choose the broker TCP port, default is 1337", - name = "PORT", - default_value = "1337" - )] - broker_port: u16, - - #[arg(short = 'a', long, help = "Specify a remote broker", name = "REMOTE")] - remote_broker_addr: Option, - - #[arg( - short, - long, - help = "Set an initial corpus directory", - name = "INPUT", - required = true - )] - input: Vec, - - #[arg( - short, - long, - help = "Set the output directory, default is ./out", - name = "OUTPUT", - default_value = "./out" - )] - output: PathBuf, - - #[arg( - value_parser = timeout_from_millis_str, - short, - long, - help = "Set the exeucution timeout in milliseconds, default is 10000", - name = "TIMEOUT", - default_value = "10000" - )] - timeout: Duration, - /* - /// This fuzzer has hard-coded tokens - #[arg( - - short = "x", - long, - help = "Feed the fuzzer with an user-specified list of tokens (often called \"dictionary\"", - name = "TOKENS", - multiple = true - )] - tokens: Vec, - */ -} - -/// The main fn, `no_mangle` as it is a C symbol -#[no_mangle] -pub extern "C" fn libafl_main() { - env_logger::init(); - - // Registry the metadata types used in this fuzzer - // Needed only on no_std - // unsafe { RegistryBuilder::register::(); } - let opt = Opt::parse(); - - let broker_port = opt.broker_port; - let cores = opt.cores; - - println!( - "Workdir: {:?}", - env::current_dir().unwrap().to_string_lossy().to_string() - ); - - let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory"); - - let monitor = MultiMonitor::new(|s| println!("{s}")); - - let mut secondary_run_client = |state: Option<_>, - mut mgr: CentralizedEventManager<_, _>, - _core_id: CoreId| { - // Create an observation channel using the coverage map - let edges_observer = - HitcountsMapObserver::new(unsafe { std_edges_map_observer("edges") }).track_indices(); - - // Create an observation channel to keep track of the execution time - let time_observer = TimeObserver::new("time"); - - // Feedback to rate the interestingness of an input - // This one is composed by two Feedbacks in OR - let mut feedback = feedback_or!( - // New maximization map feedback linked to the edges observer and the feedback state - MaxMapFeedback::new(&edges_observer), - // Time feedback, this one does not need a feedback state - TimeFeedback::new(&time_observer) - ); - - // A feedback to choose if an input is a solution or not - let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new()); - - // If not restarting, create a State from scratch - let mut state = state.unwrap_or_else(|| { - StdState::new( - // RNG - StdRand::new(), - // Corpus that will be evolved, we keep it in memory for performance - InMemoryCorpus::new(), - // Corpus in which we store solutions (crashes in this example), - // on disk so the user can get 
them after stopping the fuzzer - OnDiskCorpus::new(&opt.output).unwrap(), - // States of the feedbacks. - // The feedbacks can report the data that should persist in the State. - &mut feedback, - // Same for objective feedbacks - &mut objective, - ) - .unwrap() - }); - - println!("We're a client, let's fuzz :)"); - - // Create a PNG dictionary if not existing - if state.metadata_map().get::().is_none() { - state.add_metadata(Tokens::from([ - vec![137, 80, 78, 71, 13, 10, 26, 10], // PNG header - "IHDR".as_bytes().to_vec(), - "IDAT".as_bytes().to_vec(), - "PLTE".as_bytes().to_vec(), - "IEND".as_bytes().to_vec(), - ])); - } - - // Setup a basic mutator with a mutational stage - let mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); - let mut stages = tuple_list!(StdMutationalStage::new(mutator)); - - // A minimization+queue policy to get testcasess from the corpus - let scheduler = - IndexesLenTimeMinimizerScheduler::new(&edges_observer, QueueScheduler::new()); - - // A fuzzer with feedbacks and a corpus scheduler - let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - - // The wrapped harness function, calling out to the LLVM-style harness - let mut harness = |input: &BytesInput| { - let target = input.target_bytes(); - let buf = target.as_slice(); - libfuzzer_test_one_input(buf); - ExitKind::Ok - }; - - // Create the executor for an in-process function with one observer for edge coverage and one for the execution time - #[cfg(target_os = "linux")] - let mut executor = InProcessExecutor::batched_timeout( - &mut harness, - tuple_list!(edges_observer, time_observer), - &mut fuzzer, - &mut state, - &mut mgr, - opt.timeout, - )?; - - #[cfg(not(target_os = "linux"))] - let mut executor = InProcessExecutor::with_timeout( - &mut harness, - tuple_list!(edges_observer, time_observer), - &mut fuzzer, - &mut state, - &mut mgr, - opt.timeout, - )?; - - // The actual target run starts here. - // Call LLVMFUzzerInitialize() if present. - let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { - println!("Warning: LLVMFuzzerInitialize failed with -1"); - } - - // In case the corpus is empty (on first run), reset - if state.must_load_initial_inputs() { - state - .load_initial_inputs(&mut fuzzer, &mut executor, &mut mgr, &opt.input) - .unwrap_or_else(|_| panic!("Failed to load initial corpus at {:?}", &opt.input)); - println!("We imported {} inputs from disk.", state.corpus().count()); - } - if !mgr.is_main() { - fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; - } else { - let mut empty_stages = tuple_list!(); - fuzzer.fuzz_loop(&mut empty_stages, &mut executor, &mut state, &mut mgr)?; - } - Ok(()) - }; - - let mut main_run_client = secondary_run_client.clone(); // clone it just for borrow checker - - match CentralizedLauncher::builder() - .shmem_provider(shmem_provider) - .configuration(EventConfig::from_name("default")) - .monitor(monitor) - .secondary_run_client(&mut secondary_run_client) - .main_run_client(&mut main_run_client) - .cores(&cores) - .broker_port(broker_port) - .remote_broker_addr(opt.remote_broker_addr) - .stdout_file(Some("/dev/null")) - .build() - .launch() - { - Ok(()) => (), - Err(Error::ShuttingDown) => println!("Fuzzing stopped by user. 
Good bye."), - Err(err) => panic!("Failed to run launcher: {err:?}"), - } -} diff --git a/fuzzers/libfuzzer_libpng_cmin/Cargo.toml b/fuzzers/libfuzzer_libpng_cmin/Cargo.toml deleted file mode 100644 index 47e47b0193..0000000000 --- a/fuzzers/libfuzzer_libpng_cmin/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "libfuzzer_libpng_cmin" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier ", "Addison Crump "] -edition = "2021" - -[features] -default = ["std"] -std = [] -# Forces a crash -crash = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "4.4" - -[dependencies] -env_logger = "0.10" -libafl = { path = "../../libafl/", features = ["default", "cmin"] } -# libafl = { path = "../../libafl/", features = ["default"] } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer", "sancov_cmplog"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "libfuzzer_libpng" -crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_launcher/Cargo.toml b/fuzzers/libfuzzer_libpng_launcher/Cargo.toml deleted file mode 100644 index 967fdc36bb..0000000000 --- a/fuzzers/libfuzzer_libpng_launcher/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "libfuzzer_libpng_launcher" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "4.4" - -[dependencies] -libafl = { path = "../../libafl/", features = ["std", "derive", "llmp_compression", "introspection"] } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -clap = { version = "4.0", features = ["derive"] } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "libfuzzer_libpng" -crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_norestart/Cargo.toml b/fuzzers/libfuzzer_libpng_norestart/Cargo.toml deleted file mode 100644 index 0383fc89d9..0000000000 --- a/fuzzers/libfuzzer_libpng_norestart/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "libfuzzer_libpng_launcher_norestart" -version = "0.9.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "4.4" - -[dependencies] -env_logger = "0.10" -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/", features = ["errors_backtrace"] } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -clap = { version = "4.1", features = ["derive"] } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "libfuzzer_libpng" -crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/Cargo.toml 
b/fuzzers/libfuzzer_libpng_tcp_manager/Cargo.toml deleted file mode 100644 index bb864473b8..0000000000 --- a/fuzzers/libfuzzer_libpng_tcp_manager/Cargo.toml +++ /dev/null @@ -1,34 +0,0 @@ -[package] -name = "libfuzzer_libpng_tcp_manager" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] -# Forces a crash -crash = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "4.4" - -[dependencies] -libafl = { path = "../../libafl/", features = ["default", "tcp_manager"] } -# libafl = { path = "../../libafl/", features = ["default"] } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer", "sancov_cmplog"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "libfuzzer_libpng" -crate-type = ["staticlib"] diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/harness.cc b/fuzzers/libfuzzer_libpng_tcp_manager/harness.cc deleted file mode 100644 index 5c36517376..0000000000 --- a/fuzzers/libfuzzer_libpng_tcp_manager/harness.cc +++ /dev/null @@ -1,191 +0,0 @@ -// libpng_read_fuzzer.cc -// Copyright 2017-2018 Glenn Randers-Pehrson -// Copyright 2015 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that may -// be found in the LICENSE file https://cs.chromium.org/chromium/src/LICENSE - -// Last changed in libpng 1.6.35 [July 15, 2018] - -// The modifications in 2017 by Glenn Randers-Pehrson include -// 1. addition of a PNG_CLEANUP macro, -// 2. setting the option to ignore ADLER32 checksums, -// 3. adding "#include " which is needed on some platforms -// to provide memcpy(). -// 4. adding read_end_info() and creating an end_info structure. -// 5. adding calls to png_set_*() transforms commonly used by browsers. 
- -#include -#include -#include - -#include - -#define PNG_INTERNAL -#include "png.h" - -#define PNG_CLEANUP \ - if (png_handler.png_ptr) { \ - if (png_handler.row_ptr) { \ - png_free(png_handler.png_ptr, png_handler.row_ptr); \ - } \ - if (png_handler.end_info_ptr) { \ - png_destroy_read_struct(&png_handler.png_ptr, &png_handler.info_ptr, \ - &png_handler.end_info_ptr); \ - } else if (png_handler.info_ptr) { \ - png_destroy_read_struct(&png_handler.png_ptr, &png_handler.info_ptr, \ - nullptr); \ - } else { \ - png_destroy_read_struct(&png_handler.png_ptr, nullptr, nullptr); \ - } \ - png_handler.png_ptr = nullptr; \ - png_handler.row_ptr = nullptr; \ - png_handler.info_ptr = nullptr; \ - png_handler.end_info_ptr = nullptr; \ - } - -struct BufState { - const uint8_t *data; - size_t bytes_left; -}; - -struct PngObjectHandler { - png_infop info_ptr = nullptr; - png_structp png_ptr = nullptr; - png_infop end_info_ptr = nullptr; - png_voidp row_ptr = nullptr; - BufState *buf_state = nullptr; - - ~PngObjectHandler() { - if (row_ptr) { png_free(png_ptr, row_ptr); } - if (end_info_ptr) { - png_destroy_read_struct(&png_ptr, &info_ptr, &end_info_ptr); - } else if (info_ptr) { - png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); - } else { - png_destroy_read_struct(&png_ptr, nullptr, nullptr); - } - delete buf_state; - } -}; - -void user_read_data(png_structp png_ptr, png_bytep data, size_t length) { - BufState *buf_state = static_cast(png_get_io_ptr(png_ptr)); - if (length > buf_state->bytes_left) { png_error(png_ptr, "read error"); } - memcpy(data, buf_state->data, length); - buf_state->bytes_left -= length; - buf_state->data += length; -} - -static const int kPngHeaderSize = 8; - -// Entry point for LibFuzzer. -// Roughly follows the libpng book example: -// http://www.libpng.org/pub/png/book/chapter13.html -extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { - if (size < kPngHeaderSize) { return 0; } - - std::vector v(data, data + size); - if (png_sig_cmp(v.data(), 0, kPngHeaderSize)) { - // not a PNG. - return 0; - } - - PngObjectHandler png_handler; - png_handler.png_ptr = nullptr; - png_handler.row_ptr = nullptr; - png_handler.info_ptr = nullptr; - png_handler.end_info_ptr = nullptr; - - png_handler.png_ptr = - png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr); - if (!png_handler.png_ptr) { return 0; } - - png_handler.info_ptr = png_create_info_struct(png_handler.png_ptr); - if (!png_handler.info_ptr) { - PNG_CLEANUP - return 0; - } - - png_handler.end_info_ptr = png_create_info_struct(png_handler.png_ptr); - if (!png_handler.end_info_ptr) { - PNG_CLEANUP - return 0; - } - - png_set_crc_action(png_handler.png_ptr, PNG_CRC_QUIET_USE, PNG_CRC_QUIET_USE); -#ifdef PNG_IGNORE_ADLER32 - png_set_option(png_handler.png_ptr, PNG_IGNORE_ADLER32, PNG_OPTION_ON); -#endif - - // Setting up reading from buffer. - png_handler.buf_state = new BufState(); - png_handler.buf_state->data = data + kPngHeaderSize; - png_handler.buf_state->bytes_left = size - kPngHeaderSize; - png_set_read_fn(png_handler.png_ptr, png_handler.buf_state, user_read_data); - png_set_sig_bytes(png_handler.png_ptr, kPngHeaderSize); - - if (setjmp(png_jmpbuf(png_handler.png_ptr))) { - PNG_CLEANUP - return 0; - } - - // Reading. - png_read_info(png_handler.png_ptr, png_handler.info_ptr); - - // reset error handler to put png_deleter into scope. 
- if (setjmp(png_jmpbuf(png_handler.png_ptr))) { - PNG_CLEANUP - return 0; - } - - png_uint_32 width, height; - int bit_depth, color_type, interlace_type, compression_type; - int filter_type; - - if (!png_get_IHDR(png_handler.png_ptr, png_handler.info_ptr, &width, &height, - &bit_depth, &color_type, &interlace_type, &compression_type, - &filter_type)) { - PNG_CLEANUP - return 0; - } - - // This is going to be too slow. - if (width && height > 100000000 / width) { - PNG_CLEANUP -#ifdef HAS_DUMMY_CRASH - #ifdef __aarch64__ - asm volatile(".word 0xf7f0a000\n"); - #else - asm("ud2"); - #endif -#endif - return 0; - } - - // Set several transforms that browsers typically use: - png_set_gray_to_rgb(png_handler.png_ptr); - png_set_expand(png_handler.png_ptr); - png_set_packing(png_handler.png_ptr); - png_set_scale_16(png_handler.png_ptr); - png_set_tRNS_to_alpha(png_handler.png_ptr); - - int passes = png_set_interlace_handling(png_handler.png_ptr); - - png_read_update_info(png_handler.png_ptr, png_handler.info_ptr); - - png_handler.row_ptr = - png_malloc(png_handler.png_ptr, - png_get_rowbytes(png_handler.png_ptr, png_handler.info_ptr)); - - for (int pass = 0; pass < passes; ++pass) { - for (png_uint_32 y = 0; y < height; ++y) { - png_read_row(png_handler.png_ptr, - static_cast(png_handler.row_ptr), nullptr); - } - } - - png_read_end(png_handler.png_ptr, png_handler.end_info_ptr); - - PNG_CLEANUP - return 0; -} diff --git a/fuzzers/libfuzzer_stb_image/Cargo.toml b/fuzzers/libfuzzer_stb_image/Cargo.toml deleted file mode 100644 index c36ea21cbc..0000000000 --- a/fuzzers/libfuzzer_stb_image/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "libfuzzer_stb_image" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" -build = "build.rs" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_edges", "sancov_cmplog", "libfuzzer", "libfuzzer_no_link_main"] } -mimalloc = { version = "*", default-features = false } - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } \ No newline at end of file diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml b/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml deleted file mode 100644 index c4f818827e..0000000000 --- a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "libfuzzer_stb_image_concolic" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier ", "Julius Hohnerlein"] -edition = "2021" -build = "build.rs" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../../libafl/", features = ["concolic_mutation"] } -libafl_bolts = { path = "../../../libafl_bolts/" } -libafl_targets = { path = "../../../libafl_targets/", features = ["sancov_pcguard_edges", "sancov_cmplog", "libfuzzer"] } -clap = { version = "4.0", features = ["derive"]} -mimalloc = { version = "*", default-features = false } - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -cmake = "0.1" -which = "4.4" -symcc_libafl = {path = "../../../libafl_concolic/symcc_libafl"} diff --git a/fuzzers/libfuzzer_stb_image_sugar/Cargo.toml b/fuzzers/libfuzzer_stb_image_sugar/Cargo.toml 
deleted file mode 100644 index baccedd900..0000000000 --- a/fuzzers/libfuzzer_stb_image_sugar/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "libfuzzer_stb_image_sugar" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" -build = "build.rs" -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_edges", "sancov_cmplog", "libfuzzer"] } -libafl_sugar = { path = "../../libafl_sugar/" } -mimalloc = { version = "*", default-features = false } - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } diff --git a/fuzzers/libfuzzer_windows_asan/Cargo.toml b/fuzzers/libfuzzer_windows_asan/Cargo.toml deleted file mode 100644 index 515cb3b7ac..0000000000 --- a/fuzzers/libfuzzer_windows_asan/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "libfuzzer_windows_asan" -version = "0.8.2" -authors = ["Max Ammann "] -edition = "2021" -categories = ["development-tools::testing"] - -[features] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["libfuzzer", "sancov_pcguard_edges"] } -libafl_cc = { path = "../../libafl_cc/" } - -[build-dependencies] -cc = { version = "1.0" } - -[lib] -name = "libfuzzer_windows_asan" -crate-type = ["staticlib"] diff --git a/fuzzers/nautilus_sync/Cargo.toml b/fuzzers/nautilus_sync/Cargo.toml deleted file mode 100644 index 998cae3e77..0000000000 --- a/fuzzers/nautilus_sync/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "nautilus_sync" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.dev] -panic = "abort" -debug = true - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/", features = ["default", "nautilus"] } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer"] } -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } -clap = { version = "4.0", features = ["derive"] } -mimalloc = { version = "*", default-features = false } - -[lib] -name = "nautilus_sync" -crate-type = ["staticlib"] diff --git a/fuzzers/nyx_libxml2_parallel/Cargo.toml b/fuzzers/nyx_libxml2_parallel/Cargo.toml deleted file mode 100644 index a342857811..0000000000 --- a/fuzzers/nyx_libxml2_parallel/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "nyx_libxml2_parallel" -version = "0.13.0" -edition = "2021" -default-run = "nyx_libxml2_parallel" - -[dependencies] -libafl = { path = "../../libafl" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_cc = { path = "../../libafl_cc" } -libafl_nyx = { path = "../../libafl_nyx" } - -[profile.release] -codegen-units = 1 -opt-level = 3 diff --git a/fuzzers/nyx_libxml2_standalone/Cargo.toml b/fuzzers/nyx_libxml2_standalone/Cargo.toml deleted file mode 100644 index 8fd819fe1b..0000000000 --- 
a/fuzzers/nyx_libxml2_standalone/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "nyx_libxml2_standalone" -version = "0.13.0" -edition = "2021" -default-run = "nyx_libxml2_standalone" - -[dependencies] -libafl = { path = "../../libafl" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_cc = { path = "../../libafl_cc" } -libafl_nyx = { path = "../../libafl_nyx" } - -[profile.release] -codegen-units = 1 -opt-level = 3 diff --git a/fuzzers/push_stage_harness/.gitignore b/fuzzers/push_stage_harness/.gitignore deleted file mode 100644 index a977a2ca5b..0000000000 --- a/fuzzers/push_stage_harness/.gitignore +++ /dev/null @@ -1 +0,0 @@ -libpng-* \ No newline at end of file diff --git a/fuzzers/push_stage_harness/Cargo.toml b/fuzzers/push_stage_harness/Cargo.toml deleted file mode 100644 index 8fda3e74e7..0000000000 --- a/fuzzers/push_stage_harness/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "push_stage_harness" -version = "0.10.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.dev] -panic = "abort" - -[profile.release] -panic = "abort" -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } diff --git a/fuzzers/qemu_cmin/Cargo.toml b/fuzzers/qemu_cmin/Cargo.toml deleted file mode 100644 index 395f5ce2f1..0000000000 --- a/fuzzers/qemu_cmin/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "qemu_cmin" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier ", "WorksButNotTested"] -edition = "2021" - -[profile.release] -#lto = true -#codegen-units = 1 -#opt-level = 3 -debug = true - -[features] -default = ["std"] -std = [] -be = ["libafl_qemu/be"] -arm = ["libafl_qemu/arm"] -x86_64 = ["libafl_qemu/x86_64"] -i386 = ["libafl_qemu/i386"] -aarch64 = ["libafl_qemu/aarch64"] -mips = ["libafl_qemu/mips"] -ppc = ["libafl_qemu/ppc", "be"] - -[build-dependencies] -vergen = { version = "8.2.1", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] } - -[dependencies] -clap = { version = "4.3.0", features = ["derive", "string"]} -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_qemu = { path = "../../libafl_qemu/", features = ["usermode"] } -log = {version = "0.4.20" } -rangemap = { version = "1.3" } diff --git a/fuzzers/qemu_coverage/Cargo.toml b/fuzzers/qemu_coverage/Cargo.toml deleted file mode 100644 index 30b7e4a0c0..0000000000 --- a/fuzzers/qemu_coverage/Cargo.toml +++ /dev/null @@ -1,33 +0,0 @@ -[package] -name = "qemu_coverage" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier ", "WorksButNotTested"] -edition = "2021" - -[profile.release] -#lto = true -#codegen-units = 1 -#opt-level = 3 -debug = true - -[features] -default = ["std"] -std = [] -be = ["libafl_qemu/be"] -arm = ["libafl_qemu/arm"] -x86_64 = ["libafl_qemu/x86_64"] -i386 = ["libafl_qemu/i386"] -aarch64 = ["libafl_qemu/aarch64"] -mips = ["libafl_qemu/mips"] -ppc = ["libafl_qemu/ppc", "be"] - -[build-dependencies] -vergen = { version = "8.2.1", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] } - -[dependencies] -clap = { version = "4.3.0", features = ["derive", "string"]} -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_qemu = { path = "../../libafl_qemu/", features = ["usermode"] } -log = {version = "0.4.20" } -rangemap = { version = "1.3" } diff --git 
a/fuzzers/qemu_launcher/Cargo.toml b/fuzzers/qemu_launcher/Cargo.toml deleted file mode 100644 index b919716541..0000000000 --- a/fuzzers/qemu_launcher/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -[package] -name = "qemu_launcher" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std", "injections"] -std = [] - -## Build with a simple event manager instead of Launcher - don't fork, and crash after the first bug. -simplemgr = [] - -## Enable fuzzing for injections (where supported) -injections = ["libafl_qemu/injections"] - -## Set emulator to big endian -be = ["libafl_qemu/be"] - -#! ## Mutually exclusive architectures -arm = ["libafl_qemu/arm"] -x86_64 = ["libafl_qemu/x86_64"] -i386 = ["libafl_qemu/i386"] -aarch64 = ["libafl_qemu/aarch64"] -mips = ["libafl_qemu/mips"] -ppc = ["libafl_qemu/ppc", "be"] -hexagon = ["libafl_qemu/hexagon"] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -vergen = { version = "8.2", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] } - -[dependencies] -clap = { version = "4.3", features = ["derive", "string"]} -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/", features = ["errors_backtrace"] } -libafl_qemu = { path = "../../libafl_qemu/", features = ["usermode"] } -log = {version = "0.4.20" } -nix = { version = "0.29", features = ["fs"] } -rangemap = { version = "1.3" } -readonly = { version = "0.2.10" } -typed-builder = { version = "0.18" } diff --git a/fuzzers/qemu_launcher/corpus/not_kitty.png b/fuzzers/qemu_launcher/corpus/not_kitty.png deleted file mode 100644 index eff7c1707b..0000000000 Binary files a/fuzzers/qemu_launcher/corpus/not_kitty.png and /dev/null differ diff --git a/fuzzers/qemu_launcher/corpus/not_kitty_alpha.png b/fuzzers/qemu_launcher/corpus/not_kitty_alpha.png deleted file mode 100644 index 2fb8da2c8f..0000000000 Binary files a/fuzzers/qemu_launcher/corpus/not_kitty_alpha.png and /dev/null differ diff --git a/fuzzers/qemu_launcher/corpus/not_kitty_gamma.png b/fuzzers/qemu_launcher/corpus/not_kitty_gamma.png deleted file mode 100644 index 939d9d29a9..0000000000 Binary files a/fuzzers/qemu_launcher/corpus/not_kitty_gamma.png and /dev/null differ diff --git a/fuzzers/qemu_launcher/corpus/not_kitty_icc.png b/fuzzers/qemu_launcher/corpus/not_kitty_icc.png deleted file mode 100644 index f0c7804d99..0000000000 Binary files a/fuzzers/qemu_launcher/corpus/not_kitty_icc.png and /dev/null differ diff --git a/fuzzers/qemu_launcher/src/client.rs b/fuzzers/qemu_launcher/src/client.rs deleted file mode 100644 index 0259b31552..0000000000 --- a/fuzzers/qemu_launcher/src/client.rs +++ /dev/null @@ -1,269 +0,0 @@ -use std::{env, ops::Range}; - -use libafl::{ - corpus::{InMemoryOnDiskCorpus, OnDiskCorpus}, - inputs::BytesInput, - monitors::Monitor, - state::StdState, - Error, -}; -use libafl_bolts::{core_affinity::CoreId, rands::StdRand, tuples::tuple_list}; -#[cfg(feature = "injections")] -use libafl_qemu::injections::QemuInjectionHelper; -use libafl_qemu::{ - asan::{init_qemu_with_asan, QemuAsanHelper}, - asan_guest::{init_qemu_with_asan_guest, QemuAsanGuestHelper}, - cmplog::QemuCmpLogHelper, - edges::QemuEdgeCoverageHelper, - elf::EasyElf, - ArchExtras, GuestAddr, Qemu, QemuInstrumentationAddressRangeFilter, -}; - -use crate::{ - instance::{ClientMgr, Instance}, - options::FuzzerOptions, -}; - -#[allow(clippy::module_name_repetitions)] -pub type ClientState = - StdState, StdRand, 
OnDiskCorpus>; - -pub struct Client<'a> { - options: &'a FuzzerOptions, -} - -impl<'a> Client<'a> { - pub fn new(options: &FuzzerOptions) -> Client { - Client { options } - } - - fn args(&self) -> Result, Error> { - let program = env::args() - .next() - .ok_or_else(|| Error::empty_optional("Failed to read program name"))?; - - let mut args = self.options.args.clone(); - args.insert(0, program); - Ok(args) - } - - #[allow(clippy::unused_self)] // Api should look the same as args above - fn env(&self) -> Vec<(String, String)> { - env::vars() - .filter(|(k, _v)| k != "LD_LIBRARY_PATH") - .collect::>() - } - - fn start_pc(qemu: &Qemu) -> Result { - let mut elf_buffer = Vec::new(); - let elf = EasyElf::from_file(qemu.binary_path(), &mut elf_buffer)?; - - let start_pc = elf - .resolve_symbol("LLVMFuzzerTestOneInput", qemu.load_addr()) - .ok_or_else(|| Error::empty_optional("Symbol LLVMFuzzerTestOneInput not found"))?; - Ok(start_pc) - } - - #[allow(clippy::similar_names)] // elf != self - fn coverage_filter(&self, qemu: &Qemu) -> Result { - /* Conversion is required on 32-bit targets, but not on 64-bit ones */ - if let Some(includes) = &self.options.include { - #[cfg_attr(target_pointer_width = "64", allow(clippy::useless_conversion))] - let rules = includes - .iter() - .map(|x| Range { - start: x.start.into(), - end: x.end.into(), - }) - .collect::>>(); - Ok(QemuInstrumentationAddressRangeFilter::AllowList(rules)) - } else if let Some(excludes) = &self.options.exclude { - #[cfg_attr(target_pointer_width = "64", allow(clippy::useless_conversion))] - let rules = excludes - .iter() - .map(|x| Range { - start: x.start.into(), - end: x.end.into(), - }) - .collect::>>(); - Ok(QemuInstrumentationAddressRangeFilter::DenyList(rules)) - } else { - let mut elf_buffer = Vec::new(); - let elf = EasyElf::from_file(qemu.binary_path(), &mut elf_buffer)?; - let range = elf - .get_section(".text", qemu.load_addr()) - .ok_or_else(|| Error::key_not_found("Failed to find .text section"))?; - Ok(QemuInstrumentationAddressRangeFilter::AllowList(vec![ - range, - ])) - } - } - - pub fn run( - &self, - state: Option, - mgr: ClientMgr, - core_id: CoreId, - ) -> Result<(), Error> { - let mut args = self.args()?; - log::debug!("ARGS: {:#?}", args); - - let mut env = self.env(); - log::debug!("ENV: {:#?}", env); - - let is_asan = self.options.is_asan_core(core_id); - let is_asan_guest = self.options.is_asan_guest_core(core_id); - - if is_asan && is_asan_guest { - Err(Error::empty_optional("Multiple ASAN modes configured"))?; - } - - let (qemu, mut asan, mut asan_lib) = { - if is_asan { - let (emu, asan) = init_qemu_with_asan(&mut args, &mut env)?; - (emu, Some(asan), None) - } else if is_asan_guest { - let (emu, asan_lib) = init_qemu_with_asan_guest(&mut args, &mut env)?; - (emu, None, Some(asan_lib)) - } else { - (Qemu::init(&args, &env)?, None, None) - } - }; - - let start_pc = Self::start_pc(&qemu)?; - log::debug!("start_pc @ {start_pc:#x}"); - - #[cfg(not(feature = "injections"))] - let injection_helper = None; - - #[cfg(feature = "injections")] - let injection_helper = self - .options - .injections - .as_ref() - .and_then(|injections_file| { - let lower = injections_file.to_lowercase(); - if lower.ends_with("yaml") || lower.ends_with("yml") { - Some(QemuInjectionHelper::from_yaml(injections_file).unwrap()) - } else if lower.ends_with("toml") { - Some(QemuInjectionHelper::from_toml(injections_file).unwrap()) - } else { - None - } - }); - - let extra_tokens = injection_helper.as_ref().map(|h| h.tokens.clone()); - - 
qemu.entry_break(start_pc); - - let ret_addr: GuestAddr = qemu - .read_return_address() - .map_err(|e| Error::unknown(format!("Failed to read return address: {e:?}")))?; - log::debug!("ret_addr = {ret_addr:#x}"); - qemu.set_breakpoint(ret_addr); - - let is_cmplog = self.options.is_cmplog_core(core_id); - - let edge_coverage_helper = QemuEdgeCoverageHelper::new(self.coverage_filter(&qemu)?); - - let instance = Instance::builder() - .options(self.options) - .qemu(&qemu) - .mgr(mgr) - .core_id(core_id) - .extra_tokens(extra_tokens); - - if is_asan && is_cmplog { - if let Some(injection_helper) = injection_helper { - instance.build().run( - tuple_list!( - edge_coverage_helper, - QemuCmpLogHelper::default(), - QemuAsanHelper::default(asan.take().unwrap()), - injection_helper, - ), - state, - ) - } else { - instance.build().run( - tuple_list!( - edge_coverage_helper, - QemuCmpLogHelper::default(), - QemuAsanHelper::default(asan.take().unwrap()), - ), - state, - ) - } - } else if is_asan_guest && is_cmplog { - if let Some(injection_helper) = injection_helper { - instance.build().run( - tuple_list!( - edge_coverage_helper, - QemuCmpLogHelper::default(), - QemuAsanGuestHelper::default(&qemu, asan_lib.take().unwrap()), - injection_helper - ), - state, - ) - } else { - instance.build().run( - tuple_list!( - edge_coverage_helper, - QemuCmpLogHelper::default(), - QemuAsanGuestHelper::default(&qemu, asan_lib.take().unwrap()), - ), - state, - ) - } - } else if is_asan { - if let Some(injection_helper) = injection_helper { - instance.build().run( - tuple_list!( - edge_coverage_helper, - QemuAsanHelper::default(asan.take().unwrap()), - injection_helper - ), - state, - ) - } else { - instance.build().run( - tuple_list!( - edge_coverage_helper, - QemuAsanHelper::default(asan.take().unwrap()), - ), - state, - ) - } - } else if is_asan_guest { - let helpers = tuple_list!( - edge_coverage_helper, - QemuAsanGuestHelper::default(&qemu, asan_lib.take().unwrap()) - ); - instance.build().run(helpers, state) - } else if is_cmplog { - if let Some(injection_helper) = injection_helper { - instance.build().run( - tuple_list!( - edge_coverage_helper, - QemuCmpLogHelper::default(), - injection_helper - ), - state, - ) - } else { - instance.build().run( - tuple_list!(edge_coverage_helper, QemuCmpLogHelper::default()), - state, - ) - } - } else if let Some(injection_helper) = injection_helper { - instance - .build() - .run(tuple_list!(edge_coverage_helper, injection_helper), state) - } else { - instance - .build() - .run(tuple_list!(edge_coverage_helper), state) - } - } -} diff --git a/fuzzers/qemu_systemmode/Cargo.toml b/fuzzers/qemu_systemmode/Cargo.toml deleted file mode 100644 index c5e4c744c4..0000000000 --- a/fuzzers/qemu_systemmode/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "qemu_systemmode" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std", "classic"] -std = [] - -classic = [] # The classic way to interact with LibAFL QEMU, with direct calls to QEMU's functions -breakpoint = [] # Uses the command system, with breakpoints -sync_exit = [] # Uses the command system, with sync exit. 
- -shared = ["libafl_qemu/shared"] - -[profile.release] -incremental = true -debug = true -lto = "fat" -codegen-units = 1 - -[dependencies] -libafl = { path = "../../libafl/" } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_qemu = { path = "../../libafl_qemu/", features = ["arm", "systemmode"] } -libafl_qemu_sys = { path = "../../libafl_qemu/libafl_qemu_sys", features = ["arm", "systemmode"] } -env_logger = "*" - -[build-dependencies] -libafl_qemu_build = { path = "../../libafl_qemu/libafl_qemu_build" } diff --git a/fuzzers/qemu_systemmode/Makefile.toml b/fuzzers/qemu_systemmode/Makefile.toml deleted file mode 100644 index 2afe843b23..0000000000 --- a/fuzzers/qemu_systemmode/Makefile.toml +++ /dev/null @@ -1,203 +0,0 @@ -env_scripts = [ -''' -#!@duckscript -profile = get_env PROFILE - -if eq ${profile} "dev" - set_env PROFILE_DIR debug -else - set_env PROFILE_DIR ${profile} -end -''', -''' -#!@duckscript -runs_on_ci = get_env RUN_ON_CI - -if ${runs_on_ci} - cargo_target_dir = get_env CARGO_MAKE_CRATE_TARGET_DIRECTORY - set_env TARGET_DIR ${cargo_target_dir} - set_env KERNEL ${cargo_target_dir}/example.elf -end -''' -] - -[env] -PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } -TARGET_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${FEATURE}" -LIBAFL_QEMU_CLONE_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/qemu-libafl-bridge" -KERNEL = "${TARGET_DIR}/example.elf" - -[tasks.target_dir] -condition = { files_not_exist = [ "${TARGET_DIR}" ] } -script_runner="@shell" -script=''' -mkdir -p ${TARGET_DIR} -''' - -[tasks.image] -dependencies = ["target_dir"] -condition = { files_not_exist = [ "${TARGET_DIR}/dummy.qcow2" ] } -script_runner="@shell" -script=''' -qemu-img create -f qcow2 ${TARGET_DIR}/dummy.qcow2 32M -''' - -[tasks.target] -dependencies = ["target_dir"] -condition = { env_set = [ "TARGET_DEFINE" ] } -command = "arm-none-eabi-gcc" -args = [ - "-ggdb", - "-ffreestanding", - "-nostartfiles", - "-lgcc", - "-T", "${CARGO_MAKE_WORKING_DIRECTORY}/example/mps2_m3.ld", - "-mcpu=cortex-m3", - "${CARGO_MAKE_WORKING_DIRECTORY}/example/main.c", - "${CARGO_MAKE_WORKING_DIRECTORY}/example/startup.c", - "-D", "${TARGET_DEFINE}", - "-I", "${TARGET_DIR}/${PROFILE_DIR}/include", - "-o", "${TARGET_DIR}/example.elf", -] - -[tasks.build_fuzzer] -condition = { env_set = [ "FEATURE" ] } -command = "cargo" -args = [ - "build", - "--profile", - "${PROFILE}", - "--no-default-features", - "--features", "std,${FEATURE}", - "--target-dir", "${TARGET_DIR}", -] -dependencies = ["image"] - -[tasks.run_fuzzer] -command = "${TARGET_DIR}/${PROFILE_DIR}/qemu_systemmode" -args = [ - "-icount", "shift=auto,align=off,sleep=off", - "-machine", "mps2-an385", - "-monitor", "null", - "-kernel", "${TARGET_DIR}/example.elf", - "-serial", "null", - "-nographic", - "-snapshot", - "-drive", "if=none,format=qcow2,file=${TARGET_DIR}/dummy.qcow2", - "-S", -] -dependencies = ["target"] - -[tasks.test_fuzzer] -condition = { env_set = [ "FEATURE" ] } -script_runner="@shell" -script=''' -TMP_DIR=$(mktemp -d) - -cargo make build_$FEATURE -timeout 15s cargo make ${FEATURE} | tee $TMP_DIR/fuzz.log 2>&1 || true - -if [ -z "$(grep 'Objective' $TMP_DIR/fuzz.log)" ]; then - echo "qemu_systemmode ${FEATURE}: Fuzzer did not find the objective in $TMP_DIR/fuzz.log" - exit 1 -else - echo "qemu_systemmode ${FEATURE}: Objective found." 
-fi -''' - -[tasks.build_classic] -command = "cargo" -args = [ - "make", - "-e", "FEATURE=classic", - "-e", "TARGET_DEFINE=TARGET_CLASSIC", - "build_fuzzer", -] - -[tasks.test_classic] -command = "cargo" -args = [ - "make", - "-e", "FEATURE=classic", - "test_fuzzer", -] - -[tasks.build_breakpoint] -command = "cargo" -args = [ - "make", - "-e", "FEATURE=breakpoint", - "-e", "TARGET_DEFINE=TARGET_BREAKPOINT", - "build_fuzzer", -] - -[tasks.test_breakpoint] -command = "cargo" -args = [ - "make", - "-e", "FEATURE=breakpoint", - "test_fuzzer", -] - -[tasks.build_sync_exit] -command = "cargo" -args = [ - "make", - "-e", "FEATURE=sync_exit", - "-e", "TARGET_DEFINE=TARGET_SYNC_EXIT", - "build_fuzzer", -] - -[tasks.test_sync_exit] -command = "cargo" -args = [ - "make", - "-e", "FEATURE=sync_exit", - "test_fuzzer", -] - -[tasks.classic] -command = "cargo" -args = [ - "make", - "-e", "FEATURE=classic", - "-e", "TARGET_DEFINE=TARGET_CLASSIC", - "run_fuzzer", -] - -[tasks.breakpoint] -command = "cargo" -args = [ - "make", - "-e", "FEATURE=breakpoint", - "-e", "TARGET_DEFINE=TARGET_BREAKPOINT", - "run_fuzzer", -] - -[tasks.sync_exit] -command = "cargo" -args = [ - "make", - "-e", "FEATURE=sync_exit", - "-e", "TARGET_DEFINE=TARGET_SYNC_EXIT", - "run_fuzzer", -] - -[tasks.test] -clear = true -run_task = { name = ["test_classic", "test_breakpoint", "test_sync_exit"] } - -[tasks.build] -clear = true -run_task = { name = ["build_classic", "build_breakpoint", "build_sync_exit"] } - -[tasks.run] -alias="classic" - -[tasks.clean] -clear = true -script_runner="@shell" -script=''' -rm -rf ${CARGO_MAKE_CRATE_TARGET_DIRECTORY} -cargo clean -''' diff --git a/fuzzers/structure_aware/baby_fuzzer_custom_input/Cargo.toml b/fuzzers/structure_aware/baby_fuzzer_custom_input/Cargo.toml new file mode 100644 index 0000000000..8068461b0c --- /dev/null +++ b/fuzzers/structure_aware/baby_fuzzer_custom_input/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "baby_fuzzer_custom_input" +version = "0.1.0" +authors = ["Valentin Huber "] +edition = "2021" + +[features] +default = ["simple_interface"] +simple_interface = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +serde = "1.0.210" diff --git a/fuzzers/structure_aware/baby_fuzzer_custom_input/README.md b/fuzzers/structure_aware/baby_fuzzer_custom_input/README.md new file mode 100644 index 0000000000..99514c9b33 --- /dev/null +++ b/fuzzers/structure_aware/baby_fuzzer_custom_input/README.md @@ -0,0 +1,7 @@ +# Baby fuzzer + +This is a minimalistic fuzzer demonstrating how to employ mapping mutators to use default mutators on custom inputs. Custom inputs are necessary when the input to your program is a combination of parts, especially when those parts have different data types. Check multipart inputs if you have an input consisting of multiple parts of the same datatype and you don't need your mutation scheduler to be able to select which mutation is performed on which part. + +The fuzzer runs on a single core until a crash occurs and then exits. The tested program is a simple Rust function without any instrumentation. For real fuzzing, you will want to add some sort to add coverage or other feedback. + +You can run this example using `cargo run`. 
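The crate's `simple_interface` feature (enabled by default, per the Cargo.toml above) selects the `mapped_havoc_mutations` convenience API in `main.rs`; building without it exercises the explicit mapper-based setup instead. A sketch of both invocations, using only standard Cargo flags:

```bash
# default features: uses mapped_havoc_mutations / optional_mapped_havoc_mutations
cargo run
# without default features: uses the explicit mapping-mutator path
cargo run --no-default-features
```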
\ No newline at end of file diff --git a/fuzzers/structure_aware/baby_fuzzer_custom_input/src/input.rs b/fuzzers/structure_aware/baby_fuzzer_custom_input/src/input.rs new file mode 100644 index 0000000000..6c45c88939 --- /dev/null +++ b/fuzzers/structure_aware/baby_fuzzer_custom_input/src/input.rs @@ -0,0 +1,146 @@ +use core::num::NonZeroUsize; +use std::{ + borrow::Cow, + hash::{DefaultHasher, Hash, Hasher}, +}; + +use libafl::{ + corpus::CorpusId, + generators::{Generator, RandBytesGenerator}, + inputs::{BytesInput, HasTargetBytes, Input, MutVecInput}, + mutators::{MutationResult, Mutator}, + state::HasRand, + Error, SerdeAny, +}; +use libafl_bolts::{rands::Rand, Named}; +use serde::{Deserialize, Serialize}; + +/// The custom [`Input`] type used in this example, consisting of a byte array part, a byte array that is not always present, and a boolean +/// +/// Imagine these could be used to model command line arguments for a bash command, where +/// - `byte_array` is binary data that is always needed like what is passed to stdin, +/// - `optional_byte_array` is binary data passed as a command line arg, and it is only passed if it is not `None` in the input, +/// - `boolean` models the presence or absence of a command line flag that does not require additional data +#[derive(Serialize, Deserialize, Clone, Debug, Hash, SerdeAny)] +pub struct CustomInput { + pub byte_array: Vec, + pub optional_byte_array: Option>, + pub boolean: bool, +} + +/// Hash-based implementation +impl Input for CustomInput { + fn generate_name(&self, _id: Option) -> String { + let mut hasher = DefaultHasher::new(); + self.hash(&mut hasher); + format!("{:016x}", hasher.finish()) + } +} + +impl CustomInput { + /// Returns a mutable reference to the byte array + pub fn byte_array_mut(&mut self) -> MutVecInput<'_> { + (&mut self.byte_array).into() + } + + /// Returns an immutable reference to the byte array + pub fn byte_array(&self) -> &[u8] { + &self.byte_array + } + + /// Returns a mutable reference to the optional byte array + pub fn optional_byte_array_mut(&mut self) -> Option> { + self.optional_byte_array.as_mut().map(|e| e.into()) + } + + /// Returns an immutable reference to the optional byte array + pub fn optional_byte_array(&self) -> Option<&[u8]> { + self.optional_byte_array.as_deref() + } +} + +/// A generator for [`CustomInput`] used in this example +pub struct CustomInputGenerator { + pub bytes_generator: RandBytesGenerator, +} + +impl CustomInputGenerator { + /// Creates a new [`CustomInputGenerator`] + pub fn new(max_len: NonZeroUsize) -> Self { + Self { + bytes_generator: RandBytesGenerator::new(max_len), + } + } +} + +impl Generator for CustomInputGenerator +where + S: HasRand, +{ + fn generate(&mut self, state: &mut S) -> Result { + let generator = &mut self.bytes_generator; + + let byte_array = generator.generate(state).unwrap().target_bytes().into(); + let optional_byte_array = state + .rand_mut() + .coinflip(0.5) + .then(|| generator.generate(state).unwrap().target_bytes().into()); + let boolean = state.rand_mut().coinflip(0.5); + + Ok(CustomInput { + byte_array, + optional_byte_array, + boolean, + }) + } +} + +/// [`Mutator`] that toggles the optional byte array of a [`CustomInput`], i.e. 
sets it to [`None`] if it is not, and to a random byte array if it is [`None`] +pub struct ToggleOptionalByteArrayMutator { + generator: G, +} + +impl ToggleOptionalByteArrayMutator { + /// Creates a new [`ToggleOptionalByteArrayMutator`] + pub fn new(length: NonZeroUsize) -> Self { + Self { + generator: RandBytesGenerator::new(length), + } + } +} + +impl Mutator for ToggleOptionalByteArrayMutator +where + S: HasRand, + G: Generator, +{ + fn mutate(&mut self, state: &mut S, input: &mut CustomInput) -> Result { + input.optional_byte_array = match input.optional_byte_array { + None => Some(self.generator.generate(state)?.target_bytes().into()), + Some(_) => None, + }; + Ok(MutationResult::Mutated) + } +} + +impl Named for ToggleOptionalByteArrayMutator { + fn name(&self) -> &Cow<'static, str> { + &Cow::Borrowed("ToggleOptionalByteArrayMutator") + } +} + +/// [`Mutator`] that toggles the boolean field in a [`CustomInput`] +pub struct ToggleBooleanMutator; + +impl Mutator for ToggleBooleanMutator { + fn mutate(&mut self, _state: &mut S, input: &mut CustomInput) -> Result { + input.boolean = !input.boolean; + Ok(MutationResult::Mutated) + } +} + +impl Named for ToggleBooleanMutator { + fn name(&self) -> &Cow<'static, str> { + &Cow::Borrowed("ToggleBooleanMutator") + } +} diff --git a/fuzzers/structure_aware/baby_fuzzer_custom_input/src/main.rs b/fuzzers/structure_aware/baby_fuzzer_custom_input/src/main.rs new file mode 100644 index 0000000000..873e26dea1 --- /dev/null +++ b/fuzzers/structure_aware/baby_fuzzer_custom_input/src/main.rs @@ -0,0 +1,195 @@ +mod input; + +#[cfg(windows)] +use std::ptr::write_volatile; +use std::{path::PathBuf, ptr::write}; + +use input::{ + CustomInput, CustomInputGenerator, ToggleBooleanMutator, ToggleOptionalByteArrayMutator, +}; +#[cfg(feature = "simple_interface")] +use libafl::mutators::havoc_mutations::{mapped_havoc_mutations, optional_mapped_havoc_mutations}; +use libafl::{ + corpus::{InMemoryCorpus, OnDiskCorpus}, + events::SimpleEventManager, + executors::{inprocess::InProcessExecutor, ExitKind}, + feedbacks::{CrashFeedback, MaxMapFeedback}, + fuzzer::{Fuzzer, StdFuzzer}, + monitors::SimpleMonitor, + mutators::scheduled::StdScheduledMutator, + observers::StdMapObserver, + schedulers::QueueScheduler, + stages::mutational::StdMutationalStage, + state::StdState, +}; +use libafl_bolts::{ + current_nanos, nonzero, + rands::StdRand, + tuples::{tuple_list, Merge, Prepend}, +}; +#[cfg(not(feature = "simple_interface"))] +use { + libafl::mutators::{ + havoc_mutations::{havoc_crossover_with_corpus_mapper, havoc_mutations_no_crossover}, + mapping::{ToMappedInputFunctionMappingMutatorMapper, ToOptionMappingMutatorMapper}, + }, + libafl_bolts::tuples::Map, +}; + +/// Coverage map with explicit assignments due to the lack of instrumentation +const SIGNALS_LEN: usize = 16; +static mut SIGNALS: [u8; SIGNALS_LEN] = [0; 16]; +static mut SIGNALS_PTR: *mut u8 = &raw mut SIGNALS as _; + +/// Assign a signal to the signals map +fn signals_set(idx: usize) { + if idx > 2 { + println!("Setting signal: {idx}"); + } + unsafe { write(SIGNALS_PTR.add(idx), 1) }; +} + +#[allow(clippy::similar_names, clippy::manual_assert)] +pub fn main() { + // The closure that we want to fuzz + // The pseudo program under test uses all parts of the custom input + // We are manually setting bytes in a pseudo coverage map to guide the fuzzer + let mut harness = |input: &CustomInput| { + signals_set(0); + if input.byte_array == vec![b'a'] { + signals_set(1); + if input.optional_byte_array == 
Some(vec![b'b']) { + signals_set(2); + if input.boolean { + #[cfg(unix)] + panic!("Artificial bug triggered =)"); + + // panic!() raises a STATUS_STACK_BUFFER_OVERRUN exception which cannot be caught by the exception handler. + // Here we make it raise STATUS_ACCESS_VIOLATION instead. + // Extending the windows exception handler is a TODO. Maybe we can refer to what winafl code does. + // https://github.com/googleprojectzero/winafl/blob/ea5f6b85572980bb2cf636910f622f36906940aa/winafl.c#L728 + #[cfg(windows)] + unsafe { + write_volatile(0 as *mut u32, 0); + } + } + } + } + ExitKind::Ok + }; + + // Create an observation channel using the signals map + let observer = unsafe { StdMapObserver::from_mut_ptr("signals", SIGNALS_PTR, SIGNALS_LEN) }; + + // Feedback to rate the interestingness of an input + let mut feedback = MaxMapFeedback::new(&observer); + + // A feedback to choose if an input is a solution or not + let mut objective = CrashFeedback::new(); + + // create a State from scratch + let mut state = StdState::new( + // RNG + StdRand::with_seed(current_nanos()), + // Corpus that will be evolved, we keep it in memory for performance + InMemoryCorpus::new(), + // Corpus in which we store solutions (crashes in this example), + // on disk so the user can get them after stopping the fuzzer + OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(), + // States of the feedbacks. + // The feedbacks can report the data that should persist in the State. + &mut feedback, + // Same for objective feedbacks + &mut objective, + ) + .unwrap(); + + // The Monitor trait define how the fuzzer stats are displayed to the user + let mon = SimpleMonitor::new(|s| println!("{s}")); + + // The event manager handle the various events generated during the fuzzing loop + // such as the notification of the addition of a new item to the corpus + let mut mgr = SimpleEventManager::new(mon); + + // A queue policy to get testcasess from the corpus + let scheduler = QueueScheduler::new(); + + // A fuzzer with feedbacks and a corpus scheduler + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + // Create the executor for an in-process function with just one observer + let mut executor = InProcessExecutor::new( + &mut harness, + tuple_list!(observer), + &mut fuzzer, + &mut state, + &mut mgr, + ) + .expect("Failed to create the Executor"); + + // Generator of printable bytearrays of max size 32 + let mut generator = CustomInputGenerator::new(nonzero!(1)); + + // Generate 8 initial inputs + state + .generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8) + .expect("Failed to generate the initial corpus"); + + #[cfg(feature = "simple_interface")] + let (mapped_mutators, optional_mapped_mutators) = { + // Creating mutators that will operate on input.byte_array + let mapped_mutators = + mapped_havoc_mutations(CustomInput::byte_array_mut, CustomInput::byte_array); + + // Creating mutators that will operate on input.optional_byte_array + let optional_mapped_mutators = optional_mapped_havoc_mutations( + CustomInput::optional_byte_array_mut, + CustomInput::optional_byte_array, + ); + (mapped_mutators, optional_mapped_mutators) + }; + + #[cfg(not(feature = "simple_interface"))] + let (mapped_mutators, optional_mapped_mutators) = { + // Creating mutators that will operate on input.byte_array + let mapped_mutators = havoc_mutations_no_crossover() + .merge(havoc_crossover_with_corpus_mapper(CustomInput::byte_array)) + .map(ToMappedInputFunctionMappingMutatorMapper::new( + 
CustomInput::byte_array_mut, + )); + + // Creating mutators that will operate on input.optional_byte_array + let optional_mapped_mutators = havoc_mutations_no_crossover() + .merge(havoc_crossover_with_corpus_mapper( + CustomInput::optional_byte_array, + )) + .map(ToOptionMappingMutatorMapper) + .map(ToMappedInputFunctionMappingMutatorMapper::new( + CustomInput::optional_byte_array_mut, + )); + + (mapped_mutators, optional_mapped_mutators) + }; + + // Merging multiple lists of mutators that mutate a sub-part of the custom input + // This collection could be expanded with default or custom mutators as needed for the input + let mutators = tuple_list!() + // First, mutators for the simple byte array + .merge(mapped_mutators) + // Then, mutators for the optional byte array, these return MutationResult::Skipped if the part is not present + .merge(optional_mapped_mutators) + // A custom mutator that sets the optional byte array to None if present, and generates a random byte array of length 1 if it is not + .prepend(ToggleOptionalByteArrayMutator::new(nonzero!(1))) + // Finally, a custom mutator that toggles the boolean part of the input + .prepend(ToggleBooleanMutator); + + // Scheduling layer for the mutations + let mutator_scheduler = StdScheduledMutator::new(mutators); + // Defining the mutator stage + let mut stages = tuple_list!(StdMutationalStage::new(mutator_scheduler)); + + // Run the fuzzer + fuzzer + .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr) + .expect("Error in the fuzzing loop"); +} diff --git a/fuzzers/libfuzzer_libpng_accounting/.gitignore b/fuzzers/structure_aware/baby_fuzzer_gramatron/.gitignore similarity index 100% rename from fuzzers/libfuzzer_libpng_accounting/.gitignore rename to fuzzers/structure_aware/baby_fuzzer_gramatron/.gitignore diff --git a/fuzzers/structure_aware/baby_fuzzer_gramatron/Cargo.toml b/fuzzers/structure_aware/baby_fuzzer_gramatron/Cargo.toml new file mode 100644 index 0000000000..8b5a57d729 --- /dev/null +++ b/fuzzers/structure_aware/baby_fuzzer_gramatron/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "baby_fuzzer_gramatron" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +postcard = { version = "1.0.10", features = [ + "alloc", +], default-features = false } # no_std compatible serde serialization format diff --git a/fuzzers/baby_fuzzer_gramatron/README.md b/fuzzers/structure_aware/baby_fuzzer_gramatron/README.md similarity index 100% rename from fuzzers/baby_fuzzer_gramatron/README.md rename to fuzzers/structure_aware/baby_fuzzer_gramatron/README.md diff --git a/fuzzers/baby_fuzzer_gramatron/auto.json b/fuzzers/structure_aware/baby_fuzzer_gramatron/auto.json similarity index 100% rename from fuzzers/baby_fuzzer_gramatron/auto.json rename to fuzzers/structure_aware/baby_fuzzer_gramatron/auto.json diff --git a/fuzzers/baby_fuzzer_gramatron/auto.postcard b/fuzzers/structure_aware/baby_fuzzer_gramatron/auto.postcard similarity index 100% rename from fuzzers/baby_fuzzer_gramatron/auto.postcard rename to fuzzers/structure_aware/baby_fuzzer_gramatron/auto.postcard diff --git a/fuzzers/baby_fuzzer_gramatron/corpus/new file 
b/fuzzers/structure_aware/baby_fuzzer_gramatron/corpus/new file similarity index 100% rename from fuzzers/baby_fuzzer_gramatron/corpus/new file rename to fuzzers/structure_aware/baby_fuzzer_gramatron/corpus/new file diff --git a/fuzzers/baby_fuzzer_gramatron/src/main.rs b/fuzzers/structure_aware/baby_fuzzer_gramatron/src/main.rs similarity index 96% rename from fuzzers/baby_fuzzer_gramatron/src/main.rs rename to fuzzers/structure_aware/baby_fuzzer_gramatron/src/main.rs index 357bd3b17a..b4efb17f55 100644 --- a/fuzzers/baby_fuzzer_gramatron/src/main.rs +++ b/fuzzers/structure_aware/baby_fuzzer_gramatron/src/main.rs @@ -27,8 +27,9 @@ use libafl::{ use libafl_bolts::{rands::StdRand, tuples::tuple_list}; /// Coverage map with explicit assignments due to the lack of instrumentation -static mut SIGNALS: [u8; 16] = [0; 16]; -static mut SIGNALS_PTR: *mut u8 = unsafe { SIGNALS.as_mut_ptr() }; +const SIGNALS_LEN: usize = 16; +static mut SIGNALS: [u8; SIGNALS_LEN] = [0; SIGNALS_LEN]; +static mut SIGNALS_PTR: *mut u8 = unsafe { &raw mut SIGNALS as _ }; /* /// Assign a signal to the signals map fn signals_set(idx: usize) { @@ -58,7 +59,7 @@ pub fn main() { }; // Create an observation channel using the signals map - let observer = unsafe { StdMapObserver::from_mut_ptr("signals", SIGNALS_PTR, SIGNALS.len()) }; + let observer = unsafe { StdMapObserver::from_mut_ptr("signals", SIGNALS_PTR, SIGNALS_LEN) }; // Feedback to rate the interestingness of an input let mut feedback = MaxMapFeedback::new(&observer); diff --git a/fuzzers/libfuzzer_libpng_aflpp_ui/.gitignore b/fuzzers/structure_aware/baby_fuzzer_grimoire/.gitignore similarity index 100% rename from fuzzers/libfuzzer_libpng_aflpp_ui/.gitignore rename to fuzzers/structure_aware/baby_fuzzer_grimoire/.gitignore diff --git a/fuzzers/structure_aware/baby_fuzzer_grimoire/Cargo.toml b/fuzzers/structure_aware/baby_fuzzer_grimoire/Cargo.toml new file mode 100644 index 0000000000..46b6ecf433 --- /dev/null +++ b/fuzzers/structure_aware/baby_fuzzer_grimoire/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "baby_fuzzer_grimoire" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/baby_fuzzer_grimoire/README.md b/fuzzers/structure_aware/baby_fuzzer_grimoire/README.md similarity index 100% rename from fuzzers/baby_fuzzer_grimoire/README.md rename to fuzzers/structure_aware/baby_fuzzer_grimoire/README.md diff --git a/fuzzers/baby_fuzzer_grimoire/corpus/new file b/fuzzers/structure_aware/baby_fuzzer_grimoire/corpus/new file similarity index 100% rename from fuzzers/baby_fuzzer_grimoire/corpus/new file rename to fuzzers/structure_aware/baby_fuzzer_grimoire/corpus/new file diff --git a/fuzzers/baby_fuzzer_grimoire/src/main.rs b/fuzzers/structure_aware/baby_fuzzer_grimoire/src/main.rs similarity index 100% rename from fuzzers/baby_fuzzer_grimoire/src/main.rs rename to fuzzers/structure_aware/baby_fuzzer_grimoire/src/main.rs diff --git a/fuzzers/libfuzzer_libpng_cmin/.gitignore b/fuzzers/structure_aware/baby_fuzzer_multi/.gitignore similarity index 100% rename from fuzzers/libfuzzer_libpng_cmin/.gitignore rename to 
fuzzers/structure_aware/baby_fuzzer_multi/.gitignore diff --git a/fuzzers/structure_aware/baby_fuzzer_multi/Cargo.toml b/fuzzers/structure_aware/baby_fuzzer_multi/Cargo.toml new file mode 100644 index 0000000000..a70ce9ee85 --- /dev/null +++ b/fuzzers/structure_aware/baby_fuzzer_multi/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "baby_fuzzer_multi" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", + "Addison Crump ", +] +edition = "2021" + +[features] +default = ["std"] +tui = ["libafl/tui_monitor"] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl", features = ["multipart_inputs"] } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/baby_fuzzer_multi/README.md b/fuzzers/structure_aware/baby_fuzzer_multi/README.md similarity index 100% rename from fuzzers/baby_fuzzer_multi/README.md rename to fuzzers/structure_aware/baby_fuzzer_multi/README.md diff --git a/fuzzers/baby_fuzzer_multi/src/main.rs b/fuzzers/structure_aware/baby_fuzzer_multi/src/main.rs similarity index 87% rename from fuzzers/baby_fuzzer_multi/src/main.rs rename to fuzzers/structure_aware/baby_fuzzer_multi/src/main.rs index 8956f8c1ac..2cda5ebebd 100644 --- a/fuzzers/baby_fuzzer_multi/src/main.rs +++ b/fuzzers/structure_aware/baby_fuzzer_multi/src/main.rs @@ -1,9 +1,9 @@ +use std::path::PathBuf; #[cfg(windows)] use std::ptr::write_volatile; -use std::{path::PathBuf, ptr::write}; #[cfg(feature = "tui")] -use libafl::monitors::tui::{ui::TuiUI, TuiMonitor}; +use libafl::monitors::tui::TuiMonitor; #[cfg(not(feature = "tui"))] use libafl::monitors::SimpleMonitor; use libafl::{ @@ -14,26 +14,25 @@ use libafl::{ feedbacks::{CrashFeedback, MaxMapFeedback, MinMapFeedback}, fuzzer::{Fuzzer, StdFuzzer}, inputs::{BytesInput, HasTargetBytes, MultipartInput}, - mutators::scheduled::{havoc_mutations, StdScheduledMutator}, - observers::StdMapObserver, + mutators::{havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator}, + observers::ConstMapObserver, schedulers::QueueScheduler, stages::mutational::StdMutationalStage, state::StdState, Evaluator, }; -use libafl_bolts::{rands::StdRand, tuples::tuple_list, AsSlice}; +use libafl_bolts::{nonnull_raw_mut, rands::StdRand, tuples::tuple_list, AsSlice}; /// Coverage map with explicit assignments due to the lack of instrumentation static mut SIGNALS: [u8; 128] = [0; 128]; -static mut SIGNALS_PTR: *mut u8 = unsafe { SIGNALS.as_mut_ptr() }; +static mut SIGNALS_PTR: *mut [u8; 128] = &raw mut SIGNALS; /// "Coverage" map for count, just to help things along static mut LAST_COUNT: [usize; 1] = [usize::MAX]; -static mut LAST_COUNT_PTR: *mut usize = unsafe { LAST_COUNT.as_mut_ptr() }; /// Assign a signal to the signals map fn signals_set(idx: usize) { - unsafe { write(SIGNALS_PTR.add(idx), 1) }; + unsafe { (*SIGNALS_PTR)[idx] = 1 }; } /// Assign a count to the count "map" @@ -83,9 +82,9 @@ pub fn main() { // Create an observation channel using the signals map let signals_observer = - unsafe { StdMapObserver::from_mut_ptr("signals", SIGNALS_PTR, SIGNALS.len()) }; + unsafe { ConstMapObserver::from_mut_ptr("signals", nonnull_raw_mut!(SIGNALS)) }; let mut count_observer = - unsafe { StdMapObserver::from_mut_ptr("count", LAST_COUNT_PTR, LAST_COUNT.len()) }; + unsafe { ConstMapObserver::from_mut_ptr("count", nonnull_raw_mut!(LAST_COUNT)) }; 
*count_observer.initial_mut() = usize::MAX; // we are minimising! // Feedback to rate the interestingness of an input @@ -118,9 +117,10 @@ pub fn main() { #[cfg(not(feature = "tui"))] let mon = SimpleMonitor::new(|s| println!("{s}")); #[cfg(feature = "tui")] - let ui = TuiUI::with_version(String::from("Baby Fuzzer"), String::from("0.0.1"), false); - #[cfg(feature = "tui")] - let mon = TuiMonitor::new(ui); + let mon = TuiMonitor::builder() + .title("Baby Fuzzer") + .enhanced_graphics(false) + .build(); // The event manager handle the various events generated during the fuzzing loop // such as the notification of the addition of a new item to the corpus diff --git a/fuzzers/libfuzzer_libpng_tcp_manager/.gitignore b/fuzzers/structure_aware/baby_fuzzer_nautilus/.gitignore similarity index 100% rename from fuzzers/libfuzzer_libpng_tcp_manager/.gitignore rename to fuzzers/structure_aware/baby_fuzzer_nautilus/.gitignore diff --git a/fuzzers/structure_aware/baby_fuzzer_nautilus/Cargo.toml b/fuzzers/structure_aware/baby_fuzzer_nautilus/Cargo.toml new file mode 100644 index 0000000000..410bad9970 --- /dev/null +++ b/fuzzers/structure_aware/baby_fuzzer_nautilus/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "baby_fuzzer_nautilus" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl", features = ["default", "nautilus"] } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/baby_fuzzer_nautilus/README.md b/fuzzers/structure_aware/baby_fuzzer_nautilus/README.md similarity index 100% rename from fuzzers/baby_fuzzer_nautilus/README.md rename to fuzzers/structure_aware/baby_fuzzer_nautilus/README.md diff --git a/fuzzers/baby_fuzzer_nautilus/grammar.json b/fuzzers/structure_aware/baby_fuzzer_nautilus/grammar.json similarity index 100% rename from fuzzers/baby_fuzzer_nautilus/grammar.json rename to fuzzers/structure_aware/baby_fuzzer_nautilus/grammar.json diff --git a/fuzzers/baby_fuzzer_nautilus/src/main.rs b/fuzzers/structure_aware/baby_fuzzer_nautilus/src/main.rs similarity index 93% rename from fuzzers/baby_fuzzer_nautilus/src/main.rs rename to fuzzers/structure_aware/baby_fuzzer_nautilus/src/main.rs index 75a2fcbe9d..780ebef137 100644 --- a/fuzzers/baby_fuzzer_nautilus/src/main.rs +++ b/fuzzers/structure_aware/baby_fuzzer_nautilus/src/main.rs @@ -35,7 +35,7 @@ fn signals_set(idx: usize) { #[allow(clippy::similar_names)] pub fn main() { - let context = NautilusContext::from_file(15, "grammar.json"); + let context = NautilusContext::from_file(15, "grammar.json").unwrap(); let mut bytes = vec![]; // The closure that we want to fuzz @@ -76,13 +76,9 @@ pub fn main() { ) .unwrap(); - if state - .metadata_map() - .get::() - .is_none() - { - state.add_metadata(NautilusChunksMetadata::new("/tmp/".into())); - } + let _ = state.metadata_or_insert_with::(|| { + NautilusChunksMetadata::new("/tmp/".into()) + }); // The Monitor trait define how the fuzzer stats are reported to the user let monitor = SimpleMonitor::new(|s| println!("{s}")); @@ -139,9 +135,11 @@ pub fn main() { */ // Generate 8 initial inputs - state - .generate_initial_inputs_forced(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8) - .expect("Failed to generate the initial corpus"); + if 
state.must_load_initial_inputs() { + state + .generate_initial_inputs_forced(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8) + .expect("Failed to generate the initial corpus"); + } // Setup a mutational stage with a basic bytes mutator let mutator = StdScheduledMutator::with_max_stack_pow( diff --git a/fuzzers/push_harness/.gitignore b/fuzzers/structure_aware/baby_fuzzer_tokens/.gitignore similarity index 100% rename from fuzzers/push_harness/.gitignore rename to fuzzers/structure_aware/baby_fuzzer_tokens/.gitignore diff --git a/fuzzers/structure_aware/baby_fuzzer_tokens/Cargo.toml b/fuzzers/structure_aware/baby_fuzzer_tokens/Cargo.toml new file mode 100644 index 0000000000..b07e8a3d77 --- /dev/null +++ b/fuzzers/structure_aware/baby_fuzzer_tokens/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "baby_fuzzer_tokens" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl" } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } diff --git a/fuzzers/baby_fuzzer_tokens/README.md b/fuzzers/structure_aware/baby_fuzzer_tokens/README.md similarity index 100% rename from fuzzers/baby_fuzzer_tokens/README.md rename to fuzzers/structure_aware/baby_fuzzer_tokens/README.md diff --git a/fuzzers/baby_fuzzer_tokens/corpus/new file b/fuzzers/structure_aware/baby_fuzzer_tokens/corpus/new file similarity index 100% rename from fuzzers/baby_fuzzer_tokens/corpus/new file rename to fuzzers/structure_aware/baby_fuzzer_tokens/corpus/new file diff --git a/fuzzers/baby_fuzzer_tokens/src/main.rs b/fuzzers/structure_aware/baby_fuzzer_tokens/src/main.rs similarity index 100% rename from fuzzers/baby_fuzzer_tokens/src/main.rs rename to fuzzers/structure_aware/baby_fuzzer_tokens/src/main.rs diff --git a/fuzzers/structure_aware/forkserver_simple_nautilus/.gitignore b/fuzzers/structure_aware/forkserver_simple_nautilus/.gitignore new file mode 100644 index 0000000000..edda62af82 --- /dev/null +++ b/fuzzers/structure_aware/forkserver_simple_nautilus/.gitignore @@ -0,0 +1 @@ +forkserver_simple diff --git a/fuzzers/structure_aware/forkserver_simple_nautilus/Cargo.toml b/fuzzers/structure_aware/forkserver_simple_nautilus/Cargo.toml new file mode 100644 index 0000000000..ad33101a02 --- /dev/null +++ b/fuzzers/structure_aware/forkserver_simple_nautilus/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "forkserver_simple" +version = "0.14.1" +authors = ["tokatoka "] +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[profile.dev] +panic = "abort" + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 + +[dependencies] +clap = { version = "4.5.18", features = ["derive"] } +env_logger = "0.11.5" +libafl = { path = "../../../libafl", features = ["std", "derive", "nautilus"] } +libafl_bolts = { path = "../../../libafl_bolts" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +nix = { version = "0.29.0", features = ["signal"] } diff --git a/fuzzers/structure_aware/forkserver_simple_nautilus/README.md b/fuzzers/structure_aware/forkserver_simple_nautilus/README.md new file mode 100644 index 0000000000..aeb558e600 --- /dev/null +++ 
b/fuzzers/structure_aware/forkserver_simple_nautilus/README.md @@ -0,0 +1,13 @@ +# Simple Forkserver Fuzzer + +This is a simple example fuzzer to fuzz an executable instrumented by afl-cc, using a Nautilus grammar. +## Usage +You can build this example by running `cargo build --release`. +The build script downloads AFLplusplus/AFLplusplus and compiles the example harness program in src/program.c with afl-cc. + +## Run +After building, run +`cp ./target/release/forkserver_simple .` to copy the fuzzer into this directory, +and then run +`taskset -c 1 ./forkserver_simple ./target/release/program ./corpus/ -t 1000` to start fuzzing. +`taskset` binds the process to a specific core to improve throughput. \ No newline at end of file diff --git a/fuzzers/structure_aware/forkserver_simple_nautilus/build.rs b/fuzzers/structure_aware/forkserver_simple_nautilus/build.rs new file mode 100644 index 0000000000..dc8c8dd146 --- /dev/null +++ b/fuzzers/structure_aware/forkserver_simple_nautilus/build.rs @@ -0,0 +1,59 @@ +use std::{ + env, + path::Path, + process::{exit, Command}, +}; + +const AFL_URL: &str = "https://github.com/AFLplusplus/AFLplusplus"; + +fn main() { + if cfg!(windows) { + println!("cargo:warning=No support for windows yet."); + exit(0); + } + + env::remove_var("DEBUG"); + let cwd = env::current_dir().unwrap().to_string_lossy().to_string(); + + let afl = format!("{}/AFLplusplus", &cwd); + let afl_cc = format!("{}/AFLplusplus/afl-cc", &cwd); + + let afl_path = Path::new(&afl); + let afl_cc_path = Path::new(&afl_cc); + + if !afl_path.is_dir() { + println!("cargo:warning=AFL++ not found, downloading..."); + Command::new("git") + .arg("clone") + .arg(AFL_URL) + .status() + .unwrap(); + } + + if !afl_cc_path.is_file() { + let mut afl_cc_make = Command::new("make"); + afl_cc_make.arg("all").current_dir(afl_path); + if let Ok(llvm_config) = env::var("LLVM_CONFIG") { + if !llvm_config.is_empty() { + afl_cc_make.env("LLVM_CONFIG", llvm_config); + } + } + afl_cc_make.status().unwrap(); + } + + let mut compile_command = Command::new(afl_cc_path); + compile_command + .args(["src/program.c", "-o"]) + .arg(format!("{cwd}/target/release/program")); + + if let Ok(llvm_config) = env::var("LLVM_CONFIG") { + if !llvm_config.is_empty() { + compile_command.env("LLVM_CONFIG", llvm_config); + } + } + + compile_command.status().unwrap(); + + println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-changed=src/"); +} diff --git a/fuzzers/structure_aware/forkserver_simple_nautilus/corpus/testfile b/fuzzers/structure_aware/forkserver_simple_nautilus/corpus/testfile new file mode 100644 index 0000000000..72943a16fb --- /dev/null +++ b/fuzzers/structure_aware/forkserver_simple_nautilus/corpus/testfile @@ -0,0 +1 @@ +aaa diff --git a/fuzzers/structure_aware/forkserver_simple_nautilus/src/main.rs b/fuzzers/structure_aware/forkserver_simple_nautilus/src/main.rs new file mode 100644 index 0000000000..61dbede569 --- /dev/null +++ b/fuzzers/structure_aware/forkserver_simple_nautilus/src/main.rs @@ -0,0 +1,227 @@ +use core::time::Duration; +use std::path::PathBuf; + +use clap::Parser; +use libafl::{ + corpus::{InMemoryCorpus, OnDiskCorpus}, + events::SimpleEventManager, + executors::{forkserver::ForkserverExecutor, HasObservers}, + feedback_and_fast, feedback_or, + feedbacks::{ + CrashFeedback, MaxMapFeedback, NautilusChunksMetadata, NautilusFeedback, TimeFeedback, + }, + fuzzer::{Fuzzer, StdFuzzer}, + generators::{NautilusContext, NautilusGenerator}, + inputs::{NautilusInput, NautilusTargetBytesConverter}, + monitors::SimpleMonitor, +
mutators::{ + NautilusRandomMutator, NautilusRecursionMutator, NautilusSpliceMutator, + StdScheduledMutator, Tokens, + }, + observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, + schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}, + stages::mutational::StdMutationalStage, + state::StdState, + HasMetadata, +}; +use libafl_bolts::{ + current_nanos, + rands::StdRand, + shmem::{ShMem, ShMemProvider, UnixShMemProvider}, + tuples::{tuple_list, Handled}, + AsSliceMut, Truncate, +}; +use nix::sys::signal::Signal; + +/// The commandline args this fuzzer accepts +#[derive(Debug, Parser)] +#[command( + name = "forkserver_simple", + about = "This is a simple example fuzzer to fuzz a executable instrumented by afl-cc, using Nautilus grammar.", + author = "tokatoka , dmnk " +)] +struct Opt { + #[arg( + help = "The instrumented binary we want to fuzz", + name = "EXEC", + required = true + )] + executable: String, + + #[arg( + help = "Timeout for each individual execution, in milliseconds", + short = 't', + long = "timeout", + default_value = "1200" + )] + timeout: u64, + + #[arg( + help = "If not set, the child's stdout and stderror will be redirected to /dev/null", + short = 'd', + long = "debug-child", + default_value = "false" + )] + debug_child: bool, + + #[arg( + help = "Arguments passed to the target", + name = "arguments", + num_args(1..), + allow_hyphen_values = true, + )] + arguments: Vec, + + #[arg( + help = "Signal used to stop child", + short = 's', + long = "signal", + value_parser = str::parse::, + default_value = "SIGKILL" + )] + signal: Signal, + + #[arg(help = "The nautilus grammar file", short)] + grammar: PathBuf, +} + +#[allow(clippy::similar_names)] +pub fn main() { + env_logger::init(); + const MAP_SIZE: usize = 65536; + + let opt = Opt::parse(); + + let mut shmem_provider = UnixShMemProvider::new().unwrap(); + + // The coverage map shared between observer and executor + let mut shmem = shmem_provider.new_shmem(MAP_SIZE).unwrap(); + // let the forkserver know the shmid + shmem.write_to_env("__AFL_SHM_ID").unwrap(); + let shmem_buf = shmem.as_slice_mut(); + + // Create an observation channel using the signals map + let edges_observer = unsafe { + HitcountsMapObserver::new(StdMapObserver::new("shared_mem", shmem_buf)).track_indices() + }; + + // Create an observation channel to keep track of the execution time + let time_observer = TimeObserver::new("time"); + + let context = NautilusContext::from_file(15, opt.grammar).unwrap(); + + // Feedback to rate the interestingness of an input + // This one is composed by two Feedbacks in OR + let mut feedback = feedback_or!( + // New maximization map feedback linked to the edges observer and the feedback state + MaxMapFeedback::new(&edges_observer), + // Time feedback, this one does not need a feedback state + TimeFeedback::new(&time_observer), + // Nautilus context + NautilusFeedback::new(&context), + ); + + // A feedback to choose if an input is a solution or not + // We want to do the same crash deduplication that AFL does + let mut objective = feedback_and_fast!( + // Must be a crash + CrashFeedback::new(), + // Take it only if trigger new coverage over crashes + // Uses `with_name` to create a different history from the `MaxMapFeedback` in `feedback` above + MaxMapFeedback::with_name("mapfeedback_metadata_objective", &edges_observer) + ); + + // create a State from scratch + let mut state = StdState::new( + // RNG + StdRand::with_seed(current_nanos()), + // Corpus that will be evolved, we keep it in 
memory for performance + InMemoryCorpus::::new(), + // Corpus in which we store solutions (crashes in this example), + // on disk so the user can get them after stopping the fuzzer + OnDiskCorpus::new(PathBuf::from("./crashes")).unwrap(), + // States of the feedbacks. + // The feedbacks can report the data that should persist in the State. + &mut feedback, + // Same for objective feedbacks + &mut objective, + ) + .unwrap(); + + let _ = state.metadata_or_insert_with::(|| { + NautilusChunksMetadata::new("/tmp/".into()) + }); + + // The Monitor trait define how the fuzzer stats are reported to the user + let monitor = SimpleMonitor::new(|s| println!("{s}")); + + // The event manager handle the various events generated during the fuzzing loop + // such as the notification of the addition of a new item to the corpus + let mut mgr = SimpleEventManager::new(monitor); + + // A minimization+queue policy to get testcasess from the corpus + let scheduler = IndexesLenTimeMinimizerScheduler::new(&edges_observer, QueueScheduler::new()); + + // A fuzzer with feedbacks and a corpus scheduler + let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); + + // If we should debug the child + let debug_child = opt.debug_child; + + // Create the executor for the forkserver + let args = opt.arguments; + + let observer_ref = edges_observer.handle(); + + let mut tokens = Tokens::new(); + let mut executor = ForkserverExecutor::builder() + .program(opt.executable) + .debug_child(debug_child) + .shmem_provider(&mut shmem_provider) + .autotokens(&mut tokens) + .parse_afl_cmdline(args) + .coverage_map_size(MAP_SIZE) + .timeout(Duration::from_millis(opt.timeout)) + .kill_signal(opt.signal) + .target_bytes_converter(NautilusTargetBytesConverter::new(&context)) + .build(tuple_list!(time_observer, edges_observer)) + .unwrap(); + + if let Some(dynamic_map_size) = executor.coverage_map_size() { + executor.observers_mut()[&observer_ref] + .as_mut() + .truncate(dynamic_map_size); + } + + let mut generator = NautilusGenerator::new(&context); + + if state.must_load_initial_inputs() { + state + .generate_initial_inputs(&mut fuzzer, &mut executor, &mut generator, &mut mgr, 8) + .expect("Failed to generate inputs"); + } + + state.add_metadata(tokens); + + // Setup a mutational stage with a basic bytes mutator + let mutator = StdScheduledMutator::with_max_stack_pow( + tuple_list!( + NautilusRandomMutator::new(&context), + NautilusRandomMutator::new(&context), + NautilusRandomMutator::new(&context), + NautilusRandomMutator::new(&context), + NautilusRandomMutator::new(&context), + NautilusRandomMutator::new(&context), + NautilusRecursionMutator::new(&context), + NautilusSpliceMutator::new(&context), + NautilusSpliceMutator::new(&context), + NautilusSpliceMutator::new(&context), + ), + 2, + ); + let mut stages = tuple_list!(StdMutationalStage::new(mutator)); + + fuzzer + .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr) + .expect("Error in the fuzzing loop"); +} diff --git a/fuzzers/structure_aware/forkserver_simple_nautilus/src/program.c b/fuzzers/structure_aware/forkserver_simple_nautilus/src/program.c new file mode 100644 index 0000000000..f6c1a515b5 --- /dev/null +++ b/fuzzers/structure_aware/forkserver_simple_nautilus/src/program.c @@ -0,0 +1,35 @@ +#include +#include +#include + +// The following line is needed for shared memeory testcase fuzzing +__AFL_FUZZ_INIT(); + +void vuln(char *buf) { + if (strcmp(buf, "vuln") == 0) { abort(); } +} + +int main(int argc, char **argv) { + FILE *file = stdin; + if (argc > 1) 
{ file = fopen(argv[1], "rb"); } + + // The following three lines are for normal fuzzing. + /* + char buf[16]; + char* p = fgets(buf, 16, file); + buf[15] = 0; + */ + + // The following line is also needed for shared memory testcase fuzzing + unsigned char *buf = __AFL_FUZZ_TESTCASE_BUF; + + printf("input: %s\n", buf); + if (buf[0] == 'b') { + if (buf[1] == 'a') { + if (buf[2] == 'd') { abort(); } + } + } + vuln(buf); + + return 0; +} \ No newline at end of file diff --git a/fuzzers/libfuzzer_stb_image_concolic/Makefile.toml b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/Makefile.toml similarity index 86% rename from fuzzers/libfuzzer_stb_image_concolic/Makefile.toml rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/Makefile.toml index 64218aeb10..b9ea9e9d06 100644 --- a/fuzzers/libfuzzer_stb_image_concolic/Makefile.toml +++ b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/Makefile.toml @@ -1,10 +1,12 @@ # Variables [env] PROJECT_DIR = { script = ["pwd"] } -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } PROFILE = { value = "release" } -PROFILE_DIR = {value = "release" } -FUZZER_NAME='libfuzzer_stb_image_concolic' +PROFILE_DIR = { value = "release" } +FUZZER_NAME = 'libfuzzer_stb_image_concolic' # Compilers [tasks.runtime] @@ -40,7 +42,7 @@ alias = "fuzzer" [tasks.unsupported] # Do nothing script_runner = "@shell" -script=''' +script = ''' echo "Not supported on this platform." ''' @@ -48,7 +50,7 @@ echo "Not supported on this platform." [tasks.clean] # Disable default `clean` definition clear = true -script=''' +script = ''' cd fuzzer cargo clean cd .. 
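Tying the two halves of the forkserver example above together: `src/program.c` is compiled with afl-cc and reports edge coverage into a shared-memory map, while the Rust side allocates that map and exports its id before the forkserver child is spawned. Below is a minimal sketch of the fuzzer-side setup, condensed from the `forkserver_simple_nautilus` main.rs above (the map size, environment variable name, and builder calls referenced in the comments are taken from that file; this is an illustration, not a drop-in fuzzer):

```rust
use libafl::observers::{HitcountsMapObserver, StdMapObserver};
use libafl_bolts::{
    shmem::{ShMem, ShMemProvider, UnixShMemProvider},
    AsSliceMut,
};

/// Size of the coverage map shared with the afl-cc-instrumented target
const MAP_SIZE: usize = 65536;

fn main() {
    // Allocate the shared memory region the instrumented target writes its hitcounts into
    let mut shmem_provider = UnixShMemProvider::new().unwrap();
    let mut shmem = shmem_provider.new_shmem(MAP_SIZE).unwrap();

    // Export the shmem id under the environment variable AFL++ targets look for,
    // so the forkserver child attaches to the same map
    shmem.write_to_env("__AFL_SHM_ID").unwrap();
    let shmem_buf = shmem.as_slice_mut();

    // Observe the raw map with classic AFL-style hitcount bucketing
    let _edges_observer =
        unsafe { HitcountsMapObserver::new(StdMapObserver::new("shared_mem", shmem_buf)) };

    // From here, the full example builds a ForkserverExecutor with
    // `.shmem_provider(&mut shmem_provider)` and `.coverage_map_size(MAP_SIZE)`,
    // passing the observer in via `tuple_list!(...)`.
}
```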
diff --git a/fuzzers/libfuzzer_stb_image_concolic/README.md b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/README.md similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/README.md rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/README.md diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/.gitignore b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/.gitignore similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/fuzzer/.gitignore rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/.gitignore diff --git a/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml new file mode 100644 index 0000000000..a5eb95f69c --- /dev/null +++ b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "libfuzzer_stb_image_concolic" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", + "Julius Hohnerlein", +] +edition = "2021" +build = "build.rs" + +[features] +default = ["std"] +std = [] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../../libafl", features = ["concolic_mutation"] } +libafl_bolts = { path = "../../../../libafl_bolts" } +libafl_targets = { path = "../../../../libafl_targets", features = [ + "sancov_pcguard_edges", + "sancov_cmplog", + "libfuzzer", +] } +clap = { version = "4.5.18", features = ["derive"] } +log = { version = "0.4.22", features = ["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } + +[build-dependencies] +cc = { version = "1.1.22", features = ["parallel"] } +cmake = "0.1.51" +which = "6.0.3" +symcc_libafl = { path = "../../../../libafl_concolic/symcc_libafl" } diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/build.rs b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/build.rs similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/fuzzer/build.rs rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/build.rs diff --git a/fuzzers/qemu_coverage/corpus/not_kitty.png b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty.png similarity index 100% rename from fuzzers/qemu_coverage/corpus/not_kitty.png rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty.png diff --git a/fuzzers/qemu_coverage/corpus/not_kitty_alpha.png b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_alpha.png similarity index 100% rename from fuzzers/qemu_coverage/corpus/not_kitty_alpha.png rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_alpha.png diff --git a/fuzzers/qemu_coverage/corpus/not_kitty_gamma.png b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_gamma.png similarity index 100% rename from fuzzers/qemu_coverage/corpus/not_kitty_gamma.png rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_gamma.png diff --git a/fuzzers/qemu_coverage/corpus/not_kitty_icc.png b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_icc.png similarity index 100% rename from fuzzers/qemu_coverage/corpus/not_kitty_icc.png rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/corpus/not_kitty_icc.png diff --git a/fuzzers/libfuzzer_stb_image_sugar/harness.c 
b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/harness.c similarity index 100% rename from fuzzers/libfuzzer_stb_image_sugar/harness.c rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/harness.c diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/harness_symcc.c b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/harness_symcc.c similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/fuzzer/harness_symcc.c rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/harness_symcc.c diff --git a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/src/main.rs similarity index 96% rename from fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/src/main.rs index f68bea3abf..f7703e40a9 100644 --- a/fuzzers/libfuzzer_stb_image_concolic/fuzzer/src/main.rs +++ b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/src/main.rs @@ -20,7 +20,7 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes, Input}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, scheduled::StdScheduledMutator, token_mutations::I2SRandReplace, }, observers::{ @@ -156,7 +156,9 @@ fn fuzz( let mut harness = |input: &BytesInput| { let target = input.target_bytes(); let buf = target.as_slice(); - libfuzzer_test_one_input(buf); + unsafe { + libfuzzer_test_one_input(buf); + } ExitKind::Ok }; @@ -175,7 +177,7 @@ fn fuzz( // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { + if unsafe { libfuzzer_initialize(&args) } == -1 { println!("Warning: LLVMFuzzerInitialize failed with -1"); } @@ -214,12 +216,12 @@ fn fuzz( // Create a concolic trace ConcolicTracingStage::new( TracingStage::new( - MyCommandConfigurator.into_executor(tuple_list!(concolic_observer)) + MyCommandConfigurator.into_executor(tuple_list!(concolic_observer)), ), concolic_ref, ), // Use the concolic trace for z3-based solving - SimpleConcolicMutationalStage::default(), + SimpleConcolicMutationalStage::new(), ); fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut restarting_mgr)?; @@ -254,4 +256,8 @@ impl CommandConfigurator for MyCommandConfigurator { fn exec_timeout(&self) -> Duration { Duration::from_secs(5) } + + fn exec_timeout_mut(&mut self) -> &mut Duration { + todo!() + } } diff --git a/fuzzers/libfuzzer_stb_image_sugar/stb_image.h b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/stb_image.h similarity index 100% rename from fuzzers/libfuzzer_stb_image_sugar/stb_image.h rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/fuzzer/stb_image.h diff --git a/fuzzers/libfuzzer_stb_image_concolic/runtime/Cargo.toml b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/runtime/Cargo.toml similarity index 74% rename from fuzzers/libfuzzer_stb_image_concolic/runtime/Cargo.toml rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/runtime/Cargo.toml index b8efd0c371..ffb3d09c08 100644 --- a/fuzzers/libfuzzer_stb_image_concolic/runtime/Cargo.toml +++ b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/runtime/Cargo.toml @@ -1,12 +1,12 @@ [package] name = "example_runtime" -version = "0.13.0" +version = "0.14.1" edition = "2021" authors = ["Julius Hohnerlein "] [lib] # the runtime needs to be a shared object -> cdylib -crate-type = 
["cdylib"] +crate-type = ["cdylib"] # this is necessary for SymCC to find the runtime. name = "SymRuntime" @@ -20,4 +20,5 @@ panic = "abort" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -symcc_runtime = { path = "../../../libafl_concolic/symcc_runtime" } +log = { version = "0.4.22", features = ["release_max_level_info"] } +symcc_runtime = { path = "../../../../libafl_concolic/symcc_runtime" } diff --git a/fuzzers/libfuzzer_stb_image_concolic/runtime/src/lib.rs b/fuzzers/structure_aware/libfuzzer_stb_image_concolic/runtime/src/lib.rs similarity index 100% rename from fuzzers/libfuzzer_stb_image_concolic/runtime/src/lib.rs rename to fuzzers/structure_aware/libfuzzer_stb_image_concolic/runtime/src/lib.rs diff --git a/fuzzers/nautilus_sync/.gitignore b/fuzzers/structure_aware/nautilus_sync/.gitignore similarity index 100% rename from fuzzers/nautilus_sync/.gitignore rename to fuzzers/structure_aware/nautilus_sync/.gitignore diff --git a/fuzzers/structure_aware/nautilus_sync/Cargo.toml b/fuzzers/structure_aware/nautilus_sync/Cargo.toml new file mode 100644 index 0000000000..1bc7b2f4ce --- /dev/null +++ b/fuzzers/structure_aware/nautilus_sync/Cargo.toml @@ -0,0 +1,39 @@ +[package] +name = "nautilus_sync" +version = "0.14.1" +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] +edition = "2021" + +[features] +default = ["std"] +std = [] + +[profile.dev] +panic = "abort" +debug = true + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +[dependencies] +libafl = { path = "../../../libafl", features = ["default", "nautilus"] } +libafl_bolts = { path = "../../../libafl_bolts" } +libafl_targets = { path = "../../../libafl_targets", features = [ + "sancov_pcguard_hitcounts", + "libfuzzer", +] } +# TODO Include it only when building cc +libafl_cc = { path = "../../../libafl_cc" } +clap = { version = "4.5.18", features = ["derive"] } +mimalloc = { version = "0.1.43", default-features = false } + +[lib] +name = "nautilus_sync" +crate-type = ["staticlib"] diff --git a/fuzzers/nautilus_sync/Makefile.toml b/fuzzers/structure_aware/nautilus_sync/Makefile.toml similarity index 69% rename from fuzzers/nautilus_sync/Makefile.toml rename to fuzzers/structure_aware/nautilus_sync/Makefile.toml index c741b0d41e..430268092d 100644 --- a/fuzzers/nautilus_sync/Makefile.toml +++ b/fuzzers/structure_aware/nautilus_sync/Makefile.toml @@ -1,17 +1,21 @@ # Variables [env] -FUZZER_NAME='fuzzer_libpng_nautilus' -CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = ["CARGO_TARGET_DIR"] } } -PROFILE = { value = "release", condition = {env_not_set = ["PROFILE"]} } -PROFILE_DIR = {value = "release", condition = {env_not_set = ["PROFILE_DIR"] }} +FUZZER_NAME = 'fuzzer_libpng_nautilus' +CARGO_TARGET_DIR = { value = "${PROJECT_DIR}/target", condition = { env_not_set = [ + "CARGO_TARGET_DIR", +] } } +PROFILE = { value = "release", condition = { env_not_set = ["PROFILE"] } } +PROFILE_DIR = { source = "${PROFILE}", default_value = "release", mapping = { "release" = "release", "dev" = "debug" }, condition = { env_not_set = [ + "PROFILE_DIR", +] } } LIBAFL_CC = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc' LIBAFL_CXX = '${CARGO_TARGET_DIR}/${PROFILE}/libafl_cxx' FUZZER = '${CARGO_TARGET_DIR}/${PROFILE_DIR}/${FUZZER_NAME}' PROJECT_DIR = { script = ["pwd"] } [tasks.unsupported] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' echo "Cargo-make not integrated yet on 
this platform" ''' @@ -22,9 +26,9 @@ mac_alias = "libpng_unix" windows_alias = "unsupported" [tasks.libpng_unix] -condition = { files_not_exist = ["./libpng-1.6.37"]} -script_runner="@shell" -script=''' +condition = { files_not_exist = ["./libpng-1.6.37"] } +script_runner = "@shell" +script = ''' wget https://github.com/glennrp/libpng/archive/refs/tags/v1.6.37.tar.gz tar -xvf v1.6.37.tar.gz ''' @@ -37,7 +41,7 @@ windows_alias = "unsupported" [tasks.cxx_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] [tasks.cc] linux_alias = "cc_unix" @@ -46,7 +50,7 @@ windows_alias = "unsupported" [tasks.cc_unix] command = "cargo" -args = ["build" , "--profile", "${PROFILE}"] +args = ["build", "--profile", "${PROFILE}"] # Library [tasks.lib] @@ -55,14 +59,14 @@ mac_alias = "lib_unix" windows_alias = "unsupported" [tasks.lib_unix] -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' cd libpng-1.6.37 && ./configure --enable-shared=no --with-pic=yes --enable-hardware-optimizations=yes cd "${PROJECT_DIR}" -cp ../baby_fuzzer_nautilus/grammar.json . +cp ../../structure_aware/baby_fuzzer_nautilus/grammar.json . make -C libpng-1.6.37 CC="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cc" CXX="${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" ''' -dependencies = [ "libpng", "cxx", "cc" ] +dependencies = ["libpng", "cxx", "cc"] # Harness @@ -73,8 +77,17 @@ windows_alias = "unsupported" [tasks.fuzzer_unix] command = "${CARGO_TARGET_DIR}/${PROFILE_DIR}/libafl_cxx" -args = ["${PROJECT_DIR}/../libfuzzer_libpng/harness.cc", "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", "-I", "${PROJECT_DIR}/libpng-1.6.37/", "-o", "${FUZZER_NAME}", "-lm", "-lz"] -dependencies = [ "lib", "cxx", "cc" ] +args = [ + "${PROJECT_DIR}/../../inprocess/libfuzzer_libpng/harness.cc", + "${PROJECT_DIR}/libpng-1.6.37/.libs/libpng16.a", + "-I", + "${PROJECT_DIR}/libpng-1.6.37/", + "-o", + "${FUZZER_NAME}", + "-lm", + "-lz", +] +dependencies = ["lib", "cxx", "cc"] # Run the fuzzer [tasks.run] @@ -84,17 +97,17 @@ windows_alias = "unsupported" [tasks.run_unix] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} --cores 0 ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.run_unix_sync] script_runner = "@shell" -script=''' +script = ''' ./${FUZZER_NAME} --cores 0 -b 1337 ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Test [tasks.test] @@ -104,7 +117,7 @@ windows_alias = "unsupported" [tasks.test_unix] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 31s ./${FUZZER_NAME} --cores 0 | tee fuzz_stdout.log 2>/dev/null || true if grep -qa "corpus: 8" fuzz_stdout.log; then @@ -114,15 +127,15 @@ else exit 1 fi ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] [tasks.test_mac] script_runner = "@shell" -script=''' +script = ''' rm -rf libafl_unix_shmem_server || true timeout 31s ./${FUZZER_NAME} --cores 0 | tee fuzz_stdout.log 2>/dev/null || true ''' -dependencies = [ "fuzzer" ] +dependencies = ["fuzzer"] # Clean up [tasks.clean] @@ -133,8 +146,8 @@ windows_alias = "unsupported" [tasks.clean_unix] # Disable default `clean` definition clear = true -script_runner="@shell" -script=''' +script_runner = "@shell" +script = ''' rm -f ./${FUZZER_NAME} make -C libpng-1.6.37 clean cargo clean diff --git a/fuzzers/tutorial/rust-toolchain b/fuzzers/structure_aware/nautilus_sync/rust-toolchain similarity index 100% rename from fuzzers/tutorial/rust-toolchain rename to 
fuzzers/structure_aware/nautilus_sync/rust-toolchain diff --git a/fuzzers/nautilus_sync/src/bin/libafl_cc.rs b/fuzzers/structure_aware/nautilus_sync/src/bin/libafl_cc.rs similarity index 63% rename from fuzzers/nautilus_sync/src/bin/libafl_cc.rs rename to fuzzers/structure_aware/nautilus_sync/src/bin/libafl_cc.rs index 1f79c67c9f..ff4d871f97 100644 --- a/fuzzers/nautilus_sync/src/bin/libafl_cc.rs +++ b/fuzzers/structure_aware/nautilus_sync/src/bin/libafl_cc.rs @@ -1,25 +1,8 @@ -use std::{env, process::Command, str}; +use std::env; use libafl_cc::{ClangWrapper, CompilerWrapper, ToolWrapper}; -fn find_libpython() -> Result { - match Command::new("python3") - .args(["-m", "find_libpython"]) - .output() - { - Ok(output) => { - let shared_obj = str::from_utf8(&output.stdout).unwrap_or_default().trim(); - if shared_obj.is_empty() { - return Err("Empty return from python3 -m find_libpython".to_string()); - } - Ok(shared_obj.to_owned()) - } - Err(err) => Err(format!( - "Could not execute python3 -m find_libpython: {err:?}" - )), - } -} - +#[allow(clippy::missing_panics_doc)] pub fn main() { let args: Vec = env::args().collect(); if args.len() > 1 { @@ -34,8 +17,6 @@ pub fn main() { dir.pop(); - let libpython = find_libpython().expect("Failed to find libpython"); - let mut cc = ClangWrapper::new(); if let Some(code) = cc .cpp(is_cpp) @@ -45,9 +26,10 @@ pub fn main() { .expect("Failed to parse the command line") .link_staticlib(&dir, "nautilus_sync") .add_arg("-fsanitize-coverage=trace-pc-guard") - // needed by Nautilus - .add_link_arg(libpython) .add_link_arg("-lutil") + // needed by Nautilus + .link_libpython() + .expect("Could not find libpython") .run() .expect("Failed to run the wrapped compiler") { diff --git a/fuzzers/tutorial/src/bin/libafl_cxx.rs b/fuzzers/structure_aware/nautilus_sync/src/bin/libafl_cxx.rs similarity index 100% rename from fuzzers/tutorial/src/bin/libafl_cxx.rs rename to fuzzers/structure_aware/nautilus_sync/src/bin/libafl_cxx.rs diff --git a/fuzzers/nautilus_sync/src/lib.rs b/fuzzers/structure_aware/nautilus_sync/src/lib.rs similarity index 96% rename from fuzzers/nautilus_sync/src/lib.rs rename to fuzzers/structure_aware/nautilus_sync/src/lib.rs index 793e18e6fe..92fc4926a5 100644 --- a/fuzzers/nautilus_sync/src/lib.rs +++ b/fuzzers/structure_aware/nautilus_sync/src/lib.rs @@ -98,6 +98,7 @@ struct Opt { /// The main fn, `no_mangle` as it is a C symbol #[no_mangle] +#[allow(clippy::missing_panics_doc, clippy::too_many_lines)] pub extern "C" fn libafl_main() { // Registry the metadata types used in this fuzzer // Needed only on no_std @@ -117,7 +118,7 @@ pub extern "C" fn libafl_main() { // The Monitor trait define how the fuzzer stats are reported to the user let monitor = SimpleMonitor::new(|s| println!("{s}")); - let context = NautilusContext::from_file(15, "grammar.json"); + let context = NautilusContext::from_file(15, "grammar.json").unwrap(); let mut event_converter = opt.bytes_broker_port.map(|port| { LlmpEventConverter::builder() @@ -133,13 +134,15 @@ pub extern "C" fn libafl_main() { // to disconnect the event coverter from the broker later // call detach_from_broker( port) - let mut run_client = |state: Option<_>, mut mgr, _core_id| { + let mut run_client = |state: Option<_>, mut mgr, _client_description| { let mut bytes = vec![]; // The closure that we want to fuzz let mut harness = |input: &NautilusInput| { input.unparse(&context, &mut bytes); - libfuzzer_test_one_input(&bytes); + unsafe { + libfuzzer_test_one_input(&bytes); + } ExitKind::Ok }; @@ -201,8 +204,8 
@@ pub extern "C" fn libafl_main() { // The actual target run starts here. // Call LLVMFUzzerInitialize() if present. let args: Vec = env::args().collect(); - if libfuzzer_initialize(&args) == -1 { - println!("Warning: LLVMFuzzerInitialize failed with -1") + if unsafe { libfuzzer_initialize(&args) } == -1 { + println!("Warning: LLVMFuzzerInitialize failed with -1"); } let mut generator = NautilusGenerator::new(&context); diff --git a/fuzzers/tinyinst_simple/Cargo.toml b/fuzzers/tinyinst_simple/Cargo.toml deleted file mode 100644 index d44e8052cf..0000000000 --- a/fuzzers/tinyinst_simple/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "tinyinst_simple" -version = "0.13.0" -edition = "2021" - -[dependencies] -libafl = { path = "../../libafl", features = ["introspection"] } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_tinyinst = { path = "../../libafl_tinyinst" } - -[profile.release] -codegen-units = 1 -opt-level = 3 diff --git a/fuzzers/tutorial/Cargo.toml b/fuzzers/tutorial/Cargo.toml deleted file mode 100644 index c804567b88..0000000000 --- a/fuzzers/tutorial/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "tutorial" -version = "0.13.0" -authors = ["Andrea Fioraldi ", "Dominik Maier "] -edition = "2021" - -[features] -default = ["std"] -std = [] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -[build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "4.4" - -[dependencies] -libafl = { path = "../../libafl/", features = ["default", "rand_trait"] } -libafl_bolts = { path = "../../libafl_bolts/" } -libafl_targets = { path = "../../libafl_targets/", features = ["sancov_pcguard_hitcounts", "libfuzzer", "sancov_cmplog"] } -serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib -lain = { version = "0.5", features = ["serde_support"], git = "https://github.com/AFLplusplus/lain.git", rev = "208e927bcf411f62f8a1f51ac2d9f9423a1ec5d3" } # We're using a lain fork compatible with libafl's rand version -# TODO Include it only when building cc -libafl_cc = { path = "../../libafl_cc/" } - -[lib] -name = "tutorial" -crate-type = ["staticlib"] diff --git a/libafl/Cargo.toml b/libafl/Cargo.toml index 96f41ae3c0..d7c1823a43 100644 --- a/libafl/Cargo.toml +++ b/libafl/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "libafl" version.workspace = true -authors = ["Andrea Fioraldi ", "Dominik Maier "] +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] description = "Slot your own fuzzers together and extend their features using Rust" documentation = "https://docs.rs/libafl" repository = "https://github.com/AFLplusplus/LibAFL/" @@ -9,7 +12,14 @@ readme = "../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "testing", "security"] edition = "2021" -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +rust-version = "1.82" +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] [package.metadata.docs.rs] features = ["document-features"] @@ -17,14 +27,38 @@ all-features = true rustc-args = ["--cfg", "docsrs"] [features] -default = ["std", "derive", "llmp_compression", "llmp_small_maps", "llmp_broker_timeouts", "rand_trait", "fork", "prelude", "gzip", "regex", "serdeany_autoreg", "tui_monitor", "libafl_bolts/xxh3"] +default = [ + "std", + "derive", + "llmp_compression", + "llmp_small_maps", + "llmp_broker_timeouts", + "rand_trait", + "fork", + "gzip", + "regex", + "serdeany_autoreg", + 
"libafl_bolts/xxh3", +] document-features = ["dep:document-features"] #! # Feature Flags #! ### General Features ## Enables features that need rust's `std` lib to work, like print, env, ... support -std = ["serde_json", "serde_json/std", "nix", "serde/std", "bincode", "wait-timeout", "uuid", "backtrace", "serial_test", "libafl_bolts/std", "typed-builder"] +std = [ + "serde_json", + "serde_json/std", + "dep:nix", + "serde/std", + "bincode", + "wait-timeout", + "uuid", + "backtrace", + "serial_test", + "libafl_bolts/std", + "typed-builder", +] ## Tracks the Feedbacks and the Objectives that were interesting for a Testcase track_hit_feedbacks = ["std"] @@ -64,17 +98,35 @@ tcp_manager = ["tokio", "std"] ## Enables compression for the TCP manager tcp_compression = ["tcp_manager", "libafl_bolts/gzip"] +## Enable multi-machine support +multi_machine = ["tokio", "std", "enumflags2", "ahash/std"] + ## Enables the `NaiveTokenizer` and `StacktraceObserver` regex = ["std", "dep:regex"] ## Enables deduplication based on `libcasr` for `StacktraceObserver` casr = ["libcasr", "std", "regex"] +## Intel Processor Trace +intel_pt = [ + "std", + "dep:libafl_intelpt", + "dep:libipt", + "dep:nix", + "dep:num_enum", +] + ## Enables features for corpus minimization cmin = ["z3"] ## Enables the `PrometheusMonitor` which will monitor stats via UDP, for `Grafana` and others. -prometheus_monitor = ["std", "async-std", "prometheus-client", "tide", "futures"] +prometheus_monitor = [ + "std", + "async-std", + "prometheus-client", + "tide", + "futures", +] ## Include a simple concolic mutator based on z3 concolic_mutation = ["z3"] @@ -91,7 +143,10 @@ multipart_inputs = ["arrayvec", "rand_trait"] #! ## LibAFL-Bolts Features ## Provide the `#[derive(SerdeAny)]` macro. -derive = ["libafl_derive", "libafl_bolts/derive"] # provide `derive(SerdeAny) macro. +derive = [ + "libafl_derive", + "libafl_bolts/derive", +] # provide `derive(SerdeAny) macro. ## Expose `libafl_bolts::cli` for easy commandline parsing of common fuzzer settings cli = ["libafl_bolts/cli"] @@ -125,84 +180,133 @@ llmp_compression = ["libafl_bolts/llmp_compression"] llmp_debug = ["std", "libafl_bolts/llmp_debug"] ## Reduces the initial map size for llmp -llmp_small_maps = ["libafl_bolts/llmp_small_maps"] # reduces initial map size for llmp +llmp_small_maps = [ + "libafl_bolts/llmp_small_maps", +] # reduces initial map size for llmp ## Grammar mutator. Requires nightly. 
-nautilus = ["std", "serde_json/std", "pyo3", "rand_trait", "regex-syntax", "regex"] +nautilus = [ + "std", + "serde_json/std", + "dep:pyo3", + "rand_trait", + "regex-syntax", + "regex", +] [build-dependencies] -rustversion = "1.0" +rustversion = "1.0.17" [dev-dependencies] -serde_json = { version = "1.0", default-features = false, features = ["alloc"] } +serde_json = { workspace = true, default-features = false, features = [ + "alloc", +] } # clippy-suggested optimised byte counter -bytecount = "0.6.3" +bytecount = "0.6.8" +static_assertions = { workspace = true } [dependencies] -libafl_bolts = { version = "0.13.0", path = "../libafl_bolts", default-features = false, features = ["alloc"] } -libafl_derive = { version = "0.13.0", path = "../libafl_derive", optional = true } +libafl_bolts = { workspace = true, features = ["alloc"] } +libafl_derive = { workspace = true, default-features = true, optional = true } +libafl_intelpt = { workspace = true, default-features = true, optional = true } -rustversion = "1.0" +rustversion = { workspace = true } tuple_list = { version = "0.1.3" } -hashbrown = { version = "0.14", features = ["serde", "ahash"], default-features = false } # A faster hashmap, nostd compatible -num-traits = { version = "0.2", default-features = false } -serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } # serialization lib -postcard = { version = "1.0", features = ["alloc"], default-features = false } # no_std compatible serde serialization format -bincode = { version = "1.3", optional = true } -c2rust-bitfields = { version = "0.18", features = ["no_std"] } -ahash = { version = "0.8", default-features = false } # The hash function already used in hashbrown -meminterval = { version = "0.4", features = ["serde"] } -backtrace = { version = "0.3", optional = true } # Used to get the stacktrace in StacktraceObserver -typed-builder = { version = "0.18", optional = true } # Implement the builder pattern at compiletime +hashbrown = { workspace = true, features = [ + "serde", + "ahash", +], default-features = false } # A faster hashmap, nostd compatible +num-traits = { workspace = true, default-features = false } +serde = { workspace = true, features = ["alloc"] } # serialization lib +postcard = { workspace = true } # no_std compatible serde serialization format +bincode = { version = "1.3.3", optional = true } +bitbybit = { workspace = true } +arbitrary-int = { workspace = true } +ahash = { workspace = true } # The hash function already used in hashbrown +meminterval = { workspace = true, features = ["serde"] } +backtrace = { workspace = true, optional = true } # Used to get the stacktrace in StacktraceObserver +typed-builder = { workspace = true, optional = true } # Implement the builder pattern at compiletime -serde_json = { version = "1.0", optional = true, default-features = false, features = ["alloc"] } -nix = { version = "0.29", optional = true } -regex = { version = "1", optional = true } -uuid = { version = "1.4", optional = true, features = ["serde", "v4"] } -libm = "0.2.2" -ratatui = { version = "0.26.3", default-features = false, features = ['crossterm'], optional = true } # Commandline rendering, for TUI Monitor -crossterm = { version = "0.27.0", optional = true } +serde_json = { workspace = true, optional = true, default-features = false, features = [ + "alloc", +] } +nix = { workspace = true, optional = true, features = [ + "signal", + "ptrace", + "personality", + "fs", +] } +regex = { workspace = true, optional = true } +uuid = { 
workspace = true, optional = true, features = ["serde", "v4"] } +libm = "0.2.8" +ratatui = { version = "0.29.0", default-features = false, features = [ + 'crossterm', +], optional = true } # Commandline rendering, for TUI Monitor +crossterm = { version = "0.28.1", optional = true } -prometheus-client = { version = "0.22", optional = true } # For the prometheus monitor +prometheus-client = { version = "0.22.3", optional = true } # For the prometheus monitor tide = { version = "0.16.0", optional = true } -async-std = { version = "1.12.0", features = ["attributes"], optional = true } -futures = { version = "0.3.24", optional = true } -log = "0.4.20" -tokio = { version = "1.28.1", optional = true, features = ["sync", "net", "rt", "io-util", "macros"] } # only used for TCP Event Manager right now +async-std = { version = "1.13.0", features = ["attributes"], optional = true } +futures = { version = "0.3.30", optional = true } +log = { workspace = true } +tokio = { version = "1.40.0", optional = true, features = [ + "sync", + "net", + "rt", + "io-util", + "macros", + "rt-multi-thread", + "time", +] } # used for TCP Event Manager and multi-machine +enumflags2 = { version = "0.7.10", optional = true } -wait-timeout = { version = "0.2", optional = true } # used by CommandExecutor to wait for child process +wait-timeout = { version = "0.2.0", optional = true } # used by CommandExecutor to wait for child process -concat-idents = { version = "1.1.3", optional = true } +concat-idents = { version = "1.1.5", optional = true } -libcasr = { version = "2.7", optional = true } +libcasr = { version = "2.12.1", optional = true } -bitvec = { version = "1.0", optional = true, features = ["serde"] } # used for string range storage +bitvec = { version = "1.0.1", optional = true, features = [ + "serde", +] } # used for string range storage -arrayvec = { version = "0.7.4", optional = true, default-features = false } # used for fixed-len collects +arrayvec = { version = "0.7.6", optional = true, default-features = false } # used for fixed-len collects -const_format = "0.2.32" # used for providing helpful compiler output -const_panic = "0.2.8" # similarly, for formatting const panic output +const_format = "0.2.33" # used for providing helpful compiler output +const_panic = "0.2.9" # similarly, for formatting const panic output -pyo3 = { version = "0.18.3", optional = true } # For nautilus -regex-syntax = { version = "0.8.3", optional = true } # For nautilus +pyo3 = { workspace = true, optional = true } +regex-syntax = { version = "0.8.4", optional = true } # For nautilus # optional-dev deps (change when target.'cfg(accessible(::std))'.test-dependencies will be stable) -serial_test = { version = "3", optional = true, default-features = false, features = ["logging"] } +serial_test = { workspace = true, optional = true, default-features = false, features = [ + "logging", +] } # Document all features of this crate (for `cargo doc`) -document-features = { version = "0.2", optional = true } +document-features = { workspace = true, optional = true } +# Optional +clap = { workspace = true, optional = true } +num_enum = { workspace = true, optional = true } +libipt = { workspace = true, optional = true } + +[lints] +workspace = true [target.'cfg(unix)'.dependencies] -libc = "0.2" # For (*nix) libc -z3 = { version = "0.12.0", optional = true } # for concolic mutation +libc = { workspace = true } # For (*nix) libc +z3 = { workspace = true, optional = true } # for concolic mutation [target.'cfg(windows)'.dependencies] -windows = { 
version = "0.51.1", features = ["Win32_Foundation", "Win32_System_Threading", "Win32_System_Diagnostics_Debug", "Win32_System_Kernel", "Win32_System_Memory", "Win32_Security", "Win32_System_SystemInformation"] } +windows = { workspace = true, features = [ + "Win32_Foundation", + "Win32_System_Threading", + "Win32_System_Diagnostics_Debug", + "Win32_System_Kernel", + "Win32_System_Memory", + "Win32_Security", + "Win32_System_SystemInformation", +] } [target.'cfg(windows)'.build-dependencies] -windows = "0.51.1" - -#[profile.release] -#lto = true -#opt-level = 3 -#debug = true +windows = { workspace = true } diff --git a/libafl/src/common/nautilus/README.md b/libafl/src/common/nautilus/README.md index a052afad63..9f91e27329 100644 --- a/libafl/src/common/nautilus/README.md +++ b/libafl/src/common/nautilus/README.md @@ -3,7 +3,7 @@ Nautilus is a coverage guided, grammar-based mutator. You can use it to improve your test coverage and find more bugs. By specifying the grammar of semi-valid inputs, Nautilus is able to perform complex mutation and to uncover more interesting test cases. Many of the ideas behind the original fuzzer are documented in a paper published at NDSS 2019.

- +

Version 2.0 has added many improvements to this early prototype. diff --git a/libafl/src/common/nautilus/grammartec/context.rs b/libafl/src/common/nautilus/grammartec/context.rs index 77128ca573..dc6c9badc9 100644 --- a/libafl/src/common/nautilus/grammartec/context.rs +++ b/libafl/src/common/nautilus/grammartec/context.rs @@ -1,7 +1,11 @@ use alloc::{borrow::ToOwned, string::String, vec::Vec}; +use core::num::NonZero; use hashbrown::HashMap; -use libafl_bolts::rands::{Rand, RomuDuoJrRand}; +use libafl_bolts::{ + nonzero, + rands::{Rand, RomuDuoJrRand}, +}; use pyo3::prelude::PyObject; use super::{ @@ -63,12 +67,12 @@ impl Context { #[must_use] pub fn get_nt(&self, r: &RuleIdOrCustom) -> NTermId { - return self.get_rule(r.id()).nonterm(); + self.get_rule(r.id()).nonterm() } #[must_use] pub fn get_num_children(&self, r: &RuleIdOrCustom) -> usize { - return self.get_rule(r.id()).number_of_nonterms(); + self.get_rule(r.id()).number_of_nonterms() } pub fn add_rule(&mut self, nt: &str, format: &[u8]) -> RuleId { @@ -115,10 +119,10 @@ impl Context { #[must_use] pub fn nt_id(&self, nt: &str) -> NTermId { - return *self + *self .names_to_nt_id .get(nt) - .expect(&("no such nonterminal: ".to_owned() + nt)); + .expect(&("no such nonterminal: ".to_owned() + nt)) } #[must_use] @@ -258,12 +262,13 @@ impl Context { max_len: usize, nt: NTermId, p_include_short_rules: usize, - ) -> impl Iterator + 'a { + ) -> impl Iterator + 'a { self.nts_to_rules[&nt] .iter() .take_while(move |r| self.rules_to_min_size[*r] <= max_len) .filter(move |r| { - self.rules_to_num_options[*r] > 1 || rand.below(100) <= p_include_short_rules + self.rules_to_num_options[*r] > 1 + || rand.below(nonzero!(100)) <= p_include_short_rules }) } diff --git a/libafl/src/common/nautilus/grammartec/mod.rs b/libafl/src/common/nautilus/grammartec/mod.rs index bf15649903..7d541b7880 100644 --- a/libafl/src/common/nautilus/grammartec/mod.rs +++ b/libafl/src/common/nautilus/grammartec/mod.rs @@ -2,6 +2,8 @@ pub mod chunkstore; pub mod context; pub mod mutator; pub mod newtypes; +#[cfg(feature = "nautilus")] +pub mod python_grammar_loader; pub mod recursion_info; pub mod rule; pub mod tree; diff --git a/libafl/src/common/nautilus/grammartec/mutator.rs b/libafl/src/common/nautilus/grammartec/mutator.rs index e9c3a5242d..2ec854f79c 100644 --- a/libafl/src/common/nautilus/grammartec/mutator.rs +++ b/libafl/src/common/nautilus/grammartec/mutator.rs @@ -1,6 +1,7 @@ use alloc::vec::Vec; -use std::{collections::HashSet, mem}; +use core::{mem, num::NonZero}; +use hashbrown::HashSet; use libafl_bolts::{rands::Rand, Error}; use crate::common::nautilus::grammartec::{ @@ -144,7 +145,10 @@ impl Mutator { where F: FnMut(&TreeMutation, &Context) -> Result<(), Error>, { - let n = NodeId::from(rand.below(tree.size())); + let Some(tree_size) = NonZero::new(tree.size()) else { + return Err(Error::illegal_argument("Empty tree in mut_splice")); + }; + let n = NodeId::from(rand.below(tree_size)); let old_rule_id = tree.get_rule_id(n); if let Some((repl_tree, repl_node)) = cks.get_alternative_to(rand, old_rule_id, ctx) { let repl = tree.mutate_replace_from_tree(n, repl_tree, repl_node); @@ -185,7 +189,10 @@ impl Mutator { where F: FnMut(&TreeMutation, &Context) -> Result<(), Error>, { - let n = NodeId::from(rand.below(tree.size())); + let Some(tree_size) = NonZero::new(tree.size()) else { + return Err(Error::illegal_argument("Empty tree in mut_random")); + }; + let n = NodeId::from(rand.below(tree_size)); let nterm = tree.get_rule(n, ctx).nonterm(); if 
ctx.check_if_nterm_has_multiple_possiblities(&nterm) { let len = ctx.get_random_len_for_nt(&nterm); @@ -310,8 +317,9 @@ mod tests { string::{String, ToString}, vec::Vec, }; - use std::{collections::HashSet, str}; + use core::str; + use hashbrown::HashSet; use libafl_bolts::rands::StdRand; use crate::{ diff --git a/libafl/src/common/nautilus/grammartec/python_grammar_loader.rs b/libafl/src/common/nautilus/grammartec/python_grammar_loader.rs new file mode 100644 index 0000000000..75d1119d88 --- /dev/null +++ b/libafl/src/common/nautilus/grammartec/python_grammar_loader.rs @@ -0,0 +1,64 @@ +use std::{string::String, vec::Vec}; + +use pyo3::{prelude::*, pyclass, types::IntoPyDict}; + +use crate::{nautilus::grammartec::context::Context, Error}; + +#[pyclass] +struct PyContext { + ctx: Context, +} +impl PyContext { + fn get_context(&self) -> Context { + self.ctx.clone() + } +} + +#[pymethods] +impl PyContext { + #[new] + fn new() -> Self { + PyContext { + ctx: Context::new(), + } + } + + fn rule(&mut self, py: Python, nt: &str, format: &Bound) -> PyResult<()> { + if let Ok(s) = format.extract::<&str>() { + self.ctx.add_rule(nt, s.as_bytes()); + } else if let Ok(s) = format.extract::<&[u8]>() { + self.ctx.add_rule(nt, s); + } else { + return Err(pyo3::exceptions::PyValueError::new_err( + "format argument should be string or bytes", + )); + } + Ok(()) + } + + #[allow(clippy::needless_pass_by_value)] + fn script(&mut self, nt: &str, nts: Vec, script: PyObject) { + self.ctx.add_script(nt, &nts, script); + } + + fn regex(&mut self, nt: &str, regex: &str) { + self.ctx.add_regex(nt, regex); + } +} + +fn loader(py: Python, grammar: &str) -> PyResult { + let py_ctx = Bound::new(py, PyContext::new())?; + let locals = [("ctx", &py_ctx)].into_py_dict_bound(py); + py.run_bound(grammar, None, Some(&locals))?; + Ok(py_ctx.borrow().get_context()) +} + +/// Create a `NautilusContext` from a python grammar file +#[must_use] +pub fn load_python_grammar(grammar: &str) -> Context { + Python::with_gil(|py| { + loader(py, grammar) + .map_err(|e| e.print_and_set_sys_last_vars(py)) + .expect("failed to parse python grammar") + }) +} diff --git a/libafl/src/common/nautilus/grammartec/recursion_info.rs b/libafl/src/common/nautilus/grammartec/recursion_info.rs index 230d6a5afb..49d7e5c99d 100644 --- a/libafl/src/common/nautilus/grammartec/recursion_info.rs +++ b/libafl/src/common/nautilus/grammartec/recursion_info.rs @@ -2,7 +2,10 @@ use alloc::vec::Vec; use std::fmt; use hashbrown::HashMap; -use libafl_bolts::rands::{loaded_dice::LoadedDiceSampler, Rand}; +use libafl_bolts::{ + rands::{loaded_dice::LoadedDiceSampler, Rand}, + Error, +}; use crate::common::nautilus::grammartec::{ context::Context, @@ -32,7 +35,8 @@ impl RecursionInfo { pub fn new(t: &Tree, n: NTermId, ctx: &Context) -> Option { let (recursive_parents, node_by_offset, depth_by_offset) = RecursionInfo::find_parents(t, n, ctx)?; - let sampler = RecursionInfo::build_sampler(&depth_by_offset); + let sampler = RecursionInfo::build_sampler(&depth_by_offset) + .expect("Sampler depth_by_offset invalid"); Some(Self { recursive_parents, sampler, @@ -79,7 +83,7 @@ impl RecursionInfo { } #[allow(clippy::cast_precision_loss)] - fn build_sampler(depths: &[usize]) -> LoadedDiceSampler { + fn build_sampler(depths: &[usize]) -> Result { let mut weights = depths.iter().map(|x| *x as f64).collect::>(); let norm: f64 = weights.iter().sum(); assert!(norm > 0.0); diff --git a/libafl/src/common/nautilus/grammartec/rule.rs b/libafl/src/common/nautilus/grammartec/rule.rs index 
74baf72cc5..7df06852b0 100644 --- a/libafl/src/common/nautilus/grammartec/rule.rs +++ b/libafl/src/common/nautilus/grammartec/rule.rs @@ -166,11 +166,11 @@ impl Rule { nterms: &[String], script: PyObject, ) -> Self { - return Self::Script(ScriptRule { + Self::Script(ScriptRule { nonterm: ctx.aquire_nt_id(nonterm), nonterms: nterms.iter().map(|s| ctx.aquire_nt_id(s)).collect(), script, - }); + }) } pub fn from_regex(ctx: &mut Context, nonterm: &str, regex: &str) -> Self { @@ -260,7 +260,7 @@ impl Rule { // RegExp Changed from (\{[^}\\]+\})|((?:[^{\\]|\\\{|\\\}|\\\\)+) because of problems with \\ (\\ was not matched and therefore thrown away) }); - return tokenizer + tokenizer .captures_iter(format) .map(|cap| { if let Some(sub) = cap.get(1) { @@ -276,7 +276,7 @@ impl Rule { unreachable!() } }) - .collect::>(); + .collect::>() } #[must_use] diff --git a/libafl/src/common/nautilus/grammartec/tree.rs b/libafl/src/common/nautilus/grammartec/tree.rs index dd697d858b..30ea6992df 100644 --- a/libafl/src/common/nautilus/grammartec/tree.rs +++ b/libafl/src/common/nautilus/grammartec/tree.rs @@ -1,10 +1,11 @@ use alloc::vec::Vec; -use std::{cmp, collections::HashSet, io, io::Write, marker::Sized}; +use std::{cmp, io, io::Write, marker::Sized}; +use hashbrown::HashSet; use libafl_bolts::rands::Rand; use pyo3::{ prelude::{PyObject, PyResult, Python}, - types::{PyBytes, PyString, PyTuple}, + types::{PyAnyMethods, PyBytes, PyBytesMethods, PyString, PyStringMethods, PyTuple}, FromPyObject, PyTypeInfo, }; use serde::{Deserialize, Serialize}; @@ -84,13 +85,14 @@ impl<'data, 'tree: 'data, 'ctx: 'data, W: Write, T: TreeLike> Unparser<'data, 't .into_iter() .map(io::Cursor::into_inner) .collect::>(); - let byte_arrays = bufs.iter().map(|b| PyBytes::new(py, b)); - let res = expr.call1(py, PyTuple::new(py, byte_arrays))?; - if PyString::is_type_of(res.as_ref(py)) { - let pystr = <&PyString>::extract(res.as_ref(py))?; + let byte_arrays = bufs.iter().map(|b| PyBytes::new_bound(py, b)); + let res = expr.call1(py, PyTuple::new_bound(py, byte_arrays))?; + let bound = res.bind(py); + if PyString::is_type_of_bound(bound) { + let pystr = bound.downcast::()?; self.write(pystr.to_string_lossy().as_bytes()); - } else if PyBytes::is_type_of(res.as_ref(py)) { - let pybytes = <&PyBytes>::extract(res.as_ref(py))?; + } else if PyBytes::is_type_of_bound(bound) { + let pybytes = bound.downcast::()?; self.write(pybytes.as_bytes()); } else { return Err(pyo3::exceptions::PyValueError::new_err( @@ -256,11 +258,11 @@ impl Tree { ) -> TreeMutation<'a> { let old_size = self.subtree_size(n); let new_size = other.subtree_size(other_node); - return TreeMutation { + TreeMutation { prefix: self.slice(0.into(), n), repl: other.slice(other_node, other_node + new_size), postfix: self.slice(n + old_size, self.rules.len().into()), - }; + } } fn calc_subtree_sizes_and_parents(&mut self, ctx: &Context) { @@ -280,7 +282,8 @@ impl Tree { for i in 0..self.size() { let node_id = NodeId::from(i); let nonterm = self.get_rule(node_id, ctx).nonterm(); - //sanity check + + // This should never panic! 
let (nterm_id, node) = stack.pop().expect("Not a valid tree for unparsing!"); if nterm_id == nonterm { self.paren[i] = node; @@ -431,7 +434,7 @@ impl<'a> TreeMutation<'a> { } } -impl<'a> TreeLike for TreeMutation<'a> { +impl TreeLike for TreeMutation<'_> { fn get_rule_id(&self, n: NodeId) -> RuleId { self.get_at(n).id() } @@ -452,7 +455,7 @@ impl<'a> TreeLike for TreeMutation<'a> { } fn get_rule<'c>(&self, n: NodeId, ctx: &'c Context) -> &'c Rule { - return ctx.get_rule(self.get_rule_id(n)); + ctx.get_rule(self.get_rule_id(n)) } fn get_custom_rule_data(&self, n: NodeId) -> &[u8] { self.get_at(n).data() diff --git a/libafl/src/common/nautilus/regex_mutator/mod.rs b/libafl/src/common/nautilus/regex_mutator/mod.rs index 451d0504d6..b7ece90ba3 100644 --- a/libafl/src/common/nautilus/regex_mutator/mod.rs +++ b/libafl/src/common/nautilus/regex_mutator/mod.rs @@ -1,4 +1,5 @@ use alloc::vec::Vec; +use core::num::NonZero; use libafl_bolts::rands::Rand; use regex_syntax::hir::{Class, ClassBytesRange, ClassUnicodeRange, Hir, Literal}; @@ -22,10 +23,12 @@ impl RegexScript { } pub fn get_mod(&mut self, rand: &mut R, val: usize) -> usize { - if self.remaining == 0 { + if self.remaining == 0 || val == 0 { 0 } else { - rand.below(val) + // # Safety + // This is checked above to be non-null. + rand.below(unsafe { NonZero::new(val).unwrap_unchecked() }) } } @@ -50,6 +53,7 @@ fn append_unicode_range( cls: ClassUnicodeRange, ) { let mut chr_a_buf = [0; 4]; + #[allow(clippy::similar_names)] let mut chr_b_buf = [0; 4]; cls.start().encode_utf8(&mut chr_a_buf); cls.end().encode_utf8(&mut chr_b_buf); @@ -126,10 +130,10 @@ pub fn generate(rand: &mut R, hir: &Hir) -> Vec { HirKind::Empty => {} HirKind::Literal(lit) => append_lit(&mut res, lit), HirKind::Class(cls) => append_class(rand, &mut res, &mut scr, cls), - HirKind::Repetition(rep) => { - let num = get_repetitions(rand, rep.min, rep.max, &mut scr); + HirKind::Repetition(repetition) => { + let num = get_repetitions(rand, repetition.min, repetition.max, &mut scr); for _ in 0..num { - stack.push(&rep.sub); + stack.push(&repetition.sub); } } HirKind::Capture(grp) => stack.push(&grp.sub), diff --git a/libafl/src/corpus/cached.rs b/libafl/src/corpus/cached.rs index 3b2f10857c..dfa7c15f34 100644 --- a/libafl/src/corpus/cached.rs +++ b/libafl/src/corpus/cached.rs @@ -11,7 +11,7 @@ use crate::{ inmemory_ondisk::InMemoryOnDiskCorpus, ondisk::OnDiskMetadataFormat, Corpus, CorpusId, HasTestcase, Testcase, }, - inputs::{Input, UsesInput}, + inputs::Input, Error, }; @@ -20,23 +20,12 @@ use crate::{ /// The eviction policy is FIFO. 
#[cfg(feature = "std")] #[derive(Default, Serialize, Deserialize, Clone, Debug)] -#[serde(bound = "I: serde::de::DeserializeOwned")] -pub struct CachedOnDiskCorpus -where - I: Input, -{ +pub struct CachedOnDiskCorpus { inner: InMemoryOnDiskCorpus, cached_indexes: RefCell>, cache_max_len: usize, } -impl UsesInput for CachedOnDiskCorpus -where - I: Input, -{ - type Input = I; -} - impl CachedOnDiskCorpus where I: Input, @@ -44,7 +33,7 @@ where fn cache_testcase<'a>( &'a self, testcase: &'a RefCell>, - idx: CorpusId, + id: CorpusId, ) -> Result<(), Error> { if testcase.borrow().input().is_none() { self.load_input_into(&mut testcase.borrow_mut())?; @@ -62,7 +51,7 @@ where } } } - self.cached_indexes.borrow_mut().push_back(idx); + self.cached_indexes.borrow_mut().push_back(id); } Ok(()) } @@ -71,6 +60,8 @@ impl Corpus for CachedOnDiskCorpus where I: Input, { + type Input = I; + /// Returns the number of all enabled entries #[inline] fn count(&self) -> usize { @@ -102,30 +93,30 @@ where /// Replaces the testcase at the given idx #[inline] - fn replace(&mut self, idx: CorpusId, testcase: Testcase) -> Result, Error> { + fn replace(&mut self, id: CorpusId, testcase: Testcase) -> Result, Error> { // TODO finish - self.inner.replace(idx, testcase) + self.inner.replace(id, testcase) } /// Removes an entry from the corpus, returning it if it was present; considers both enabled and disabled testcases. - fn remove(&mut self, idx: CorpusId) -> Result, Error> { - let testcase = self.inner.remove(idx)?; - self.cached_indexes.borrow_mut().retain(|e| *e != idx); + fn remove(&mut self, id: CorpusId) -> Result, Error> { + let testcase = self.inner.remove(id)?; + self.cached_indexes.borrow_mut().retain(|e| *e != id); Ok(testcase) } /// Get by id; considers only enabled testcases #[inline] - fn get(&self, idx: CorpusId) -> Result<&RefCell>, Error> { - let testcase = { self.inner.get(idx)? }; - self.cache_testcase(testcase, idx)?; + fn get(&self, id: CorpusId) -> Result<&RefCell>, Error> { + let testcase = { self.inner.get(id)? }; + self.cache_testcase(testcase, id)?; Ok(testcase) } /// Get by id; considers both enabled and disabled testcases #[inline] - fn get_from_all(&self, idx: CorpusId) -> Result<&RefCell>, Error> { - let testcase = { self.inner.get_from_all(idx)? }; - self.cache_testcase(testcase, idx)?; + fn get_from_all(&self, id: CorpusId) -> Result<&RefCell>, Error> { + let testcase = { self.inner.get_from_all(id)? }; + self.cache_testcase(testcase, id)?; Ok(testcase) } @@ -142,8 +133,8 @@ where } #[inline] - fn next(&self, idx: CorpusId) -> Option { - self.inner.next(idx) + fn next(&self, id: CorpusId) -> Option { + self.inner.next(id) } /// Peek the next free corpus id @@ -153,8 +144,8 @@ where } #[inline] - fn prev(&self, idx: CorpusId) -> Option { - self.inner.prev(idx) + fn prev(&self, id: CorpusId) -> Option { + self.inner.prev(id) } #[inline] @@ -193,22 +184,16 @@ impl HasTestcase for CachedOnDiskCorpus where I: Input, { - fn testcase(&self, id: CorpusId) -> Result>, Error> { + fn testcase(&self, id: CorpusId) -> Result>, Error> { Ok(self.get(id)?.borrow()) } - fn testcase_mut( - &self, - id: CorpusId, - ) -> Result>, Error> { + fn testcase_mut(&self, id: CorpusId) -> Result>, Error> { Ok(self.get(id)?.borrow_mut()) } } -impl CachedOnDiskCorpus -where - I: Input, -{ +impl CachedOnDiskCorpus { /// Creates the [`CachedOnDiskCorpus`]. 
/// /// This corpus stores (and reads) all testcases to/from disk diff --git a/libafl/src/corpus/inmemory.rs b/libafl/src/corpus/inmemory.rs index 8ada3b2149..4af250506c 100644 --- a/libafl/src/corpus/inmemory.rs +++ b/libafl/src/corpus/inmemory.rs @@ -8,18 +8,13 @@ use serde::{Deserialize, Serialize}; use super::HasTestcase; use crate::{ corpus::{Corpus, CorpusId, Testcase}, - inputs::{Input, UsesInput}, Error, }; /// Keep track of the stored `Testcase` and the siblings ids (insertion order) #[cfg(not(feature = "corpus_btreemap"))] #[derive(Debug, Clone, Serialize, Deserialize)] -#[serde(bound = "I: serde::de::DeserializeOwned")] -pub struct TestcaseStorageItem -where - I: Input, -{ +pub struct TestcaseStorageItem { /// The stored testcase pub testcase: RefCell>, /// Previously inserted id @@ -30,11 +25,7 @@ where /// The map type in which testcases are stored (disable the feature `corpus_btreemap` to use a `HashMap` instead of `BTreeMap`) #[derive(Default, Serialize, Deserialize, Clone, Debug)] -#[serde(bound = "I: serde::de::DeserializeOwned")] -pub struct TestcaseStorageMap -where - I: Input, -{ +pub struct TestcaseStorageMap { #[cfg(not(feature = "corpus_btreemap"))] /// A map of `CorpusId` to `TestcaseStorageItem` pub map: hashbrown::HashMap>, @@ -43,18 +34,15 @@ where pub map: alloc::collections::btree_map::BTreeMap>>, /// The keys in order (use `Vec::binary_search`) pub keys: Vec, - /// First inserted idx + /// First inserted id #[cfg(not(feature = "corpus_btreemap"))] - first_idx: Option, - /// Last inserted idx + first_id: Option, + /// Last inserted id #[cfg(not(feature = "corpus_btreemap"))] - last_idx: Option, + last_id: Option, } -impl TestcaseStorageMap -where - I: Input, -{ +impl TestcaseStorageMap { /// Insert a key in the keys set fn insert_key(&mut self, id: CorpusId) { if let Err(idx) = self.keys.binary_search(&id) { @@ -71,8 +59,8 @@ where /// Replace a testcase given a `CorpusId` #[cfg(not(feature = "corpus_btreemap"))] - pub fn replace(&mut self, idx: CorpusId, testcase: Testcase) -> Option> { - if let Some(entry) = self.map.get_mut(&idx) { + pub fn replace(&mut self, id: CorpusId, testcase: Testcase) -> Option> { + if let Some(entry) = self.map.get_mut(&id) { Some(entry.testcase.replace(testcase)) } else { None @@ -81,26 +69,26 @@ where /// Replace a testcase given a `CorpusId` #[cfg(feature = "corpus_btreemap")] - pub fn replace(&mut self, idx: CorpusId, testcase: Testcase) -> Option> { - self.map.get_mut(&idx).map(|entry| entry.replace(testcase)) + pub fn replace(&mut self, id: CorpusId, testcase: Testcase) -> Option> { + self.map.get_mut(&id).map(|entry| entry.replace(testcase)) } - /// Remove a testcase given a `CorpusId` + /// Remove a testcase given a [`CorpusId`] #[cfg(not(feature = "corpus_btreemap"))] - pub fn remove(&mut self, idx: CorpusId) -> Option>> { - if let Some(item) = self.map.remove(&idx) { - self.remove_key(idx); + pub fn remove(&mut self, id: CorpusId) -> Option>> { + if let Some(item) = self.map.remove(&id) { + self.remove_key(id); if let Some(prev) = item.prev { self.map.get_mut(&prev).unwrap().next = item.next; } else { // first elem - self.first_idx = item.next; + self.first_id = item.next; } if let Some(next) = item.next { self.map.get_mut(&next).unwrap().prev = item.prev; } else { // last elem - self.last_idx = item.prev; + self.last_id = item.prev; } Some(item.testcase) } else { @@ -108,32 +96,32 @@ where } } - /// Remove a testcase given a `CorpusId` + /// Remove a testcase given a [`CorpusId`] #[cfg(feature = "corpus_btreemap")] - pub 
fn remove(&mut self, idx: CorpusId) -> Option>> { - self.remove_key(idx); - self.map.remove(&idx) + pub fn remove(&mut self, id: CorpusId) -> Option>> { + self.remove_key(id); + self.map.remove(&id) } /// Get a testcase given a `CorpusId` #[cfg(not(feature = "corpus_btreemap"))] #[must_use] - pub fn get(&self, idx: CorpusId) -> Option<&RefCell>> { - self.map.get(&idx).as_ref().map(|x| &x.testcase) + pub fn get(&self, id: CorpusId) -> Option<&RefCell>> { + self.map.get(&id).as_ref().map(|x| &x.testcase) } /// Get a testcase given a `CorpusId` #[cfg(feature = "corpus_btreemap")] #[must_use] - pub fn get(&self, idx: CorpusId) -> Option<&RefCell>> { - self.map.get(&idx) + pub fn get(&self, id: CorpusId) -> Option<&RefCell>> { + self.map.get(&id) } /// Get the next id given a `CorpusId` (creation order) #[cfg(not(feature = "corpus_btreemap"))] #[must_use] - pub fn next(&self, idx: CorpusId) -> Option { - if let Some(item) = self.map.get(&idx) { + pub fn next(&self, id: CorpusId) -> Option { + if let Some(item) = self.map.get(&id) { item.next } else { None @@ -143,13 +131,13 @@ where /// Get the next id given a `CorpusId` (creation order) #[cfg(feature = "corpus_btreemap")] #[must_use] - pub fn next(&self, idx: CorpusId) -> Option { + pub fn next(&self, id: CorpusId) -> Option { // TODO see if using self.keys is faster let mut range = self .map - .range((core::ops::Bound::Included(idx), core::ops::Bound::Unbounded)); + .range((core::ops::Bound::Included(id), core::ops::Bound::Unbounded)); if let Some((this_id, _)) = range.next() { - if idx != *this_id { + if id != *this_id { return None; } } @@ -163,8 +151,8 @@ where /// Get the previous id given a `CorpusId` (creation order) #[cfg(not(feature = "corpus_btreemap"))] #[must_use] - pub fn prev(&self, idx: CorpusId) -> Option { - if let Some(item) = self.map.get(&idx) { + pub fn prev(&self, id: CorpusId) -> Option { + if let Some(item) = self.map.get(&id) { item.prev } else { None @@ -174,13 +162,13 @@ where /// Get the previous id given a `CorpusId` (creation order) #[cfg(feature = "corpus_btreemap")] #[must_use] - pub fn prev(&self, idx: CorpusId) -> Option { + pub fn prev(&self, id: CorpusId) -> Option { // TODO see if using self.keys is faster let mut range = self .map - .range((core::ops::Bound::Unbounded, core::ops::Bound::Included(idx))); + .range((core::ops::Bound::Unbounded, core::ops::Bound::Included(id))); if let Some((this_id, _)) = range.next_back() { - if idx != *this_id { + if id != *this_id { return None; } } @@ -195,7 +183,7 @@ where #[cfg(not(feature = "corpus_btreemap"))] #[must_use] pub fn first(&self) -> Option { - self.first_idx + self.first_id } /// Get the first created id @@ -209,7 +197,7 @@ where #[cfg(not(feature = "corpus_btreemap"))] #[must_use] pub fn last(&self) -> Option { - self.last_idx + self.last_id } /// Get the last created id @@ -227,98 +215,85 @@ where map: alloc::collections::BTreeMap::default(), keys: Vec::default(), #[cfg(not(feature = "corpus_btreemap"))] - first_idx: None, + first_id: None, #[cfg(not(feature = "corpus_btreemap"))] - last_idx: None, + last_id: None, } } } /// Storage map for the testcases (used in `Corpus` implementations) with an incremental index #[derive(Default, Serialize, Deserialize, Clone, Debug)] -#[serde(bound = "I: serde::de::DeserializeOwned")] -pub struct TestcaseStorage -where - I: Input, -{ +pub struct TestcaseStorage { /// The map in which enabled testcases are stored pub enabled: TestcaseStorageMap, /// The map in which disabled testcases are stored pub disabled: 
TestcaseStorageMap, - /// The progressive idx for both maps - progressive_idx: usize, + /// The progressive id for both maps + progressive_id: usize, } -impl UsesInput for TestcaseStorage -where - I: Input, -{ - type Input = I; -} - -impl TestcaseStorage -where - I: Input, -{ +impl TestcaseStorage { /// Insert a testcase assigning a `CorpusId` to it pub fn insert(&mut self, testcase: RefCell>) -> CorpusId { - self._insert(testcase, false) + self.insert_inner(testcase, false) } #[must_use] /// Peek the next free corpus id pub fn peek_free_id(&self) -> CorpusId { - CorpusId::from(self.progressive_idx) + CorpusId::from(self.progressive_id) } /// Insert a testcase assigning a `CorpusId` to it pub fn insert_disabled(&mut self, testcase: RefCell>) -> CorpusId { - self._insert(testcase, true) + self.insert_inner(testcase, true) } + /// Insert a testcase assigning a `CorpusId` to it #[cfg(not(feature = "corpus_btreemap"))] - fn _insert(&mut self, testcase: RefCell>, is_disabled: bool) -> CorpusId { - let idx = CorpusId::from(self.progressive_idx); - self.progressive_idx += 1; + fn insert_inner(&mut self, testcase: RefCell>, is_disabled: bool) -> CorpusId { + let id = CorpusId::from(self.progressive_id); + self.progressive_id += 1; let corpus = if is_disabled { &mut self.disabled } else { &mut self.enabled }; - let prev = if let Some(last_idx) = corpus.last_idx { - corpus.map.get_mut(&last_idx).unwrap().next = Some(idx); - Some(last_idx) + let prev = if let Some(last_id) = corpus.last_id { + corpus.map.get_mut(&last_id).unwrap().next = Some(id); + Some(last_id) } else { None }; - if corpus.first_idx.is_none() { - corpus.first_idx = Some(idx); + if corpus.first_id.is_none() { + corpus.first_id = Some(id); } - corpus.last_idx = Some(idx); - corpus.insert_key(idx); + corpus.last_id = Some(id); + corpus.insert_key(id); corpus.map.insert( - idx, + id, TestcaseStorageItem { testcase, prev, next: None, }, ); - idx + id } /// Insert a testcase assigning a `CorpusId` to it #[cfg(feature = "corpus_btreemap")] - fn _insert(&mut self, testcase: RefCell>, is_disabled: bool) -> CorpusId { - let idx = CorpusId::from(self.progressive_idx); - self.progressive_idx += 1; + fn insert_inner(&mut self, testcase: RefCell>, is_disabled: bool) -> CorpusId { + let id = CorpusId::from(self.progressive_id); + self.progressive_id += 1; let corpus = if is_disabled { &mut self.disabled } else { &mut self.enabled }; - corpus.insert_key(idx); - corpus.map.insert(idx, testcase); - idx + corpus.insert_key(id); + corpus.map.insert(id, testcase); + id } /// Create new `TestcaseStorage` @@ -327,33 +302,21 @@ where Self { enabled: TestcaseStorageMap::new(), disabled: TestcaseStorageMap::new(), - progressive_idx: 0, + progressive_id: 0, } } } /// A corpus handling all in memory. 
#[derive(Default, Serialize, Deserialize, Clone, Debug)] -#[serde(bound = "I: serde::de::DeserializeOwned")] -pub struct InMemoryCorpus -where - I: Input, -{ +pub struct InMemoryCorpus { storage: TestcaseStorage, current: Option, } -impl UsesInput for InMemoryCorpus -where - I: Input, -{ +impl Corpus for InMemoryCorpus { type Input = I; -} -impl Corpus for InMemoryCorpus -where - I: Input, -{ /// Returns the number of all enabled entries #[inline] fn count(&self) -> usize { @@ -387,43 +350,42 @@ where Ok(self.storage.insert_disabled(RefCell::new(testcase))) } - /// Replaces the testcase at the given idx + /// Replaces the testcase at the given id #[inline] - fn replace(&mut self, idx: CorpusId, testcase: Testcase) -> Result, Error> { - self.storage - .enabled - .replace(idx, testcase) - .ok_or_else(|| Error::key_not_found(format!("Index {idx} not found"))) + fn replace(&mut self, id: CorpusId, testcase: Testcase) -> Result, Error> { + self.storage.enabled.replace(id, testcase).ok_or_else(|| { + Error::key_not_found(format!("Index {id} not found, could not replace.")) + }) } /// Removes an entry from the corpus, returning it if it was present; considers both enabled and disabled testcases #[inline] - fn remove(&mut self, idx: CorpusId) -> Result, Error> { - let mut testcase = self.storage.enabled.remove(idx); + fn remove(&mut self, id: CorpusId) -> Result, Error> { + let mut testcase = self.storage.enabled.remove(id); if testcase.is_none() { - testcase = self.storage.disabled.remove(idx); + testcase = self.storage.disabled.remove(id); } testcase .map(|x| x.take()) - .ok_or_else(|| Error::key_not_found(format!("Index {idx} not found"))) + .ok_or_else(|| Error::key_not_found(format!("Index {id} not found"))) } /// Get by id; considers only enabled testcases #[inline] - fn get(&self, idx: CorpusId) -> Result<&RefCell>, Error> { + fn get(&self, id: CorpusId) -> Result<&RefCell>, Error> { self.storage .enabled - .get(idx) - .ok_or_else(|| Error::key_not_found(format!("Index {idx} not found"))) + .get(id) + .ok_or_else(|| Error::key_not_found(format!("Index {id} not found"))) } /// Get by id; considers both enabled and disabled testcases #[inline] - fn get_from_all(&self, idx: CorpusId) -> Result<&RefCell>, Error> { - let mut testcase = self.storage.enabled.get(idx); + fn get_from_all(&self, id: CorpusId) -> Result<&RefCell>, Error> { + let mut testcase = self.storage.enabled.get(id); if testcase.is_none() { - testcase = self.storage.disabled.get(idx); + testcase = self.storage.disabled.get(id); } - testcase.ok_or_else(|| Error::key_not_found(format!("Index {idx} not found"))) + testcase.ok_or_else(|| Error::key_not_found(format!("Index {id} not found"))) } /// Current testcase scheduled @@ -445,13 +407,13 @@ where } #[inline] - fn next(&self, idx: CorpusId) -> Option { - self.storage.enabled.next(idx) + fn next(&self, id: CorpusId) -> Option { + self.storage.enabled.next(id) } #[inline] - fn prev(&self, idx: CorpusId) -> Option { - self.storage.enabled.prev(idx) + fn prev(&self, id: CorpusId) -> Option { + self.storage.enabled.prev(id) } #[inline] @@ -492,29 +454,23 @@ where } } -impl HasTestcase for InMemoryCorpus -where - I: Input, -{ +impl HasTestcase for InMemoryCorpus { fn testcase( &self, id: CorpusId, - ) -> Result::Input>>, Error> { + ) -> Result::Input>>, Error> { Ok(self.get(id)?.borrow()) } fn testcase_mut( &self, id: CorpusId, - ) -> Result::Input>>, Error> { + ) -> Result::Input>>, Error> { Ok(self.get(id)?.borrow_mut()) } } -impl InMemoryCorpus -where - I: Input, -{ +impl 
InMemoryCorpus { /// Creates a new [`InMemoryCorpus`], keeping all [`Testcase`]`s` in memory. /// This is the simplest and fastest option, however test progress will be lost on exit or on OOM. #[must_use] diff --git a/libafl/src/corpus/inmemory_ondisk.rs b/libafl/src/corpus/inmemory_ondisk.rs index fe4cf37ef2..58be73441d 100644 --- a/libafl/src/corpus/inmemory_ondisk.rs +++ b/libafl/src/corpus/inmemory_ondisk.rs @@ -1,4 +1,5 @@ //! The [`InMemoryOnDiskCorpus`] stores [`Testcase`]s to disk. +//! //! Additionally, _all_ of them are kept in memory. //! For a lower memory footprint, consider using [`crate::corpus::CachedOnDiskCorpus`] //! which only stores a certain number of [`Testcase`]s and removes additional ones in a FIFO manner. @@ -9,6 +10,7 @@ use core::cell::RefCell; use std::{fs, fs::File, io::Write}; use std::{ fs::OpenOptions, + io, path::{Path, PathBuf}, }; @@ -22,20 +24,35 @@ use super::{ }; use crate::{ corpus::{Corpus, CorpusId, InMemoryCorpus, Testcase}, - inputs::{Input, UsesInput}, + inputs::Input, Error, HasMetadata, }; +/// Creates the given `path` and returns an error if it fails. +/// If the create succeeds, it will return the file. +/// If the create fails for _any_ reason, including, but not limited to, a preexisting existing file of that name, +/// it will instead return the respective [`io::Error`]. +fn create_new>(path: P) -> Result { + OpenOptions::new().write(true).create_new(true).open(path) +} + +/// Tries to create the given `path` and returns `None` _only_ if the file already existed. +/// If the create succeeds, it will return the file. +/// If the create fails for some other reason, it will instead return the respective [`io::Error`]. +fn try_create_new>(path: P) -> Result, io::Error> { + match create_new(path) { + Ok(ret) => Ok(Some(ret)), + Err(err) if err.kind() == io::ErrorKind::AlreadyExists => Ok(None), + Err(err) => Err(err), + } +} + /// A corpus able to store [`Testcase`]s to disk, while also keeping all of them in memory. /// /// Metadata is written to a `..metadata` file in the same folder by default. 
#[cfg(feature = "std")] #[derive(Default, Serialize, Deserialize, Clone, Debug)] -#[serde(bound = "I: serde::de::DeserializeOwned")] -pub struct InMemoryOnDiskCorpus -where - I: Input, -{ +pub struct InMemoryOnDiskCorpus { inner: InMemoryCorpus, dir_path: PathBuf, meta_format: Option, @@ -43,17 +60,12 @@ where locking: bool, } -impl UsesInput for InMemoryOnDiskCorpus -where - I: Input, -{ - type Input = I; -} - impl Corpus for InMemoryOnDiskCorpus where I: Input, { + type Input = I; + /// Returns the number of all enabled entries #[inline] fn count(&self) -> usize { @@ -74,52 +86,52 @@ where /// Add an enabled testcase to the corpus and return its index #[inline] fn add(&mut self, testcase: Testcase) -> Result { - let idx = self.inner.add(testcase)?; - let testcase = &mut self.get(idx).unwrap().borrow_mut(); - self.save_testcase(testcase, idx)?; + let id = self.inner.add(testcase)?; + let testcase = &mut self.get(id).unwrap().borrow_mut(); + self.save_testcase(testcase, id)?; *testcase.input_mut() = None; - Ok(idx) + Ok(id) } /// Add a disabled testcase to the corpus and return its index #[inline] fn add_disabled(&mut self, testcase: Testcase) -> Result { - let idx = self.inner.add_disabled(testcase)?; - let testcase = &mut self.get_from_all(idx).unwrap().borrow_mut(); - self.save_testcase(testcase, idx)?; + let id = self.inner.add_disabled(testcase)?; + let testcase = &mut self.get_from_all(id).unwrap().borrow_mut(); + self.save_testcase(testcase, id)?; *testcase.input_mut() = None; - Ok(idx) + Ok(id) } /// Replaces the testcase at the given idx #[inline] - fn replace(&mut self, idx: CorpusId, testcase: Testcase) -> Result, Error> { - let entry = self.inner.replace(idx, testcase)?; + fn replace(&mut self, id: CorpusId, testcase: Testcase) -> Result, Error> { + let entry = self.inner.replace(id, testcase)?; self.remove_testcase(&entry)?; - let testcase = &mut self.get(idx).unwrap().borrow_mut(); - self.save_testcase(testcase, idx)?; + let testcase = &mut self.get(id).unwrap().borrow_mut(); + self.save_testcase(testcase, id)?; *testcase.input_mut() = None; Ok(entry) } /// Removes an entry from the corpus, returning it if it was present; considers both enabled and disabled corpus #[inline] - fn remove(&mut self, idx: CorpusId) -> Result, Error> { - let entry = self.inner.remove(idx)?; + fn remove(&mut self, id: CorpusId) -> Result, Error> { + let entry = self.inner.remove(id)?; self.remove_testcase(&entry)?; Ok(entry) } /// Get by id; considers only enabled testcases #[inline] - fn get(&self, idx: CorpusId) -> Result<&RefCell>, Error> { - self.inner.get(idx) + fn get(&self, id: CorpusId) -> Result<&RefCell>, Error> { + self.inner.get(id) } /// Get by id; considers both enabled and disabled testcases #[inline] - fn get_from_all(&self, idx: CorpusId) -> Result<&RefCell>, Error> { - self.inner.get_from_all(idx) + fn get_from_all(&self, id: CorpusId) -> Result<&RefCell>, Error> { + self.inner.get_from_all(id) } /// Current testcase scheduled @@ -135,8 +147,8 @@ where } #[inline] - fn next(&self, idx: CorpusId) -> Option { - self.inner.next(idx) + fn next(&self, id: CorpusId) -> Option { + self.inner.next(id) } /// Peek the next free corpus id @@ -146,8 +158,8 @@ where } #[inline] - fn prev(&self, idx: CorpusId) -> Option { - self.inner.prev(idx) + fn prev(&self, id: CorpusId) -> Option { + self.inner.prev(id) } #[inline] @@ -207,22 +219,19 @@ where fn testcase( &self, id: CorpusId, - ) -> Result::Input>>, Error> { + ) -> Result::Input>>, Error> { Ok(self.get(id)?.borrow()) } fn testcase_mut( 
&self, id: CorpusId, - ) -> Result::Input>>, Error> { + ) -> Result::Input>>, Error> { Ok(self.get(id)?.borrow_mut()) } } -impl InMemoryOnDiskCorpus -where - I: Input, -{ +impl InMemoryOnDiskCorpus { /// Creates an [`InMemoryOnDiskCorpus`]. /// /// This corpus stores all testcases to disk, and keeps all of them in memory, as well. @@ -295,7 +304,7 @@ where ) -> Result { match fs::create_dir_all(dir_path) { Ok(()) => {} - Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {} + Err(e) if e.kind() == io::ErrorKind::AlreadyExists => {} Err(e) => return Err(e.into()), } Ok(InMemoryOnDiskCorpus { @@ -331,16 +340,11 @@ where let new_lock_filename = format!(".{new_filename}.lafl_lock"); // Try to create lock file for new testcases - if OpenOptions::new() - .create_new(true) - .write(true) - .open(self.dir_path.join(new_lock_filename)) - .is_err() - { + if let Err(err) = create_new(self.dir_path.join(&new_lock_filename)) { *testcase.filename_mut() = Some(old_filename); - return Err(Error::illegal_state( - "unable to create lock file for new testcase", - )); + return Err(Error::illegal_state(format!( + "Unable to create lock file {new_lock_filename} for new testcase: {err}" + ))); } } @@ -372,10 +376,13 @@ where } } - fn save_testcase(&self, testcase: &mut Testcase, idx: CorpusId) -> Result<(), Error> { + fn save_testcase(&self, testcase: &mut Testcase, id: CorpusId) -> Result<(), Error> + where + I: Input, + { let file_name_orig = testcase.filename_mut().take().unwrap_or_else(|| { // TODO walk entry metadata to ask for pieces of filename (e.g. :havoc in AFL) - testcase.input().as_ref().unwrap().generate_name(idx.0) + testcase.input().as_ref().unwrap().generate_name(Some(id)) }); // New testcase, we need to save it. @@ -387,12 +394,7 @@ where let lockfile_name = format!(".{file_name}.lafl_lock"); let lockfile_path = self.dir_path.join(lockfile_name); - if OpenOptions::new() - .write(true) - .create_new(true) - .open(lockfile_path) - .is_ok() - { + if try_create_new(lockfile_path)?.is_some() { break file_name; } @@ -403,11 +405,7 @@ where file_name }; - if testcase - .file_path() - .as_ref() - .map_or(true, |path| !path.starts_with(&self.dir_path)) - { + if testcase.file_path().is_none() { *testcase.file_path_mut() = Some(self.dir_path.join(&file_name)); } *testcase.filename_mut() = Some(file_name); @@ -421,19 +419,24 @@ where let ondisk_meta = OnDiskMetadata { metadata: testcase.metadata_map(), exec_time: testcase.exec_time(), - executions: testcase.executions(), }; let mut tmpfile = File::create(&tmpfile_path)?; + let json_error = + |err| Error::serialize(format!("Failed to json-ify metadata: {err:?}")); + let serialized = match self.meta_format.as_ref().unwrap() { OnDiskMetadataFormat::Postcard => postcard::to_allocvec(&ondisk_meta)?, - OnDiskMetadataFormat::Json => serde_json::to_vec(&ondisk_meta)?, - OnDiskMetadataFormat::JsonPretty => serde_json::to_vec_pretty(&ondisk_meta)?, - #[cfg(feature = "gzip")] - OnDiskMetadataFormat::JsonGzip => { - GzipCompressor::new().compress(&serde_json::to_vec_pretty(&ondisk_meta)?) + OnDiskMetadataFormat::Json => { + serde_json::to_vec(&ondisk_meta).map_err(json_error)? } + OnDiskMetadataFormat::JsonPretty => { + serde_json::to_vec_pretty(&ondisk_meta).map_err(json_error)? 
+ } + #[cfg(feature = "gzip")] + OnDiskMetadataFormat::JsonGzip => GzipCompressor::new() + .compress(&serde_json::to_vec_pretty(&ondisk_meta).map_err(json_error)?), }; tmpfile.write_all(&serialized)?; fs::rename(&tmpfile_path, &metafile_path)?; @@ -465,3 +468,27 @@ where &self.dir_path } } + +#[cfg(test)] +mod tests { + use std::{env, fs, io::Write}; + + use super::{create_new, try_create_new}; + + #[test] + fn test() { + let tmp = env::temp_dir(); + let path = tmp.join("testfile.tmp"); + _ = fs::remove_file(&path); + let mut f = create_new(&path).unwrap(); + f.write_all(&[0; 1]).unwrap(); + + match try_create_new(&path) { + Ok(None) => (), + Ok(_) => panic!("File {path:?} did not exist even though it should have?"), + Err(e) => panic!("An unexpected error occurred: {e}"), + }; + drop(f); + fs::remove_file(path).unwrap(); + } +} diff --git a/libafl/src/corpus/minimizer.rs b/libafl/src/corpus/minimizer.rs index b8987a1bf6..84c6be3875 100644 --- a/libafl/src/corpus/minimizer.rs +++ b/libafl/src/corpus/minimizer.rs @@ -24,28 +24,6 @@ use crate::{ Error, HasMetadata, HasScheduler, }; -/// `CorpusMinimizers` minimize corpora according to internal logic. See various implementations for -/// details. -pub trait CorpusMinimizer -where - E: UsesState, - E::State: HasCorpus, -{ - /// Minimize the corpus of the provided state. - fn minimize( - &self, - fuzzer: &mut Z, - executor: &mut E, - manager: &mut EM, - state: &mut E::State, - ) -> Result<(), Error> - where - E: Executor + HasObservers, - CS: Scheduler + RemovableScheduler, // schedulers that has on_remove/on_replace only! - EM: EventFirer, - Z: HasScheduler; -} - /// Minimizes a corpus according to coverage maps, weighting by the specified `TestcaseScore`. /// /// Algorithm based on WMOPT: @@ -56,8 +34,7 @@ pub struct MapCorpusMinimizer { } /// Standard corpus minimizer, which weights inputs by length and time. -pub type StdCorpusMinimizer = - MapCorpusMinimizer::State>>; +pub type StdCorpusMinimizer = MapCorpusMinimizer; impl MapCorpusMinimizer where @@ -76,17 +53,19 @@ where } } -impl CorpusMinimizer for MapCorpusMinimizer +impl MapCorpusMinimizer where E: UsesState, for<'a> O: MapObserver + AsIter<'a, Item = T>, C: AsRef, E::State: HasMetadata + HasCorpus + HasExecutions, + <::State as HasCorpus>::Corpus: Corpus, T: Copy + Hash + Eq, TS: TestcaseScore, { + /// Do the minimization #[allow(clippy::too_many_lines)] - fn minimize( + pub fn minimize( &self, fuzzer: &mut Z, executor: &mut E, @@ -95,10 +74,14 @@ where ) -> Result<(), Error> where E: Executor + HasObservers, - CS: Scheduler + RemovableScheduler, + E::Observers: ObserversTuple, + CS: Scheduler + RemovableScheduler, EM: EventFirer, Z: HasScheduler, { + // don't delete this else it won't work after restart + let current = *state.corpus().current(); + let cfg = Config::default(); let ctx = Context::new(&cfg); let opt = Optimize::new(&ctx); @@ -116,9 +99,9 @@ where let total = state.corpus().count() as u64; let mut curr = 0; - while let Some(idx) = cur_id { + while let Some(id) = cur_id { let (weight, input) = { - let mut testcase = state.corpus().get(idx)?.borrow_mut(); + let mut testcase = state.corpus().get(id)?.borrow_mut(); let weight = TS::compute(state, &mut *testcase)? 
.to_u64() .expect("Weight must be computable."); @@ -177,9 +160,9 @@ where } // Keep track of that seed's index and weight - seed_exprs.insert(seed_expr, (idx, weight)); + seed_exprs.insert(seed_expr, (id, weight)); - cur_id = state.corpus().next(idx); + cur_id = state.corpus().next(id); } manager.log( @@ -215,22 +198,28 @@ where let res = if let Some(model) = opt.get_model() { let mut removed = Vec::with_capacity(state.corpus().count()); - for (seed, (idx, _)) in seed_exprs { + for (seed, (id, _)) in seed_exprs { // if the model says the seed isn't there, mark it for deletion if !model.eval(&seed, true).unwrap().as_bool().unwrap() { - removed.push(idx); + removed.push(id); } } // reverse order; if indexes are stored in a vec, we need to remove from back to front - removed.sort_unstable_by(|idx1, idx2| idx2.cmp(idx1)); - for idx in removed { - let removed = state.corpus_mut().remove(idx)?; + removed.sort_unstable_by(|id1, id2| id2.cmp(id1)); + for id in removed { + if let Some(_cur) = current { + continue; + } + + let removed = state.corpus_mut().remove(id)?; // scheduler needs to know we've removed the input, or it will continue to try // to use now-missing inputs fuzzer .scheduler_mut() - .on_remove(state, idx, &Some(removed))?; + .on_remove(state, id, &Some(removed))?; } + + *state.corpus_mut().current_mut() = None; //we may have removed the current ID from the corpus Ok(()) } else { Err(Error::unknown("Corpus minimization failed; unsat.")) diff --git a/libafl/src/corpus/mod.rs b/libafl/src/corpus/mod.rs index c3b1f5f546..f90d7b3802 100644 --- a/libafl/src/corpus/mod.rs +++ b/libafl/src/corpus/mod.rs @@ -31,12 +31,12 @@ pub use minimizer::*; pub use nop::NopCorpus; use serde::{Deserialize, Serialize}; -use crate::{inputs::UsesInput, Error}; +use crate::Error; /// An abstraction for the index that identify a testcase in the corpus #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)] #[repr(transparent)] -pub struct CorpusId(pub(crate) usize); +pub struct CorpusId(pub usize); impl fmt::Display for CorpusId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { @@ -63,29 +63,48 @@ impl From for usize { } } -/// Utility macro to call `Corpus::random_id`; fetches only enabled testcases +/// Utility macro to call `Corpus::random_id`; fetches only enabled [`Testcase`]`s` #[macro_export] macro_rules! random_corpus_id { ($corpus:expr, $rand:expr) => {{ let cnt = $corpus.count(); - let nth = $rand.below(cnt); + #[cfg(debug_assertions)] + let nth = $rand.below(core::num::NonZero::new(cnt).expect("Corpus may not be empty!")); + // # Safety + // This is a hot path. We try to be as fast as possible here. + // In debug this is checked (see above.) + // The worst that can happen is a wrong integer to get returned. + // In this case, the call below will fail. + #[cfg(not(debug_assertions))] + let nth = $rand.below(unsafe { core::num::NonZero::new(cnt).unwrap_unchecked() }); $corpus.nth(nth) }}; } -/// Utility macro to call `Corpus::random_id`; fetches both enabled and disabled testcases +/// Utility macro to call `Corpus::random_id`; fetches both enabled and disabled [`Testcase`]`s` /// Note: use `Corpus::get_from_all` as disabled entries are inaccessible from `Corpus::get` #[macro_export] macro_rules! 
random_corpus_id_with_disabled { ($corpus:expr, $rand:expr) => {{ let cnt = $corpus.count_all(); - let nth = $rand.below(cnt); + #[cfg(debug_assertions)] + let nth = $rand.below(core::num::NonZero::new(cnt).expect("Corpus may not be empty!")); + // # Safety + // This is a hot path. We try to be as fast as possible here. + // In debug this is checked (see above.) + // The worst that can happen is a wrong integer to get returned. + // In this case, the call below will fail. + #[cfg(not(debug_assertions))] + let nth = $rand.below(unsafe { core::num::NonZero::new(cnt).unwrap_unchecked() }); $corpus.nth_from_all(nth) }}; } /// Corpus with all current [`Testcase`]s, or solutions -pub trait Corpus: UsesInput + Serialize + for<'de> Deserialize<'de> { +pub trait Corpus: Sized { + /// The type of input contained in this corpus + type Input; + /// Returns the number of all enabled entries fn count(&self) -> usize; @@ -109,7 +128,7 @@ pub trait Corpus: UsesInput + Serialize + for<'de> Deserialize<'de> { /// Replaces the [`Testcase`] at the given idx, returning the existing. fn replace( &mut self, - idx: CorpusId, + id: CorpusId, testcase: Testcase, ) -> Result, Error>; @@ -171,8 +190,11 @@ pub trait Corpus: UsesInput + Serialize + for<'de> Deserialize<'de> { fn store_input_from(&self, testcase: &Testcase) -> Result<(), Error>; /// Loads the `Input` for a given [`CorpusId`] from the [`Corpus`], and returns the clone. - fn cloned_input_for_id(&self, idx: CorpusId) -> Result { - let mut testcase = self.get(idx)?.borrow_mut(); + fn cloned_input_for_id(&self, id: CorpusId) -> Result + where + Self::Input: Clone, + { + let mut testcase = self.get(id)?.borrow_mut(); Ok(testcase.load_input(self)?.clone()) } } @@ -180,10 +202,10 @@ pub trait Corpus: UsesInput + Serialize + for<'de> Deserialize<'de> { /// Trait for types which track the current corpus index pub trait HasCurrentCorpusId { /// Set the current corpus index; we have started processing this corpus entry - fn set_corpus_idx(&mut self, idx: CorpusId) -> Result<(), Error>; + fn set_corpus_id(&mut self, id: CorpusId) -> Result<(), Error>; /// Clear the current corpus index; we are done with this entry - fn clear_corpus_idx(&mut self) -> Result<(), Error>; + fn clear_corpus_id(&mut self) -> Result<(), Error>; /// Fetch the current corpus index -- typically used after a state recovery or transfer fn current_corpus_id(&self) -> Result, Error>; @@ -200,7 +222,7 @@ where cur_back: Option, } -impl<'a, C> Iterator for CorpusIdIterator<'a, C> +impl Iterator for CorpusIdIterator<'_, C> where C: Corpus, { @@ -216,7 +238,7 @@ where } } -impl<'a, C> DoubleEndedIterator for CorpusIdIterator<'a, C> +impl DoubleEndedIterator for CorpusIdIterator<'_, C> where C: Corpus, { diff --git a/libafl/src/corpus/nop.rs b/libafl/src/corpus/nop.rs index b7d668fd27..e50173f9e0 100644 --- a/libafl/src/corpus/nop.rs +++ b/libafl/src/corpus/nop.rs @@ -5,29 +5,18 @@ use serde::{Deserialize, Serialize}; use crate::{ corpus::{Corpus, CorpusId, Testcase}, - inputs::{Input, UsesInput}, Error, }; /// A corpus which does not store any [`Testcase`]s. 
#[derive(Default, Serialize, Deserialize, Clone, Debug)] -#[serde(bound = "I: serde::de::DeserializeOwned")] pub struct NopCorpus { empty: Option, phantom: PhantomData, } -impl UsesInput for NopCorpus -where - I: Input, -{ +impl Corpus for NopCorpus { type Input = I; -} - -impl Corpus for NopCorpus -where - I: Input, -{ /// Returns the number of all enabled entries #[inline] fn count(&self) -> usize { @@ -57,27 +46,27 @@ where Err(Error::unsupported("Unsupported by NopCorpus")) } - /// Replaces the testcase at the given idx + /// Replaces the testcase with the given id #[inline] - fn replace(&mut self, _idx: CorpusId, _testcase: Testcase) -> Result, Error> { + fn replace(&mut self, _id: CorpusId, _testcase: Testcase) -> Result, Error> { Err(Error::unsupported("Unsupported by NopCorpus")) } /// Removes an entry from the corpus, returning it if it was present; considers both enabled and disabled testcases #[inline] - fn remove(&mut self, _idx: CorpusId) -> Result, Error> { + fn remove(&mut self, _id: CorpusId) -> Result, Error> { Err(Error::unsupported("Unsupported by NopCorpus")) } /// Get by id; considers only enabled testcases #[inline] - fn get(&self, _idx: CorpusId) -> Result<&RefCell>, Error> { + fn get(&self, _id: CorpusId) -> Result<&RefCell>, Error> { Err(Error::unsupported("Unsupported by NopCorpus")) } /// Get by id; considers both enabled and disabled testcases #[inline] - fn get_from_all(&self, _idx: CorpusId) -> Result<&RefCell>, Error> { + fn get_from_all(&self, _id: CorpusId) -> Result<&RefCell>, Error> { Err(Error::unsupported("Unsupported by NopCorpus")) } @@ -100,12 +89,12 @@ where } #[inline] - fn next(&self, _idx: CorpusId) -> Option { + fn next(&self, _id: CorpusId) -> Option { None } #[inline] - fn prev(&self, _idx: CorpusId) -> Option { + fn prev(&self, _id: CorpusId) -> Option { None } @@ -142,10 +131,7 @@ where } } -impl NopCorpus -where - I: Input, -{ +impl NopCorpus { /// Creates a new [`NopCorpus`]. #[must_use] pub fn new() -> Self { diff --git a/libafl/src/corpus/ondisk.rs b/libafl/src/corpus/ondisk.rs index 82cdd2cee5..db32fd37d9 100644 --- a/libafl/src/corpus/ondisk.rs +++ b/libafl/src/corpus/ondisk.rs @@ -1,20 +1,23 @@ -//! The ondisk corpus stores all [`Testcase`]s to disk. -//! It never keeps any of them in memory. -//! This is a good solution for solutions that are never reused, and for very memory-constraint environments. +//! The [`OnDiskCorpus`] stores all [`Testcase`]s to disk. +//! +//! It _never_ keeps any of them in memory. +//! This is a good solution for solutions that are never reused, or for *very* memory-constraint environments. //! For any other occasions, consider using [`crate::corpus::CachedOnDiskCorpus`] -//! which stores a certain number of testcases in memory and removes additional ones in a FIFO manner. +//! which stores a certain number of [`Testcase`]s in memory and removes additional ones in a FIFO manner. 
use alloc::string::String; -use core::{cell::RefCell, time::Duration}; +use core::{ + cell::{Ref, RefCell, RefMut}, + time::Duration, +}; use std::path::{Path, PathBuf}; use libafl_bolts::serdeany::SerdeAnyMap; use serde::{Deserialize, Serialize}; -use super::{CachedOnDiskCorpus, HasTestcase}; use crate::{ - corpus::{Corpus, CorpusId, Testcase}, - inputs::{Input, UsesInput}, + corpus::{CachedOnDiskCorpus, Corpus, CorpusId, HasTestcase, Testcase}, + inputs::Input, Error, }; @@ -41,36 +44,24 @@ pub struct OnDiskMetadata<'a> { pub metadata: &'a SerdeAnyMap, /// The exec time for this [`Testcase`] pub exec_time: &'a Option, - /// The amount of executions for this [`Testcase`] - pub executions: &'a u64, } /// A corpus able to store [`Testcase`]s to disk, and load them from disk, when they are being used. /// /// Metadata is written to a `..metadata` file in the same folder by default. #[derive(Default, Serialize, Deserialize, Clone, Debug)] -#[serde(bound = "I: serde::de::DeserializeOwned")] -pub struct OnDiskCorpus -where - I: Input, -{ +pub struct OnDiskCorpus { /// The root directory backing this corpus dir_path: PathBuf, /// We wrapp a cached corpus and set its size to 1. inner: CachedOnDiskCorpus, } -impl UsesInput for OnDiskCorpus -where - I: Input, -{ - type Input = I; -} - impl Corpus for OnDiskCorpus where I: Input, { + type Input = I; /// Returns the number of all enabled entries #[inline] fn count(&self) -> usize { @@ -102,8 +93,8 @@ where /// Replaces the testcase at the given idx #[inline] - fn replace(&mut self, idx: CorpusId, testcase: Testcase) -> Result, Error> { - self.inner.replace(idx, testcase) + fn replace(&mut self, id: CorpusId, testcase: Testcase) -> Result, Error> { + self.inner.replace(id, testcase) } /// Peek the next free corpus id @@ -114,20 +105,20 @@ where /// Removes an entry from the corpus, returning it if it was present; considers both enabled and disabled testcases #[inline] - fn remove(&mut self, idx: CorpusId) -> Result, Error> { - self.inner.remove(idx) + fn remove(&mut self, id: CorpusId) -> Result, Error> { + self.inner.remove(id) } /// Get by id; will check the disabled corpus if not available in the enabled #[inline] - fn get(&self, idx: CorpusId) -> Result<&RefCell>, Error> { - self.inner.get(idx) + fn get(&self, id: CorpusId) -> Result<&RefCell>, Error> { + self.inner.get(id) } /// Get by id; considers both enabled and disabled testcases #[inline] - fn get_from_all(&self, idx: CorpusId) -> Result<&RefCell>, Error> { - self.inner.get_from_all(idx) + fn get_from_all(&self, id: CorpusId) -> Result<&RefCell>, Error> { + self.inner.get_from_all(id) } /// Current testcase scheduled @@ -143,13 +134,13 @@ where } #[inline] - fn next(&self, idx: CorpusId) -> Option { - self.inner.next(idx) + fn next(&self, id: CorpusId) -> Option { + self.inner.next(id) } #[inline] - fn prev(&self, idx: CorpusId) -> Option { - self.inner.prev(idx) + fn prev(&self, id: CorpusId) -> Option { + self.inner.prev(id) } #[inline] @@ -188,25 +179,16 @@ impl HasTestcase for OnDiskCorpus where I: Input, { - fn testcase( - &self, - id: CorpusId, - ) -> Result::Input>>, Error> { + fn testcase(&self, id: CorpusId) -> Result>, Error> { Ok(self.get(id)?.borrow()) } - fn testcase_mut( - &self, - id: CorpusId, - ) -> Result::Input>>, Error> { + fn testcase_mut(&self, id: CorpusId) -> Result>, Error> { Ok(self.get(id)?.borrow_mut()) } } -impl OnDiskCorpus -where - I: Input, -{ +impl OnDiskCorpus { /// Creates an [`OnDiskCorpus`]. /// /// This corpus stores all testcases to disk. 
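The corpus changes above replace the plain `$rand.below(cnt)` call in `random_corpus_id!` with a `NonZero` bound that is validated in debug builds and taken on trust (via `unwrap_unchecked`) in release builds, as the hot-path safety comment in the macro explains. A minimal, self-contained sketch of that same debug/release split, independent of LibAFL's `Rand` trait (the modulo reduction here is only a stand-in for the real `below` implementation):

```rust
use core::num::NonZeroUsize;

/// Pick a pseudo-random index below `count`, mirroring the pattern used by
/// `random_corpus_id!`: check the non-zero bound in debug builds, skip the
/// check on the hot path in release builds.
fn random_index_below(seed: u64, count: usize) -> usize {
    #[cfg(debug_assertions)]
    let bound = NonZeroUsize::new(count).expect("Corpus may not be empty!");
    // SAFETY: callers must guarantee `count > 0`; in release builds we trust
    // them so this path stays branch-free, as the macro's comment describes.
    #[cfg(not(debug_assertions))]
    let bound = unsafe { NonZeroUsize::new_unchecked(count) };
    (seed % bound.get() as u64) as usize
}

fn main() {
    // With a non-empty "corpus" of 8 entries, any seed maps into 0..8.
    assert!(random_index_below(0xdead_beef, 8) < 8);
}
```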
diff --git a/libafl/src/corpus/testcase.rs b/libafl/src/corpus/testcase.rs index 2808c8a8e0..8c76399594 100644 --- a/libafl/src/corpus/testcase.rs +++ b/libafl/src/corpus/testcase.rs @@ -15,34 +15,29 @@ use libafl_bolts::{serdeany::SerdeAnyMap, HasLen}; use serde::{Deserialize, Serialize}; use super::Corpus; -use crate::{ - corpus::CorpusId, - inputs::{Input, UsesInput}, - Error, HasMetadata, -}; +use crate::{corpus::CorpusId, state::HasCorpus, Error, HasMetadata}; /// Shorthand to receive a [`Ref`] or [`RefMut`] to a stored [`Testcase`], by [`CorpusId`]. /// For a normal state, this should return a [`Testcase`] in the corpus, not the objectives. -pub trait HasTestcase: UsesInput { +pub trait HasTestcase: HasCorpus { /// Shorthand to receive a [`Ref`] to a stored [`Testcase`], by [`CorpusId`]. /// For a normal state, this should return a [`Testcase`] in the corpus, not the objectives. - fn testcase(&self, id: CorpusId) -> Result::Input>>, Error>; + fn testcase( + &self, + id: CorpusId, + ) -> Result::Input>>, Error>; /// Shorthand to receive a [`RefMut`] to a stored [`Testcase`], by [`CorpusId`]. /// For a normal state, this should return a [`Testcase`] in the corpus, not the objectives. fn testcase_mut( &self, id: CorpusId, - ) -> Result::Input>>, Error>; + ) -> Result::Input>>, Error>; } /// An entry in the [`Testcase`] Corpus #[derive(Serialize, Deserialize, Clone, Debug)] -#[serde(bound = "I: serde::de::DeserializeOwned")] -pub struct Testcase -where - I: Input, -{ +pub struct Testcase { /// The [`Input`] of this [`Testcase`], or `None`, if it is not currently in memory input: Option, /// The filename for this [`Testcase`] @@ -59,8 +54,6 @@ where exec_time: Option, /// Cached len of the input, if any cached_len: Option, - /// Number of executions done at discovery time - executions: u64, /// Number of fuzzing iterations of this particular input updated in `perform_mutational` scheduled_count: usize, /// Parent [`CorpusId`], if known @@ -77,10 +70,7 @@ where hit_objectives: Vec>, } -impl HasMetadata for Testcase -where - I: Input, -{ +impl HasMetadata for Testcase { /// Get all the metadata into an [`hashbrown::HashMap`] #[inline] fn metadata_map(&self) -> &SerdeAnyMap { @@ -95,10 +85,7 @@ where } /// Impl of a testcase -impl Testcase -where - I: Input, -{ +impl Testcase { /// Returns this [`Testcase`] with a loaded `Input`] pub fn load_input>(&mut self, corpus: &C) -> Result<&I, Error> { corpus.load_input_into(self)?; @@ -120,8 +107,7 @@ where /// Set the input #[inline] - pub fn set_input(&mut self, mut input: I) { - input.wrapped_as_testcase(); + pub fn set_input(&mut self, input: I) { self.input = Some(input); } @@ -183,18 +169,6 @@ where self.exec_time = Some(time); } - /// Get the executions - #[inline] - pub fn executions(&self) -> &u64 { - &self.executions - } - - /// Get the executions (mutable) - #[inline] - pub fn executions_mut(&mut self) -> &mut u64 { - &mut self.executions - } - /// Get the `scheduled_count` #[inline] pub fn scheduled_count(&self) -> usize { @@ -249,8 +223,7 @@ where /// Create a new Testcase instance given an input #[inline] - pub fn new(mut input: I) -> Self { - input.wrapped_as_testcase(); + pub fn new(input: I) -> Self { Self { input: Some(input), filename: None, @@ -261,7 +234,6 @@ where metadata_path: None, exec_time: None, cached_len: None, - executions: 0, scheduled_count: 0, parent_id: None, disabled: false, @@ -275,8 +247,7 @@ where /// Creates a testcase, attaching the id of the parent /// that this [`Testcase`] was derived from on creation - 
pub fn with_parent_id(mut input: I, parent_id: CorpusId) -> Self { - input.wrapped_as_testcase(); + pub fn with_parent_id(input: I, parent_id: CorpusId) -> Self { Testcase { input: Some(input), filename: None, @@ -287,7 +258,6 @@ where metadata_path: None, exec_time: None, cached_len: None, - executions: 0, scheduled_count: 0, parent_id: Some(parent_id), disabled: false, @@ -299,10 +269,9 @@ where } } - /// Create a new Testcase instance given an [`Input`] and a `filename` + /// Create a new Testcase instance given an input and a `filename` #[inline] - pub fn with_filename(mut input: I, filename: String) -> Self { - input.wrapped_as_testcase(); + pub fn with_filename(input: I, filename: String) -> Self { Self { input: Some(input), filename: Some(filename), @@ -313,33 +282,6 @@ where metadata_path: None, exec_time: None, cached_len: None, - executions: 0, - scheduled_count: 0, - parent_id: None, - disabled: false, - objectives_found: 0, - #[cfg(feature = "track_hit_feedbacks")] - hit_feedbacks: Vec::new(), - #[cfg(feature = "track_hit_feedbacks")] - hit_objectives: Vec::new(), - } - } - - /// Create a new Testcase instance given an [`Input`] and the number of executions - #[inline] - pub fn with_executions(mut input: I, executions: u64) -> Self { - input.wrapped_as_testcase(); - Self { - input: Some(input), - filename: None, - #[cfg(feature = "std")] - file_path: None, - metadata: SerdeAnyMap::default(), - #[cfg(feature = "std")] - metadata_path: None, - exec_time: None, - cached_len: None, - executions, scheduled_count: 0, parent_id: None, disabled: false, @@ -378,10 +320,7 @@ where } } -impl Default for Testcase -where - I: Input, -{ +impl Default for Testcase { /// Create a new default Testcase #[inline] fn default() -> Self { @@ -392,7 +331,6 @@ where exec_time: None, cached_len: None, scheduled_count: 0, - executions: 0, parent_id: None, #[cfg(feature = "std")] file_path: None, @@ -411,7 +349,7 @@ where /// Impl of a testcase when the input has len impl Testcase where - I: Input + HasLen, + I: HasLen, { /// Get the cached `len`. Will `Error::EmptyOptional` if `len` is not yet cached. 
#[inline] @@ -441,10 +379,7 @@ where } /// Create a testcase from an input -impl From for Testcase -where - I: Input, -{ +impl From for Testcase { fn from(input: I) -> Self { Testcase::new(input) } @@ -563,10 +498,7 @@ impl SchedulerTestcaseMetadata { libafl_bolts::impl_serdeany!(SchedulerTestcaseMetadata); #[cfg(feature = "std")] -impl Drop for Testcase -where - I: Input, -{ +impl Drop for Testcase { fn drop(&mut self) { if let Some(filename) = &self.filename { let mut path = PathBuf::from(filename); diff --git a/libafl/src/events/llmp/hooks/centralized.rs b/libafl/src/events/broker_hooks/centralized.rs similarity index 89% rename from libafl/src/events/llmp/hooks/centralized.rs rename to libafl/src/events/broker_hooks/centralized.rs index 7473384333..eaa6c6bb4b 100644 --- a/libafl/src/events/llmp/hooks/centralized.rs +++ b/libafl/src/events/broker_hooks/centralized.rs @@ -1,4 +1,5 @@ -use std::{fmt::Debug, marker::PhantomData}; +use alloc::vec::Vec; +use core::{fmt::Debug, marker::PhantomData}; #[cfg(feature = "llmp_compression")] use libafl_bolts::{compress::GzipCompressor, llmp::LLMP_FLAG_COMPRESSED}; @@ -34,6 +35,7 @@ where msg_tag: &mut Tag, _msg_flags: &mut Flags, msg: &mut [u8], + _new_msgs: &mut Vec<(Tag, Flags, Vec)>, ) -> Result { if *msg_tag == _LLMP_TAG_TO_MAIN { #[cfg(feature = "llmp_compression")] @@ -93,16 +95,7 @@ where event: &Event, ) -> Result { match &event { - Event::NewTestcase { - input: _, - client_config: _, - exit_kind: _, - corpus_size: _, - observers_buf: _, - time: _, - executions: _, - forward_id: _, - } => Ok(BrokerEventResult::Forward), + Event::NewTestcase { .. } | Event::Stop => Ok(BrokerEventResult::Forward), _ => Ok(BrokerEventResult::Handled), } } diff --git a/libafl/src/events/broker_hooks/centralized_multi_machine.rs b/libafl/src/events/broker_hooks/centralized_multi_machine.rs new file mode 100644 index 0000000000..0630f13572 --- /dev/null +++ b/libafl/src/events/broker_hooks/centralized_multi_machine.rs @@ -0,0 +1,290 @@ +use std::{ + fmt::{Debug, Display}, + marker::PhantomData, + slice, + sync::Arc, + vec::Vec, +}; + +#[cfg(feature = "llmp_compression")] +use libafl_bolts::llmp::LLMP_FLAG_COMPRESSED; +use libafl_bolts::{ + llmp::{Flags, LlmpBrokerInner, LlmpHook, LlmpMsgHookResult, Tag, LLMP_FLAG_FROM_MM}, + ownedref::OwnedRef, + shmem::ShMemProvider, + ClientId, Error, +}; +use tokio::{ + net::ToSocketAddrs, + runtime::Runtime, + sync::{RwLock, RwLockWriteGuard}, + task::JoinHandle, +}; + +use crate::{ + events::{ + centralized::_LLMP_TAG_TO_MAIN, + multi_machine::{MultiMachineMsg, TcpMultiMachineState}, + Event, + }, + inputs::Input, +}; + +/// Makes a raw pointer send + sync. +/// Extremely unsafe to use in general, only use this if you know what you're doing. +#[derive(Debug, Clone, Copy)] +pub struct NullLock { + value: T, +} + +unsafe impl Send for NullLock {} +unsafe impl Sync for NullLock {} + +impl NullLock { + /// Instantiate a [`NullLock`] + /// + /// # Safety + /// + /// The null lock makes anything Send + Sync, which is usually very dangerous. + pub unsafe fn new(value: T) -> Self { + Self { value } + } + + /// Get a reference to value + pub fn get(&self) -> &T { + &self.value + } + + /// Get a mutable reference to value + pub fn get_mut(&mut self) -> &mut T { + &mut self.value + } + + /// Get back the value + pub fn into_innter(self) -> T { + self.value + } +} + +/// The Receiving side of the multi-machine architecture +/// It is responsible for receiving messages from other neighbours. 
+/// Please check [`crate::events::multi_machine`] for more information. +#[derive(Debug)] +pub struct TcpMultiMachineLlmpSenderHook +where + I: Input, +{ + /// the actual state of the broker hook + shared_state: Arc>>, + /// the tokio runtime used to interact with other machines. Keep it outside to avoid locking it. + rt: Arc, + phantom: PhantomData, +} + +/// The Receiving side of the multi-machine architecture +/// It is responsible for receiving messages from other neighbours. +/// Please check [`crate::events::multi_machine`] for more information. +#[derive(Debug)] +pub struct TcpMultiMachineLlmpReceiverHook +where + I: Input, +{ + /// the actual state of the broker hook + shared_state: Arc>>, + /// the tokio runtime used to interact with other machines. Keep it outside to avoid locking it. + rt: Arc, + phantom: PhantomData, +} + +impl TcpMultiMachineLlmpSenderHook +where + A: Clone + Display + ToSocketAddrs + Send + Sync + 'static, + I: Input + Send + Sync + 'static, +{ + /// Should not be created alone. Use [`TcpMultiMachineHooksBuilder`] instead. + pub(crate) fn new( + shared_state: Arc>>, + rt: Arc, + ) -> Self { + Self { + shared_state, + rt, + phantom: PhantomData, + } + } +} + +impl TcpMultiMachineLlmpReceiverHook +where + A: Clone + Display + ToSocketAddrs + Send + Sync + 'static, + I: Input + Send + Sync + 'static, +{ + /// Should not be created alone. Use [`TcpMultiMachineHooksBuilder`] instead. + /// + /// # Safety + /// For [`Self::on_new_message`], this struct assumes that the `msg` parameter + /// (or rather, the memory it points to), lives sufficiently long + /// for an async background task to process it. + pub(crate) unsafe fn new( + shared_state: Arc>>, + rt: Arc, + ) -> Self { + Self { + shared_state, + rt, + phantom: PhantomData, + } + } + + #[cfg(feature = "llmp_compression")] + fn try_compress( + state_lock: &mut RwLockWriteGuard>, + event: &Event, + ) -> Result<(Flags, Vec), Error> { + let serialized = postcard::to_allocvec(&event)?; + + match state_lock.compressor().maybe_compress(&serialized) { + Some(comp_buf) => Ok((LLMP_FLAG_COMPRESSED, comp_buf)), + None => Ok((Flags(0), serialized)), + } + } + + #[cfg(not(feature = "llmp_compression"))] + fn try_compress( + _state_lock: &mut RwLockWriteGuard>, + event: &Event, + ) -> Result<(Flags, Vec), Error> { + Ok((Flags(0), postcard::to_allocvec(&event)?)) + } +} + +impl LlmpHook for TcpMultiMachineLlmpSenderHook +where + A: Clone + Debug + Display + ToSocketAddrs + Send + Sync + 'static, + SP: ShMemProvider, + I: Input + Send + Sync + 'static, +{ + /// check for received messages, and forward them alongside the incoming message to inner. + fn on_new_message( + &mut self, + _broker_inner: &mut LlmpBrokerInner, + _client_id: ClientId, + _msg_tag: &mut Tag, + _msg_flags: &mut Flags, + msg: &mut [u8], + _new_msgs: &mut Vec<(Tag, Flags, Vec)>, + ) -> Result { + let shared_state = self.shared_state.clone(); + + // # Safety + // Here, we suppose msg will *never* be written again and will always be available. + // Thus, it is safe to handle this in a separate thread. 
+ let msg_lock = unsafe { NullLock::new((msg.as_ptr(), msg.len())) }; + // let flags = msg_flags.clone(); + + let _handle: JoinHandle> = self.rt.spawn(async move { + let mut state_wr_lock = shared_state.write().await; + let (msg_ptr, msg_len) = msg_lock.into_innter(); + let msg: &[u8] = unsafe { slice::from_raw_parts(msg_ptr, msg_len) }; // most likely crash here + + // #[cfg(not(feature = "llmp_compression"))] + // let event_bytes = msg; + // #[cfg(feature = "llmp_compression")] + // let compressed; + // #[cfg(feature = "llmp_compression")] + // let event_bytes = if flags & LLMP_FLAG_COMPRESSED == LLMP_FLAG_COMPRESSED { + // compressed = state_wr_lock.compressor().decompress(msg)?; + // &compressed + // } else { + // &*msg + // }; + // let event: Event = postcard::from_bytes(event_bytes)?; + + let mm_msg: MultiMachineMsg = MultiMachineMsg::llmp_msg(OwnedRef::Ref(msg)); + + // TODO: do not copy here + state_wr_lock.add_past_msg(msg); + + log::debug!("Sending msg..."); + + state_wr_lock + .send_interesting_event_to_nodes(&mm_msg) + .await?; + + log::debug!("msg sent."); + + Ok(()) + }); + + Ok(LlmpMsgHookResult::ForwardToClients) + } +} + +impl LlmpHook for TcpMultiMachineLlmpReceiverHook +where + A: Clone + Debug + Display + ToSocketAddrs + Send + Sync + 'static, + SP: ShMemProvider, + I: Input + Send + Sync + 'static, +{ + /// check for received messages, and forward them alongside the incoming message to inner. + fn on_new_message( + &mut self, + _broker_inner: &mut LlmpBrokerInner, + _client_id: ClientId, + _msg_tag: &mut Tag, + _msg_flags: &mut Flags, + _msg: &mut [u8], + new_msgs: &mut Vec<(Tag, Flags, Vec)>, + ) -> Result { + let shared_state = self.shared_state.clone(); + + let res: Result<(), Error> = self.rt.block_on(async move { + let mut state_wr_lock = shared_state.write().await; + + let mut incoming_msgs: Vec> = Vec::new(); + state_wr_lock + .receive_new_messages_from_nodes(&mut incoming_msgs) + .await?; + + log::debug!("received {} new incoming msg(s)", incoming_msgs.len()); + + let msgs_to_forward: Result)>, Error> = incoming_msgs + .into_iter() + .map(|mm_msg| match mm_msg { + MultiMachineMsg::LlmpMsg(msg) => { + let msg = msg.into_owned().unwrap().into_vec(); + #[cfg(feature = "llmp_compression")] + match state_wr_lock.compressor().maybe_compress(msg.as_ref()) { + Some(comp_buf) => Ok(( + _LLMP_TAG_TO_MAIN, + LLMP_FLAG_COMPRESSED | LLMP_FLAG_FROM_MM, + comp_buf, + )), + None => Ok((_LLMP_TAG_TO_MAIN, LLMP_FLAG_FROM_MM, msg)), + } + #[cfg(not(feature = "llmp_compression"))] + Ok((_LLMP_TAG_TO_MAIN, LLMP_FLAG_FROM_MM, msg)) + } + MultiMachineMsg::Event(evt) => { + let evt = evt.into_owned().unwrap(); + let (inner_flags, buf) = + Self::try_compress(&mut state_wr_lock, evt.as_ref())?; + + Ok((_LLMP_TAG_TO_MAIN, inner_flags | LLMP_FLAG_FROM_MM, buf)) + } + }) + .collect(); + + new_msgs.extend(msgs_to_forward?); + + Ok(()) + }); + + res?; + + // Add incoming events to the ones we should filter + // events.extend_from_slice(&incoming_events); + + Ok(LlmpMsgHookResult::ForwardToClients) + } +} diff --git a/libafl/src/events/llmp/hooks/mod.rs b/libafl/src/events/broker_hooks/mod.rs similarity index 87% rename from libafl/src/events/llmp/hooks/mod.rs rename to libafl/src/events/broker_hooks/mod.rs index 07af0cc0b4..90a4cedb2a 100644 --- a/libafl/src/events/llmp/hooks/mod.rs +++ b/libafl/src/events/broker_hooks/mod.rs @@ -1,4 +1,5 @@ -//! Standard LLMP hook +//! 
Hooks called on broker side +use alloc::vec::Vec; use core::marker::PhantomData; #[cfg(feature = "llmp_compression")] @@ -21,6 +22,14 @@ use crate::{ /// centralized hook #[cfg(all(unix, feature = "std"))] pub mod centralized; +#[cfg(all(unix, feature = "std"))] +pub use centralized::*; + +/// Multi-machine hook +#[cfg(all(unix, feature = "multi_machine"))] +pub mod centralized_multi_machine; +#[cfg(all(unix, feature = "multi_machine"))] +pub use centralized_multi_machine::*; /// An LLMP-backed event hook for scalable multi-processed fuzzing #[derive(Debug)] @@ -45,6 +54,7 @@ where #[cfg(feature = "llmp_compression")] msg_flags: &mut Flags, #[cfg(not(feature = "llmp_compression"))] _msg_flags: &mut Flags, msg: &mut [u8], + _new_msgs: &mut Vec<(Tag, Flags, Vec)>, ) -> Result { let monitor = &mut self.monitor; #[cfg(feature = "llmp_compression")] @@ -102,14 +112,9 @@ where ) -> Result { match &event { Event::NewTestcase { - input: _, - client_config: _, - exit_kind: _, corpus_size, - observers_buf: _, - time, - executions, forward_id, + .. } => { let id = if let Some(id) = *forward_id { id @@ -120,12 +125,6 @@ where monitor.client_stats_insert(id); let client = monitor.client_stats_mut_for(id); client.update_corpus_size(*corpus_size as u64); - if id == client_id { - // do not update executions for forwarded messages, otherwise we loose the total order - // as a forwarded msg with a lower executions may arrive after a stats msg with an higher executions - // this also means when you wrap this event manger with centralized EM, you will **NOT** get executions update with the new tc message - client.update_executions(*executions, *time); - } monitor.display(event.name(), id); Ok(BrokerEventResult::Forward) } @@ -178,15 +177,10 @@ where // Correctly handled the event Ok(BrokerEventResult::Handled) } - Event::Objective { - objective_size, - executions, - time, - } => { + Event::Objective { objective_size, .. } => { monitor.client_stats_insert(client_id); let client = monitor.client_stats_mut_for(client_id); client.update_objective_size(*objective_size as u64); - client.update_executions(*executions, *time); monitor.display(event.name(), client_id); Ok(BrokerEventResult::Handled) } @@ -201,6 +195,7 @@ where Ok(BrokerEventResult::Handled) } Event::CustomBuf { .. 
} => Ok(BrokerEventResult::Forward), + Event::Stop => Ok(BrokerEventResult::Forward), //_ => Ok(BrokerEventResult::Forward), } } diff --git a/libafl/src/events/centralized.rs b/libafl/src/events/centralized.rs index b9ce3055ee..515c8ec5b9 100644 --- a/libafl/src/events/centralized.rs +++ b/libafl/src/events/centralized.rs @@ -9,6 +9,7 @@ use alloc::{boxed::Box, string::String, vec::Vec}; use core::{fmt::Debug, time::Duration}; +use std::{marker::PhantomData, process}; #[cfg(feature = "llmp_compression")] use libafl_bolts::{ @@ -31,14 +32,14 @@ use crate::state::HasScalabilityMonitor; use crate::{ events::{ AdaptiveSerializer, CustomBufEventResult, Event, EventConfig, EventFirer, EventManager, - EventManagerId, EventProcessor, EventRestarter, HasCustomBufHandlers, HasEventManagerId, - LogSeverity, ProgressReporter, + EventManagerHooksTuple, EventManagerId, EventProcessor, EventRestarter, + HasCustomBufHandlers, HasEventManagerId, LogSeverity, ProgressReporter, }, executors::{Executor, HasObservers}, fuzzer::{EvaluatorObservers, ExecutionProcessor}, inputs::{Input, NopInput, UsesInput}, observers::{ObserversTuple, TimeObserver}, - state::{HasExecutions, HasLastReportTime, NopState, UsesState}, + state::{HasExecutions, HasLastReportTime, NopState, State, Stoppable, UsesState}, Error, HasMetadata, }; @@ -46,21 +47,32 @@ pub(crate) const _LLMP_TAG_TO_MAIN: Tag = Tag(0x3453453); /// A wrapper manager to implement a main-secondary architecture with another broker #[derive(Debug)] -pub struct CentralizedEventManager +pub struct CentralizedEventManager where EM: UsesState, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { inner: EM, - /// The LLMP client for inter process communication + /// The centralized LLMP client for inter process communication client: LlmpClient, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor, time_ref: Option>, + hooks: EMH, is_main: bool, + phantom: PhantomData, } -impl CentralizedEventManager>, NopShMemProvider> { +impl + CentralizedEventManager< + NopEventManager>, + (), + NopState, + NopShMemProvider, + > +{ /// Creates a builder for [`CentralizedEventManager`] #[must_use] pub fn builder() -> CentralizedEventManagerBuilder { @@ -93,24 +105,29 @@ impl CentralizedEventManagerBuilder { Self { is_main } } - /// Creates a new `CentralizedEventManager` - pub fn build_from_client( + /// Creates a new [`CentralizedEventManager`]. + pub fn build_from_client( self, inner: EM, + hooks: EMH, client: LlmpClient, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, EM: UsesState, + EMH: EventManagerHooksTuple, + S: State, + SP: ShMemProvider, { Ok(CentralizedEventManager { inner, + hooks, client, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor::with_threshold(COMPRESS_THRESHOLD), time_ref: time_obs, is_main: self.is_main, + phantom: PhantomData, }) } @@ -119,86 +136,105 @@ impl CentralizedEventManagerBuilder { /// If the port is not yet bound, it will act as a broker; otherwise, it /// will act as a client. 
#[cfg(feature = "std")] - pub fn build_on_port( + pub fn build_on_port( self, inner: EM, + hooks: EMH, shmem_provider: SP, port: u16, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where - SP: ShMemProvider, EM: UsesState, + EMH: EventManagerHooksTuple, + S: State, + SP: ShMemProvider, { let client = LlmpClient::create_attach_to_tcp(shmem_provider, port)?; Ok(CentralizedEventManager { inner, + hooks, client, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor::with_threshold(COMPRESS_THRESHOLD), time_ref: time_obs, is_main: self.is_main, + phantom: PhantomData, }) } /// If a client respawns, it may reuse the existing connection, previously /// stored by [`LlmpClient::to_env()`]. #[cfg(feature = "std")] - pub fn build_existing_client_from_env( + pub fn build_existing_client_from_env( self, inner: EM, + hooks: EMH, shmem_provider: SP, env_name: &str, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where EM: UsesState, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { Ok(CentralizedEventManager { inner, + hooks, client: LlmpClient::on_existing_from_env(shmem_provider, env_name)?, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor::with_threshold(COMPRESS_THRESHOLD), time_ref: time_obs, is_main: self.is_main, + phantom: PhantomData, }) } /// Create an existing client from description #[cfg(feature = "std")] - pub fn existing_client_from_description( + pub fn existing_client_from_description( self, inner: EM, + hooks: EMH, shmem_provider: SP, description: &LlmpClientDescription, time_obs: Option>, - ) -> Result, Error> + ) -> Result, Error> where EM: UsesState, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { Ok(CentralizedEventManager { inner, + hooks, client: LlmpClient::existing_client_from_description(shmem_provider, description)?, #[cfg(feature = "llmp_compression")] compressor: GzipCompressor::with_threshold(COMPRESS_THRESHOLD), time_ref: time_obs, is_main: self.is_main, + phantom: PhantomData, }) } } -impl UsesState for CentralizedEventManager +impl UsesState for CentralizedEventManager where EM: UsesState, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { type State = EM::State; } -impl AdaptiveSerializer for CentralizedEventManager +impl AdaptiveSerializer for CentralizedEventManager where EM: AdaptiveSerializer + UsesState, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { fn serialization_time(&self) -> Duration { @@ -232,15 +268,18 @@ where } } -impl EventFirer for CentralizedEventManager +impl EventFirer for CentralizedEventManager where EM: AdaptiveSerializer + EventFirer + HasEventManagerId, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { fn should_send(&self) -> bool { self.inner.should_send() } + #[allow(clippy::match_same_arms)] fn fire( &mut self, state: &mut Self::State, @@ -251,25 +290,13 @@ where let mut is_tc = false; // Forward to main only if new tc or heartbeat let should_be_forwarded = match &mut event { - Event::NewTestcase { - input: _, - client_config: _, - exit_kind: _, - corpus_size: _, - time: _, - executions: _, - observers_buf: _, - forward_id, - } => { + Event::NewTestcase { forward_id, .. } => { *forward_id = Some(ClientId(self.inner.mgr_id().0 as u32)); is_tc = true; true } - Event::UpdateExecStats { - time: _, - executions: _, - phantom: _, - } => true, // send it but this guy won't be handled. the only purpose is to keep this client alive else the broker thinks it is dead and will dc it + Event::UpdateExecStats { .. 
} => true, // send it but this guy won't be handled. the only purpose is to keep this client alive else the broker thinks it is dead and will dc it + Event::Stop => true, _ => false, }; @@ -297,7 +324,7 @@ where fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> where - OT: ObserversTuple + Serialize, + OT: ObserversTuple + Serialize, { const SERIALIZE_TIME_FACTOR: u32 = 4; // twice as much as the normal llmp em's value cuz it does this job twice. const SERIALIZE_PERCENTAGE_THRESHOLD: usize = 80; @@ -313,9 +340,11 @@ where } } -impl EventRestarter for CentralizedEventManager +impl EventRestarter for CentralizedEventManager where EM: EventRestarter, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { #[inline] @@ -337,15 +366,19 @@ where } } -impl EventProcessor for CentralizedEventManager +impl EventProcessor for CentralizedEventManager where EM: AdaptiveSerializer + EventProcessor + EventFirer + HasEventManagerId, - E: HasObservers + Executor, + EMH: EventManagerHooksTuple, + E: HasObservers + Executor, + E::Observers: + ObserversTuple<::Input, ::State> + Serialize, for<'a> E::Observers: Deserialize<'a>, - Z: EvaluatorObservers - + ExecutionProcessor, + S: State, Self::State: HasExecutions + HasMetadata, SP: ShMemProvider, + Z: EvaluatorObservers + + ExecutionProcessor, { fn process( &mut self, @@ -356,28 +389,40 @@ where if self.is_main { // main node self.receive_from_secondary(fuzzer, state, executor) + // self.inner.process(fuzzer, state, executor) } else { // The main node does not process incoming events from the broker ATM self.inner.process(fuzzer, state, executor) } } + + fn on_shutdown(&mut self) -> Result<(), Error> { + self.inner.on_shutdown()?; + self.client.sender_mut().send_exiting() + } } -impl EventManager for CentralizedEventManager +impl EventManager for CentralizedEventManager where + E: HasObservers + Executor, + E::Observers: + ObserversTuple<::Input, ::State> + Serialize, + for<'a> E::Observers: Deserialize<'a>, EM: AdaptiveSerializer + EventManager, EM::State: HasExecutions + HasMetadata + HasLastReportTime, - E: HasObservers + Executor, - for<'a> E::Observers: Deserialize<'a>, - Z: EvaluatorObservers - + ExecutionProcessor, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, + Z: EvaluatorObservers + + ExecutionProcessor, { } -impl HasCustomBufHandlers for CentralizedEventManager +impl HasCustomBufHandlers for CentralizedEventManager where EM: HasCustomBufHandlers, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { /// Adds a custom buffer handler that will run for each incoming `CustomBuf` event. 
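The `serialize_observers` override above doubles the adaptive-serialization constants (`SERIALIZE_TIME_FACTOR = 4`, `SERIALIZE_PERCENTAGE_THRESHOLD = 80`) because the centralized manager serializes observers twice, once for its own broker and once for the inner manager. The actual decision happens inside `serialize_observers_adaptive`, which is not part of this diff; the sketch below only illustrates the general shape of such a time-budget check, with hypothetical parameter names:

```rust
use std::time::Duration;

/// Hypothetical adaptive check: keep serializing observers as long as the time
/// spent on serialization stays within `percentage_threshold` percent of a
/// budget derived from the total execution time and `time_factor`.
fn should_serialize(
    serialization_time: Duration,
    total_exec_time: Duration,
    time_factor: u32,
    percentage_threshold: u128,
) -> bool {
    if total_exec_time.is_zero() {
        // Nothing measured yet: serialize and start collecting statistics.
        return true;
    }
    let budget = total_exec_time / time_factor;
    // Compare as integer nanoseconds to avoid floating point on the hot path.
    serialization_time.as_nanos() * 100 <= budget.as_nanos() * percentage_threshold
}

fn main() {
    // With the centralized manager's constants (factor 4, threshold 80%),
    // 10 ms of serialization against 1 s of execution is still acceptable.
    assert!(should_serialize(
        Duration::from_millis(10),
        Duration::from_secs(1),
        4,
        80
    ));
}
```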
@@ -391,17 +436,21 @@ where } } -impl ProgressReporter for CentralizedEventManager +impl ProgressReporter for CentralizedEventManager where EM: AdaptiveSerializer + ProgressReporter + HasEventManagerId, EM::State: HasMetadata + HasExecutions + HasLastReportTime, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { } -impl HasEventManagerId for CentralizedEventManager +impl HasEventManagerId for CentralizedEventManager where EM: HasEventManagerId + UsesState, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { fn mgr_id(&self) -> EventManagerId { @@ -409,9 +458,11 @@ where } } -impl CentralizedEventManager +impl CentralizedEventManager where EM: UsesState, + EMH: EventManagerHooksTuple, + S: State, SP: ShMemProvider, { /// Describe the client event manager's LLMP parts in a restorable fashion @@ -432,9 +483,11 @@ where } } -impl CentralizedEventManager +impl CentralizedEventManager where EM: UsesState + EventFirer + AdaptiveSerializer + HasEventManagerId, + EMH: EventManagerHooksTuple, + S: State + Stoppable, SP: ShMemProvider, { #[cfg(feature = "llmp_compression")] @@ -477,11 +530,13 @@ where executor: &mut E, ) -> Result where - E: Executor + HasObservers::State>, + E: Executor::State> + HasObservers, + E::Observers: + ObserversTuple<::Input, ::State> + Serialize, ::State: UsesInput + HasExecutions + HasMetadata, for<'a> E::Observers: Deserialize<'a>, - Z: ExecutionProcessor::State> - + EvaluatorObservers, + Z: ExecutionProcessor::State> + + EvaluatorObservers, { // TODO: Get around local event copy by moving handle_in_client let self_id = self.client.sender().id(); @@ -508,6 +563,7 @@ where }; let event: Event<<::State as UsesInput>::Input> = postcard::from_bytes(event_bytes)?; + log::debug!("Processor received message {}", event.name_detailed()); self.handle_in_main(fuzzer, executor, state, client_id, event)?; count += 1; } @@ -524,12 +580,18 @@ where event: Event<<::State as UsesInput>::Input>, ) -> Result<(), Error> where - E: Executor + HasObservers::State>, + E: Executor::State> + HasObservers, + E::Observers: + ObserversTuple<::Input, ::State> + Serialize, ::State: UsesInput + HasExecutions + HasMetadata, - for<'a> E::Observers: Deserialize<'a>, - Z: ExecutionProcessor::State> - + EvaluatorObservers, + for<'a> E::Observers: Deserialize<'a> + Serialize, + Z: ExecutionProcessor::State> + + EvaluatorObservers, { + log::debug!("handle_in_main!"); + + let event_name = event.name_detailed(); + match event { Event::NewTestcase { input, @@ -538,10 +600,14 @@ where corpus_size, observers_buf, time, - executions, forward_id, + #[cfg(feature = "multi_machine")] + node_id, } => { - log::info!("Received new Testcase from {client_id:?} ({client_config:?}, forward {forward_id:?})"); + log::debug!( + "Received {} from {client_id:?} ({client_config:?}, forward {forward_id:?})", + event_name + ); let res = if client_config.match_with(&self.configuration()) && observers_buf.is_some() { @@ -551,7 +617,12 @@ where { state.scalability_monitor_mut().testcase_with_observers += 1; } - fuzzer.execute_and_process( + log::debug!( + "[{}] Running fuzzer with event {}", + process::id(), + event_name + ); + fuzzer.evaluate_execution( state, self, input.clone(), @@ -564,7 +635,12 @@ where { state.scalability_monitor_mut().testcase_without_observers += 1; } - fuzzer.evaluate_input_with_observers::( + log::debug!( + "[{}] Running fuzzer with event {}", + process::id(), + event_name + ); + fuzzer.evaluate_input_with_observers::( state, executor, self, @@ -574,30 +650,43 @@ where }; if let 
Some(item) = res.1 { - if res.1.is_some() { - self.inner.fire( - state, - Event::NewTestcase { - input, - client_config, - exit_kind, - corpus_size, - observers_buf, - time, - executions, - forward_id, - }, - )?; - } - log::info!("Added received Testcase as item #{item}"); + let event = Event::NewTestcase { + input, + client_config, + exit_kind, + corpus_size, + observers_buf, + time, + forward_id, + #[cfg(feature = "multi_machine")] + node_id, + }; + + self.hooks.on_fire_all(state, client_id, &event)?; + + log::debug!( + "[{}] Adding received Testcase {} as item #{item}...", + process::id(), + event_name + ); + + self.inner.fire(state, event)?; + } else { + log::debug!("[{}] {} was discarded...)", process::id(), event_name); } - Ok(()) } - _ => Err(Error::unknown(format!( - "Received illegal message that message should not have arrived: {:?}.", - event.name() - ))), + Event::Stop => { + state.request_stop(); + } + _ => { + return Err(Error::unknown(format!( + "Received illegal message that message should not have arrived: {:?}.", + event.name() + ))); + } } + + Ok(()) } } diff --git a/libafl/src/events/hooks/mod.rs b/libafl/src/events/events_hooks/mod.rs similarity index 65% rename from libafl/src/events/hooks/mod.rs rename to libafl/src/events/events_hooks/mod.rs index b10c0c8f0e..b34b1810f6 100644 --- a/libafl/src/events/hooks/mod.rs +++ b/libafl/src/events/events_hooks/mod.rs @@ -1,11 +1,12 @@ -//! Hooks for event managers, especifically these are used to hook before and and `handle_in_client`. +//! Hooks for event managers, especifically these are used to hook before `handle_in_client`. +//! //! This will allow user to define pre/post-processing code when the event manager receives any message from //! other clients use libafl_bolts::ClientId; use crate::{events::Event, state::State, Error}; -/// The hooks that are run before and after the event manager calls `handle_in_client` +/// The `broker_hooks` that are run before and after the event manager calls `handle_in_client` pub trait EventManagerHook where S: State, @@ -18,12 +19,25 @@ where client_id: ClientId, event: &Event, ) -> Result; + + /// Triggered when the even manager decides to fire the event after processing + fn on_fire( + &mut self, + _state: &mut S, + _client_id: ClientId, + _event: &Event, + ) -> Result<(), Error> { + Ok(()) + } + /// The hook that runs after `handle_in_client` /// Return false if you want to cancel the subsequent event handling - fn post_exec(&mut self, state: &mut S, client_id: ClientId) -> Result; + fn post_exec(&mut self, _state: &mut S, _client_id: ClientId) -> Result { + Ok(true) + } } -/// The tuples contains hooks to be executed for `handle_in_client` +/// The tuples contains `broker_hooks` to be executed for `handle_in_client` pub trait EventManagerHooksTuple where S: State, @@ -35,6 +49,15 @@ where client_id: ClientId, event: &Event, ) -> Result; + + /// Ran when the Event Manager decides to accept an event and propagates it + fn on_fire_all( + &mut self, + state: &mut S, + client_id: ClientId, + event: &Event, + ) -> Result<(), Error>; + /// The hook that runs after `handle_in_client` fn post_exec_all(&mut self, state: &mut S, client_id: ClientId) -> Result; } @@ -52,6 +75,16 @@ where ) -> Result { Ok(true) } + + fn on_fire_all( + &mut self, + _state: &mut S, + _client_id: ClientId, + _event: &Event, + ) -> Result<(), Error> { + Ok(()) + } + /// The hook that runs after `handle_in_client` fn post_exec_all(&mut self, _state: &mut S, _client_id: ClientId) -> Result { Ok(true) @@ -75,6 +108,17 
@@ where let second = self.1.pre_exec_all(state, client_id, event)?; Ok(first & second) } + + fn on_fire_all( + &mut self, + state: &mut S, + client_id: ClientId, + event: &Event, + ) -> Result<(), Error> { + self.0.on_fire(state, client_id, event)?; + self.1.on_fire_all(state, client_id, event) + } + /// The hook that runs after `handle_in_client` fn post_exec_all(&mut self, state: &mut S, client_id: ClientId) -> Result { let first = self.0.post_exec(state, client_id)?; diff --git a/libafl/src/events/launcher.rs b/libafl/src/events/launcher.rs index 9ecfc978ee..50a4e457ac 100644 --- a/libafl/src/events/launcher.rs +++ b/libafl/src/events/launcher.rs @@ -12,55 +12,52 @@ //! On `Unix` systems, the [`Launcher`] will use `fork` if the `fork` feature is used for `LibAFL`. //! Else, it will start subsequent nodes with the same commandline, and will set special `env` variables accordingly. -use alloc::string::ToString; -#[cfg(feature = "std")] -use core::marker::PhantomData; -#[cfg(feature = "std")] -use core::time::Duration; use core::{ fmt::{self, Debug, Formatter}, num::NonZeroUsize, + time::Duration, }; -#[cfg(feature = "std")] -use std::net::SocketAddr; -#[cfg(all(feature = "std", any(windows, not(feature = "fork"))))] -use std::process::Stdio; -#[cfg(all(unix, feature = "std"))] -use std::{fs::File, os::unix::io::AsRawFd}; +use std::{net::SocketAddr, string::String}; -#[cfg(all(unix, feature = "std", feature = "fork"))] -use libafl_bolts::llmp::LlmpBroker; -#[cfg(all(unix, feature = "std"))] -use libafl_bolts::os::dup2; -#[cfg(all(feature = "std", any(windows, not(feature = "fork"))))] -use libafl_bolts::os::startable_self; -#[cfg(all(unix, feature = "std", feature = "fork"))] -use libafl_bolts::{ - core_affinity::get_core_ids, - os::{fork, ForkResult}, -}; use libafl_bolts::{ core_affinity::{CoreId, Cores}, shmem::ShMemProvider, tuples::{tuple_list, Handle}, }; -#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; use typed_builder::TypedBuilder; - -use super::hooks::EventManagerHooksTuple; -use crate::observers::TimeObserver; -#[cfg(all(unix, feature = "std", feature = "fork"))] -use crate::{ - events::{centralized::CentralizedEventManager, llmp::centralized::CentralizedLlmpHook}, - state::UsesState, +#[cfg(all(unix, feature = "fork"))] +use { + crate::{ + events::{centralized::CentralizedEventManager, CentralizedLlmpHook, StdLlmpEventHook}, + inputs::UsesInput, + state::UsesState, + }, + alloc::string::ToString, + libafl_bolts::{ + core_affinity::get_core_ids, + llmp::{Broker, Brokers, LlmpBroker}, + os::{fork, ForkResult}, + }, + std::boxed::Box, }; -#[cfg(feature = "std")] +#[cfg(unix)] +use { + libafl_bolts::os::dup2, + std::{fs::File, os::unix::io::AsRawFd}, +}; +#[cfg(any(windows, not(feature = "fork")))] +use {libafl_bolts::os::startable_self, std::process::Stdio}; + +#[cfg(all(unix, feature = "fork", feature = "multi_machine"))] +use crate::events::multi_machine::{NodeDescriptor, TcpMultiMachineHooks}; use crate::{ events::{ llmp::{LlmpRestartingEventManager, LlmpShouldSaveState, ManagerKind, RestartingMgr}, - EventConfig, + EventConfig, EventManagerHooksTuple, }, monitors::Monitor, + observers::TimeObserver, state::{HasExecutions, State}, Error, }; @@ -72,17 +69,69 @@ const _AFL_LAUNCHER_CLIENT: &str = "AFL_LAUNCHER_CLIENT"; #[cfg(all(feature = "fork", unix))] const LIBAFL_DEBUG_OUTPUT: &str = "LIBAFL_DEBUG_OUTPUT"; +/// Information about this client from the launcher +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ClientDescription { + id: usize, + 
overcommit_id: usize, + core_id: CoreId, +} + +impl ClientDescription { + /// Create a [`ClientDescription`] + #[must_use] + pub fn new(id: usize, overcommit_id: usize, core_id: CoreId) -> Self { + Self { + id, + overcommit_id, + core_id, + } + } + + /// Id unique to all clients spawned by this launcher + #[must_use] + pub fn id(&self) -> usize { + self.id + } + + /// [`CoreId`] this client is bound to + #[must_use] + pub fn core_id(&self) -> CoreId { + self.core_id + } + + /// Incremental id unique for all clients on the same core + #[must_use] + pub fn overcommit_id(&self) -> usize { + self.overcommit_id + } + + /// Create a string representation safe for environment variables + #[must_use] + pub fn to_safe_string(&self) -> String { + format!("{}_{}_{}", self.id, self.overcommit_id, self.core_id.0) + } + + /// Parse the string created by [`Self::to_safe_string`]. + #[must_use] + pub fn from_safe_string(input: &str) -> Self { + let mut iter = input.split('_'); + let id = iter.next().unwrap().parse().unwrap(); + let overcommit_id = iter.next().unwrap().parse().unwrap(); + let core_id = iter.next().unwrap().parse::().unwrap().into(); + Self { + id, + overcommit_id, + core_id, + } + } +} + /// Provides a [`Launcher`], which can be used to launch a fuzzing run on a specified list of cores /// /// Will hide child output, unless the settings indicate otherwise, or the `LIBAFL_DEBUG_OUTPUT` env variable is set. -#[cfg(feature = "std")] -#[allow( - clippy::type_complexity, - missing_debug_implementations, - clippy::ignored_unit_patterns -)] #[derive(TypedBuilder)] -pub struct Launcher<'a, CF, EMH, MT, S, SP> { +pub struct Launcher<'a, CF, MT, SP> { /// The `ShmemProvider` to use shmem_provider: SP, /// The monitor instance to use @@ -97,24 +146,27 @@ pub struct Launcher<'a, CF, EMH, MT, S, SP> { broker_port: u16, /// The list of cores to run on cores: &'a Cores, + /// The number of clients to spawn on each core + #[builder(default = 1)] + overcommit: usize, /// A file name to write all client output to - #[cfg(all(unix, feature = "std"))] + #[cfg(unix)] #[builder(default = None)] stdout_file: Option<&'a str>, /// The time in milliseconds to delay between child launches #[builder(default = 10)] launch_delay: u64, /// The actual, opened, `stdout_file` - so that we keep it open until the end - #[cfg(all(unix, feature = "std", feature = "fork"))] + #[cfg(all(unix, feature = "fork"))] #[builder(setter(skip), default = None)] opened_stdout_file: Option, /// A file name to write all client stderr output to. If not specified, output is sent to /// `stdout_file`. 
- #[cfg(all(unix, feature = "std"))] + #[cfg(unix)] #[builder(default = None)] stderr_file: Option<&'a str>, /// The actual, opened, `stdout_file` - so that we keep it open until the end - #[cfg(all(unix, feature = "std", feature = "fork"))] + #[cfg(all(unix, feature = "fork"))] #[builder(setter(skip), default = None)] opened_stderr_file: Option, /// The `ip:port` address of another broker to connect our new broker to for multi-machine @@ -133,18 +185,9 @@ pub struct Launcher<'a, CF, EMH, MT, S, SP> { /// Tell the manager to serialize or not the state on restart #[builder(default = LlmpShouldSaveState::OnRestart)] serialize_state: LlmpShouldSaveState, - #[builder(setter(skip), default = PhantomData)] - phantom_data: PhantomData<(&'a S, &'a SP, EMH)>, } -impl Debug for Launcher<'_, CF, EMH, MT, S, SP> -where - CF: FnOnce(Option, LlmpRestartingEventManager, CoreId) -> Result<(), Error>, - EMH: EventManagerHooksTuple, - MT: Monitor + Clone, - SP: ShMemProvider, - S: State, -{ +impl Debug for Launcher<'_, CF, MT, SP> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { let mut dbg_struct = f.debug_struct("Launcher"); dbg_struct @@ -153,7 +196,7 @@ where .field("core", &self.cores) .field("spawn_broker", &self.spawn_broker) .field("remote_broker_addr", &self.remote_broker_addr); - #[cfg(all(unix, feature = "std"))] + #[cfg(unix)] { dbg_struct .field("stdout_file", &self.stdout_file) @@ -164,41 +207,43 @@ where } } -impl<'a, CF, MT, S, SP> Launcher<'a, CF, (), MT, S, SP> +impl Launcher<'_, CF, MT, SP> where - CF: FnOnce(Option, LlmpRestartingEventManager<(), S, SP>, CoreId) -> Result<(), Error>, MT: Monitor + Clone, - S: State + HasExecutions, SP: ShMemProvider, { /// Launch the broker and the clients and fuzz - #[cfg(all(unix, feature = "std", feature = "fork"))] - pub fn launch(&mut self) -> Result<(), Error> { - Self::launch_with_hooks(self, tuple_list!()) - } - - /// Launch the broker and the clients and fuzz - #[cfg(all(feature = "std", any(windows, not(feature = "fork"))))] - #[allow(unused_mut, clippy::match_wild_err_arm)] - pub fn launch(&mut self) -> Result<(), Error> { + #[cfg(any(windows, not(feature = "fork"), all(unix, feature = "fork")))] + pub fn launch(&mut self) -> Result<(), Error> + where + S: State + HasExecutions, + CF: FnOnce( + Option, + LlmpRestartingEventManager<(), S, SP>, + ClientDescription, + ) -> Result<(), Error>, + { Self::launch_with_hooks(self, tuple_list!()) } } -#[cfg(feature = "std")] -impl<'a, CF, EMH, MT, S, SP> Launcher<'a, CF, EMH, MT, S, SP> +impl Launcher<'_, CF, MT, SP> where - CF: FnOnce(Option, LlmpRestartingEventManager, CoreId) -> Result<(), Error>, - EMH: EventManagerHooksTuple + Clone + Copy, MT: Monitor + Clone, - S: State + HasExecutions, SP: ShMemProvider, { /// Launch the broker and the clients and fuzz with a user-supplied hook - #[cfg(all(unix, feature = "std", feature = "fork"))] - #[allow(clippy::similar_names)] - #[allow(clippy::too_many_lines)] - pub fn launch_with_hooks(&mut self, hooks: EMH) -> Result<(), Error> { + #[cfg(all(unix, feature = "fork"))] + pub fn launch_with_hooks(&mut self, hooks: EMH) -> Result<(), Error> + where + S: State + HasExecutions, + EMH: EventManagerHooksTuple + Clone + Copy, + CF: FnOnce( + Option, + LlmpRestartingEventManager, + ClientDescription, + ) -> Result<(), Error>, + { if self.cores.ids.is_empty() { return Err(Error::illegal_argument( "No cores to spawn on given, cannot launch anything.", @@ -211,8 +256,7 @@ where )); } - let core_ids = get_core_ids().unwrap(); - let num_cores = core_ids.len(); + 
let core_ids = get_core_ids()?; let mut handles = vec![]; log::info!("spawning on cores: {:?}", self.cores); @@ -224,66 +268,74 @@ where .stderr_file .map(|filename| File::create(filename).unwrap()); - #[cfg(feature = "std")] let debug_output = std::env::var(LIBAFL_DEBUG_OUTPUT).is_ok(); // Spawn clients - let mut index = 0_u64; - for (id, bind_to) in core_ids.iter().enumerate().take(num_cores) { - if self.cores.ids.iter().any(|&x| x == id.into()) { - index += 1; - self.shmem_provider.pre_fork()?; - // # Safety - // Fork is safe in general, apart from potential side effects to the OS and other threads - match unsafe { fork() }? { - ForkResult::Parent(child) => { - self.shmem_provider.post_fork(false)?; - handles.push(child.pid); - #[cfg(feature = "std")] - log::info!("child spawned and bound to core {id}"); - } - ForkResult::Child => { - // # Safety - // A call to `getpid` is safe. - log::info!("{:?} PostFork", unsafe { libc::getpid() }); - self.shmem_provider.post_fork(true)?; + let mut index = 0_usize; + for bind_to in core_ids { + if self.cores.ids.iter().any(|&x| x == bind_to) { + for overcommit_id in 0..self.overcommit { + index += 1; + self.shmem_provider.pre_fork()?; + // # Safety + // Fork is safe in general, apart from potential side effects to the OS and other threads + match unsafe { fork() }? { + ForkResult::Parent(child) => { + self.shmem_provider.post_fork(false)?; + handles.push(child.pid); + log::info!( + "child spawned with id {index} and bound to core {bind_to:?}" + ); + } + ForkResult::Child => { + // # Safety + // A call to `getpid` is safe. + log::info!("{:?} PostFork", unsafe { libc::getpid() }); + self.shmem_provider.post_fork(true)?; - #[cfg(feature = "std")] - std::thread::sleep(Duration::from_millis(index * self.launch_delay)); + std::thread::sleep(Duration::from_millis( + index as u64 * self.launch_delay, + )); - #[cfg(feature = "std")] - if !debug_output { - if let Some(file) = &self.opened_stdout_file { - dup2(file.as_raw_fd(), libc::STDOUT_FILENO)?; - if let Some(stderr) = &self.opened_stderr_file { - dup2(stderr.as_raw_fd(), libc::STDERR_FILENO)?; - } else { - dup2(file.as_raw_fd(), libc::STDERR_FILENO)?; + if !debug_output { + if let Some(file) = &self.opened_stdout_file { + dup2(file.as_raw_fd(), libc::STDOUT_FILENO)?; + if let Some(stderr) = &self.opened_stderr_file { + dup2(stderr.as_raw_fd(), libc::STDERR_FILENO)?; + } else { + dup2(file.as_raw_fd(), libc::STDERR_FILENO)?; + } } } + + let client_description = + ClientDescription::new(index, overcommit_id, bind_to); + + // Fuzzer client. keeps retrying the connection to broker till the broker starts + let builder = RestartingMgr::::builder() + .shmem_provider(self.shmem_provider.clone()) + .broker_port(self.broker_port) + .kind(ManagerKind::Client { + client_description: client_description.clone(), + }) + .configuration(self.configuration) + .serialize_state(self.serialize_state) + .hooks(hooks); + let builder = builder.time_ref(self.time_ref.clone()); + let (state, mgr) = builder.build().launch()?; + + return (self.run_client.take().unwrap())( + state, + mgr, + client_description, + ); } - - // Fuzzer client. 
keeps retrying the connection to broker till the broker starts - let builder = RestartingMgr::::builder() - .shmem_provider(self.shmem_provider.clone()) - .broker_port(self.broker_port) - .kind(ManagerKind::Client { - cpu_core: Some(*bind_to), - }) - .configuration(self.configuration) - .serialize_state(self.serialize_state) - .hooks(hooks); - let builder = builder.time_ref(self.time_ref.clone()); - let (state, mgr) = builder.build().launch()?; - - return (self.run_client.take().unwrap())(state, mgr, *bind_to); - } - }; + }; + } } } if self.spawn_broker { - #[cfg(feature = "std")] log::info!("I am broker!!."); // TODO we don't want always a broker here, think about using different laucher process to spawn different configurations @@ -327,23 +379,32 @@ where } /// Launch the broker and the clients and fuzz - #[cfg(all(feature = "std", any(windows, not(feature = "fork"))))] - #[allow(unused_mut, clippy::match_wild_err_arm)] - pub fn launch_with_hooks(&mut self, hooks: EMH) -> Result<(), Error> { + #[cfg(any(windows, not(feature = "fork")))] + #[allow(clippy::too_many_lines, clippy::match_wild_err_arm)] + pub fn launch_with_hooks(&mut self, hooks: EMH) -> Result<(), Error> + where + S: State + HasExecutions, + EMH: EventManagerHooksTuple + Clone + Copy, + CF: FnOnce( + Option, + LlmpRestartingEventManager, + ClientDescription, + ) -> Result<(), Error>, + { use libafl_bolts::core_affinity; let is_client = std::env::var(_AFL_LAUNCHER_CLIENT); let mut handles = match is_client { Ok(core_conf) => { - let core_id = core_conf.parse()?; + let client_description = ClientDescription::from_safe_string(&core_conf); // the actual client. do the fuzzing let builder = RestartingMgr::::builder() .shmem_provider(self.shmem_provider.clone()) .broker_port(self.broker_port) .kind(ManagerKind::Client { - cpu_core: Some(CoreId(core_id)), + client_description: client_description.clone(), }) .configuration(self.configuration) .serialize_state(self.serialize_state) @@ -353,20 +414,19 @@ where let (state, mgr) = builder.build().launch()?; - return (self.run_client.take().unwrap())(state, mgr, CoreId(core_id)); + return (self.run_client.take().unwrap())(state, mgr, client_description); } Err(std::env::VarError::NotPresent) => { // I am a broker // before going to the broker loop, spawn n clients let core_ids = core_affinity::get_core_ids().unwrap(); - let num_cores = core_ids.len(); let mut handles = vec![]; log::info!("spawning on cores: {:?}", self.cores); let debug_output = std::env::var("LIBAFL_DEBUG_OUTPUT").is_ok(); - #[cfg(all(feature = "std", unix))] + #[cfg(unix)] { // Set own stdout and stderr as set by the user if !debug_output { @@ -387,34 +447,44 @@ where } } //spawn clients - for (id, _) in core_ids.iter().enumerate().take(num_cores) { - if self.cores.ids.iter().any(|&x| x == id.into()) { - // Forward own stdio to child processes, if requested by user - let (mut stdout, mut stderr) = (Stdio::null(), Stdio::null()); - #[cfg(all(feature = "std", unix))] - { - if self.stdout_file.is_some() || self.stderr_file.is_some() { - stdout = Stdio::inherit(); - stderr = Stdio::inherit(); - }; + let mut index = 0; + for core_id in core_ids { + if self.cores.ids.iter().any(|&x| x == core_id) { + for overcommit_i in 0..self.overcommit { + index += 1; + // Forward own stdio to child processes, if requested by user + #[allow(unused_mut)] + let (mut stdout, mut stderr) = (Stdio::null(), Stdio::null()); + #[cfg(unix)] + { + if self.stdout_file.is_some() || self.stderr_file.is_some() { + stdout = Stdio::inherit(); + stderr = 
Stdio::inherit(); + }; + } + + std::thread::sleep(Duration::from_millis( + core_id.0 as u64 * self.launch_delay, + )); + + let client_description = + ClientDescription::new(index, overcommit_i, core_id); + std::env::set_var( + _AFL_LAUNCHER_CLIENT, + client_description.to_safe_string(), + ); + let mut child = startable_self()?; + let child = (if debug_output { + &mut child + } else { + child.stdout(stdout); + child.stderr(stderr) + }) + .spawn()?; + handles.push(child); } - - #[cfg(feature = "std")] - std::thread::sleep(Duration::from_millis(id as u64 * self.launch_delay)); - - std::env::set_var(_AFL_LAUNCHER_CLIENT, id.to_string()); - let mut child = startable_self()?; - let child = (if debug_output { - &mut child - } else { - child.stdout(stdout); - child.stderr(stderr) - }) - .spawn()?; - handles.push(child); } } - handles } Err(_) => panic!("Env variables are broken, received non-unicode!"), @@ -429,7 +499,6 @@ where } if self.spawn_broker { - #[cfg(feature = "std")] log::info!("I am broker!!."); let builder = RestartingMgr::::builder() @@ -465,12 +534,13 @@ where } } +/// A Launcher that minimizes re-execution of shared testcases. +/// /// Provides a Launcher, which can be used to launch a fuzzing run on a specified list of cores with a single main and multiple secondary nodes /// This is for centralized, the 4th argument of the closure should mean if this is the main node. -#[cfg(all(unix, feature = "std", feature = "fork"))] +#[cfg(all(unix, feature = "fork"))] #[derive(TypedBuilder)] -#[allow(clippy::type_complexity, missing_debug_implementations)] -pub struct CentralizedLauncher<'a, CF, IM, MF, MT, S, SP> { +pub struct CentralizedLauncher<'a, CF, MF, MT, SP> { /// The `ShmemProvider` to use shmem_provider: SP, /// The monitor instance to use @@ -497,6 +567,9 @@ pub struct CentralizedLauncher<'a, CF, IM, MF, MT, S, SP> { time_obs: Option>, /// The list of cores to run on cores: &'a Cores, + /// The number of clients to spawn on each core + #[builder(default = 1)] + overcommit: usize, /// A file name to write all client output to #[builder(default = None)] stdout_file: Option<&'a str>, @@ -504,7 +577,7 @@ pub struct CentralizedLauncher<'a, CF, IM, MF, MT, S, SP> { #[builder(default = 10)] launch_delay: u64, /// The actual, opened, `stdout_file` - so that we keep it open until the end - #[cfg(all(unix, feature = "std", feature = "fork"))] + #[cfg(all(unix, feature = "fork"))] #[builder(setter(skip), default = None)] opened_stdout_file: Option, /// A file name to write all client stderr output to. If not specified, output is sent to @@ -512,14 +585,15 @@ pub struct CentralizedLauncher<'a, CF, IM, MF, MT, S, SP> { #[builder(default = None)] stderr_file: Option<&'a str>, /// The actual, opened, `stdout_file` - so that we keep it open until the end - #[cfg(all(unix, feature = "std", feature = "fork"))] + #[cfg(all(unix, feature = "fork"))] #[builder(setter(skip), default = None)] opened_stderr_file: Option, /// The `ip:port` address of another broker to connect our new broker to for multi-machine /// clusters. - #[builder(default = None)] remote_broker_addr: Option, + #[cfg(feature = "multi_machine")] + multi_machine_node_descriptor: NodeDescriptor, /// If this launcher should spawn a new `broker` on `[Self::broker_port]` (default). /// The reason you may not want this is, if you already have a [`Launcher`] /// with a different configuration (for the same target) running on this machine. 
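
Note that the non-fork launch path above no longer hands the child a bare core id; it round-trips a full `ClientDescription` (spawn index, overcommit slot, core) through the `_AFL_LAUNCHER_CLIENT` environment variable. A minimal sketch of both sides, pieced together only from calls visible in this patch (error handling elided):

    // Parent: encode the description before respawning itself as a client.
    let client_description = ClientDescription::new(index, overcommit_i, core_id);
    std::env::set_var(_AFL_LAUNCHER_CLIENT, client_description.to_safe_string());

    // Child: decode it again on startup and recover the core it should bind to.
    let core_conf = std::env::var(_AFL_LAUNCHER_CLIENT).unwrap();
    let client_description = ClientDescription::from_safe_string(&core_conf);
    let core_id = client_description.core_id();
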
@@ -529,17 +603,16 @@ pub struct CentralizedLauncher<'a, CF, IM, MF, MT, S, SP> { /// Tell the manager to serialize or not the state on restart #[builder(default = LlmpShouldSaveState::OnRestart)] serialize_state: LlmpShouldSaveState, - #[builder(setter(skip), default = PhantomData)] - phantom_data: PhantomData<(IM, &'a S, &'a SP)>, } -#[cfg(all(unix, feature = "std", feature = "fork"))] -impl Debug for CentralizedLauncher<'_, CF, IM, MF, MT, S, SP> { +#[cfg(all(unix, feature = "fork"))] +impl Debug for CentralizedLauncher<'_, CF, MF, MT, SP> { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("Launcher") .field("configuration", &self.configuration) .field("broker_port", &self.broker_port) - .field("core", &self.cores) + .field("cores", &self.cores) + .field("overcommit", &self.overcommit) .field("spawn_broker", &self.spawn_broker) .field("remote_broker_addr", &self.remote_broker_addr) .field("stdout_file", &self.stdout_file) @@ -551,74 +624,79 @@ impl Debug for CentralizedLauncher<'_, CF, IM, MF, MT, S, /// The standard inner manager of centralized pub type StdCentralizedInnerMgr = LlmpRestartingEventManager<(), S, SP>; -#[cfg(all(unix, feature = "std", feature = "fork"))] -impl<'a, CF, MF, MT, S, SP> - CentralizedLauncher<'a, CF, StdCentralizedInnerMgr, MF, MT, S, SP> +#[cfg(all(unix, feature = "fork"))] +impl CentralizedLauncher<'_, CF, MF, MT, SP> where - CF: FnOnce( - Option, - CentralizedEventManager, SP>, - CoreId, - ) -> Result<(), Error>, - MF: FnOnce( - Option, - CentralizedEventManager, SP>, - CoreId, - ) -> Result<(), Error>, - MT: Monitor + Clone, - S: State + HasExecutions, - SP: ShMemProvider, + MT: Monitor + Clone + 'static, + SP: ShMemProvider + 'static, { /// Launch a standard Centralized-based fuzzer - pub fn launch(&mut self) -> Result<(), Error> { - let restarting_mgr_builder = |centralized_launcher: &Self, core_to_bind: CoreId| { - // Fuzzer client. keeps retrying the connection to broker till the broker starts - let builder = RestartingMgr::<(), MT, S, SP>::builder() - .always_interesting(centralized_launcher.always_interesting) - .shmem_provider(centralized_launcher.shmem_provider.clone()) - .broker_port(centralized_launcher.broker_port) - .kind(ManagerKind::Client { - cpu_core: Some(core_to_bind), - }) - .configuration(centralized_launcher.configuration) - .serialize_state(centralized_launcher.serialize_state) - .hooks(tuple_list!()); + pub fn launch(&mut self) -> Result<(), Error> + where + S: State, + S::Input: Send + Sync + 'static, + CF: FnOnce( + Option, + CentralizedEventManager, (), S, SP>, + ClientDescription, + ) -> Result<(), Error>, + MF: FnOnce( + Option, + CentralizedEventManager, (), S, SP>, + ClientDescription, + ) -> Result<(), Error>, + { + let restarting_mgr_builder = + |centralized_launcher: &Self, client_description: ClientDescription| { + // Fuzzer client. 
keeps retrying the connection to broker till the broker starts + let builder = RestartingMgr::<(), MT, S, SP>::builder() + .always_interesting(centralized_launcher.always_interesting) + .shmem_provider(centralized_launcher.shmem_provider.clone()) + .broker_port(centralized_launcher.broker_port) + .kind(ManagerKind::Client { client_description }) + .configuration(centralized_launcher.configuration) + .serialize_state(centralized_launcher.serialize_state) + .hooks(tuple_list!()); - let builder = builder.time_ref(centralized_launcher.time_obs.clone()); + let builder = builder.time_ref(centralized_launcher.time_obs.clone()); - builder.build().launch() - }; + builder.build().launch() + }; self.launch_generic(restarting_mgr_builder, restarting_mgr_builder) } } -#[cfg(all(unix, feature = "std", feature = "fork"))] -impl<'a, CF, IM, MF, MT, S, SP> CentralizedLauncher<'a, CF, IM, MF, MT, S, SP> +#[cfg(all(unix, feature = "fork"))] +impl CentralizedLauncher<'_, CF, MF, MT, SP> where - CF: FnOnce(Option, CentralizedEventManager, CoreId) -> Result<(), Error>, - IM: UsesState, - MF: FnOnce( - Option, - CentralizedEventManager, // No hooks for centralized EM - CoreId, - ) -> Result<(), Error>, - MT: Monitor + Clone, - S: State + HasExecutions, - SP: ShMemProvider, + MT: Monitor + Clone + 'static, + SP: ShMemProvider + 'static, { /// Launch a Centralized-based fuzzer. /// - `main_inner_mgr_builder` will be called to build the inner manager of the main node. /// - `secondary_inner_mgr_builder` will be called to build the inner manager of the secondary nodes. - #[allow(clippy::similar_names)] - #[allow(clippy::too_many_lines)] - pub fn launch_generic( + pub fn launch_generic( &mut self, - main_inner_mgr_builder: IMF, - secondary_inner_mgr_builder: IMF, + main_inner_mgr_builder: EMB, + secondary_inner_mgr_builder: EMB, ) -> Result<(), Error> where - IMF: FnOnce(&Self, CoreId) -> Result<(Option, IM), Error>, + S: State, + S::Input: Send + Sync + 'static, + CF: FnOnce( + Option, + CentralizedEventManager, + ClientDescription, + ) -> Result<(), Error>, + EM: UsesState, + EMB: FnOnce(&Self, ClientDescription) -> Result<(Option, EM), Error>, + MF: FnOnce( + Option, + CentralizedEventManager, // No broker_hooks for centralized EM + ClientDescription, + ) -> Result<(), Error>, + <::State as UsesInput>::Input: Send + Sync + 'static, { let mut main_inner_mgr_builder = Some(main_inner_mgr_builder); let mut secondary_inner_mgr_builder = Some(secondary_inner_mgr_builder); @@ -636,10 +714,9 @@ where } let core_ids = get_core_ids().unwrap(); - let num_cores = core_ids.len(); let mut handles = vec![]; - log::info!("spawning on cores: {:?}", self.cores); + log::debug!("spawning on cores: {:?}", self.cores); self.opened_stdout_file = self .stdout_file @@ -650,147 +727,195 @@ where let debug_output = std::env::var(LIBAFL_DEBUG_OUTPUT).is_ok(); - // Spawn centralized broker - self.shmem_provider.pre_fork()?; - match unsafe { fork() }? 
{ - ForkResult::Parent(child) => { - self.shmem_provider.post_fork(false)?; - handles.push(child.pid); - #[cfg(feature = "std")] - log::info!("PID: {:#?} centralized broker spawned", std::process::id()); - } - ForkResult::Child => { - log::info!("{:?} PostFork", unsafe { libc::getpid() }); - #[cfg(feature = "std")] - log::info!("PID: {:#?} I am centralized broker", std::process::id()); - self.shmem_provider.post_fork(true)?; - - let llmp_centralized_hook = CentralizedLlmpHook::::new()?; - - // TODO switch to false after solving the bug - let mut broker = LlmpBroker::with_keep_pages_attach_to_tcp( - self.shmem_provider.clone(), - tuple_list!(llmp_centralized_hook), - self.centralized_broker_port, - true, - )?; - - // Run in the broker until all clients exit - broker.loop_with_timeouts(Duration::from_secs(30), Some(Duration::from_millis(5))); - - log::info!("The last client quit. Exiting."); - - return Err(Error::shutting_down()); - } - } - - std::thread::sleep(Duration::from_millis(10)); - // Spawn clients - let mut index = 0_u64; - for (id, bind_to) in core_ids.iter().enumerate().take(num_cores) { - if self.cores.ids.iter().any(|&x| x == id.into()) { - index += 1; - self.shmem_provider.pre_fork()?; - match unsafe { fork() }? { - ForkResult::Parent(child) => { - self.shmem_provider.post_fork(false)?; - handles.push(child.pid); - #[cfg(feature = "std")] - log::info!("child spawned and bound to core {id}"); - } - ForkResult::Child => { - log::info!("{:?} PostFork", unsafe { libc::getpid() }); - self.shmem_provider.post_fork(true)?; + let mut index = 0_usize; + for bind_to in core_ids { + if self.cores.ids.iter().any(|&x| x == bind_to) { + for overcommit_id in 0..self.overcommit { + index += 1; + self.shmem_provider.pre_fork()?; + match unsafe { fork() }? 
{ + ForkResult::Parent(child) => { + self.shmem_provider.post_fork(false)?; + handles.push(child.pid); + log::info!( + "child with client id {index} spawned and bound to core {bind_to:?}" + ); + } + ForkResult::Child => { + log::info!("{:?} PostFork", unsafe { libc::getpid() }); + self.shmem_provider.post_fork(true)?; - std::thread::sleep(Duration::from_millis(index * self.launch_delay)); + std::thread::sleep(Duration::from_millis( + index as u64 * self.launch_delay, + )); - if !debug_output { - if let Some(file) = &self.opened_stdout_file { - dup2(file.as_raw_fd(), libc::STDOUT_FILENO)?; - if let Some(stderr) = &self.opened_stderr_file { - dup2(stderr.as_raw_fd(), libc::STDERR_FILENO)?; - } else { - dup2(file.as_raw_fd(), libc::STDERR_FILENO)?; + if !debug_output { + if let Some(file) = &self.opened_stdout_file { + dup2(file.as_raw_fd(), libc::STDOUT_FILENO)?; + if let Some(stderr) = &self.opened_stderr_file { + dup2(stderr.as_raw_fd(), libc::STDERR_FILENO)?; + } else { + dup2(file.as_raw_fd(), libc::STDERR_FILENO)?; + } } } - } - if index == 1 { - // Main client - let (state, mgr) = - main_inner_mgr_builder.take().unwrap()(self, *bind_to)?; + let client_description = + ClientDescription::new(index, overcommit_id, bind_to); - let mut centralized_builder = CentralizedEventManager::builder(); - centralized_builder = centralized_builder.is_main(true); + if index == 1 { + // Main client + log::debug!("Running main client on PID {}", std::process::id()); + let (state, mgr) = main_inner_mgr_builder.take().unwrap()( + self, + client_description.clone(), + )?; - let c_mgr = centralized_builder.build_on_port( - mgr, - self.shmem_provider.clone(), - self.centralized_broker_port, - self.time_obs.clone(), - )?; + let mut centralized_event_manager_builder = + CentralizedEventManager::builder(); + centralized_event_manager_builder = + centralized_event_manager_builder.is_main(true); - self.main_run_client.take().unwrap()(state, c_mgr, *bind_to) - } else { - // Secondary clients - let (state, mgr) = - secondary_inner_mgr_builder.take().unwrap()(self, *bind_to)?; + let c_mgr = centralized_event_manager_builder.build_on_port( + mgr, + // tuple_list!(multi_machine_event_manager_hook.take().unwrap()), + tuple_list!(), + self.shmem_provider.clone(), + self.centralized_broker_port, + self.time_obs.clone(), + )?; - let centralized_builder = CentralizedEventManager::builder(); + self.main_run_client.take().unwrap()( + state, + c_mgr, + client_description, + )?; + Err(Error::shutting_down()) + } else { + // Secondary clients + log::debug!( + "Running secondary client on PID {}", + std::process::id() + ); + let (state, mgr) = secondary_inner_mgr_builder.take().unwrap()( + self, + client_description.clone(), + )?; - let c_mgr = centralized_builder.build_on_port( - mgr, - self.shmem_provider.clone(), - self.centralized_broker_port, - self.time_obs.clone(), - )?; + let centralized_builder = CentralizedEventManager::builder(); - self.secondary_run_client.take().unwrap()(state, c_mgr, *bind_to) - } - }?, - }; + let c_mgr = centralized_builder.build_on_port( + mgr, + tuple_list!(), + self.shmem_provider.clone(), + self.centralized_broker_port, + self.time_obs.clone(), + )?; + + self.secondary_run_client.take().unwrap()( + state, + c_mgr, + client_description, + )?; + Err(Error::shutting_down()) + } + }?, + }; + } } } + // Create this after forks, to avoid problems with tokio runtime + + // # Safety + // The `multi_machine_receiver_hook` needs messages to outlive the receiver. 
+ // The underlying memory region for incoming messages lives longer than the async thread processing them. + #[cfg(feature = "multi_machine")] + let TcpMultiMachineHooks { + sender: multi_machine_sender_hook, + receiver: multi_machine_receiver_hook, + } = unsafe { + TcpMultiMachineHooks::builder() + .node_descriptor(self.multi_machine_node_descriptor.clone()) + .build::<<::State as UsesInput>::Input>()? + }; + + let mut brokers = Brokers::new(); + let exit_cleanly_after = NonZeroUsize::try_from(self.cores.ids.len()).unwrap(); + + // Add centralized broker + brokers.add(Box::new({ + #[cfg(feature = "multi_machine")] + let centralized_hooks = tuple_list!( + CentralizedLlmpHook::::new()?, + multi_machine_receiver_hook, + ); + + #[cfg(not(feature = "multi_machine"))] + let centralized_hooks = tuple_list!(CentralizedLlmpHook::::new()?); + + // TODO switch to false after solving the bug + let mut broker = LlmpBroker::with_keep_pages_attach_to_tcp( + self.shmem_provider.clone(), + centralized_hooks, + self.centralized_broker_port, + true, + )?; + broker.set_exit_after(exit_cleanly_after); + broker + })); + + #[cfg(feature = "multi_machine")] + assert!( + self.spawn_broker, + "Multi machine is not compatible with externally spawned brokers for now." + ); + + // If we should add another broker, add it to other brokers. if self.spawn_broker { log::info!("I am broker!!."); - // TODO we don't want always a broker here, think about using different laucher process to spawn different configurations - let builder = RestartingMgr::<(), MT, S, SP>::builder() - .shmem_provider(self.shmem_provider.clone()) - .monitor(Some(self.monitor.clone())) - .broker_port(self.broker_port) - .kind(ManagerKind::Broker) - .remote_broker_addr(self.remote_broker_addr) - .exit_cleanly_after(Some(NonZeroUsize::try_from(self.cores.ids.len()).unwrap())) - .configuration(self.configuration) - .serialize_state(self.serialize_state) - .hooks(tuple_list!()); + #[cfg(not(feature = "multi_machine"))] + let llmp_hook = + tuple_list!(StdLlmpEventHook::::new(self.monitor.clone())?); - let builder = builder.time_ref(self.time_obs.clone()); + #[cfg(feature = "multi_machine")] + let llmp_hook = tuple_list!( + StdLlmpEventHook::::new(self.monitor.clone())?, + multi_machine_sender_hook, + ); - builder.build().launch()?; + let mut broker = LlmpBroker::create_attach_to_tcp( + self.shmem_provider.clone(), + llmp_hook, + self.broker_port, + )?; - // Broker exited. kill all clients. - for handle in &handles { - unsafe { - libc::kill(*handle, libc::SIGINT); - } - } - } else { - for handle in &handles { - let mut status = 0; - log::info!("Not spawning broker (spawn_broker is false). Waiting for fuzzer children to exit..."); - unsafe { - libc::waitpid(*handle, &mut status, 0); - if status != 0 { - log::info!("Client with pid {handle} exited with status {status}"); - } - } + if let Some(remote_broker_addr) = self.remote_broker_addr { + log::info!("B2b: Connecting to {:?}", &remote_broker_addr); + broker.inner_mut().connect_b2b(remote_broker_addr)?; + }; + + broker.set_exit_after(exit_cleanly_after); + + brokers.add(Box::new(broker)); + } + log::debug!("Broker has been initialized; pid {}.", std::process::id()); + + // Loop over all the brokers that should be polled + brokers.loop_with_timeouts(Duration::from_secs(30), Some(Duration::from_millis(5))); + + #[cfg(feature = "llmp_debug")] + log::info!("The last client quit. Exiting."); + + // Brokers exited. kill all clients. 
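+                // Reaching this point means the broker polling loop above
+                // (`brokers.loop_with_timeouts(..)`) has returned, e.g. once `exit_cleanly_after`
+                // clients have quit; the child processes forked earlier are then interrupted below.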
+ for handle in &handles { + unsafe { + libc::kill(*handle, libc::SIGINT); } } - Ok(()) + Err(Error::shutting_down()) } } diff --git a/libafl/src/events/llmp/mgr.rs b/libafl/src/events/llmp/mgr.rs index 20a44552c6..1153c30610 100644 --- a/libafl/src/events/llmp/mgr.rs +++ b/libafl/src/events/llmp/mgr.rs @@ -1,5 +1,5 @@ -/// An [`EventManager`] that forwards all events to other attached fuzzers on shared maps or via tcp, -/// using low-level message passing, [`llmp`]. +//! An [`crate::events::EventManager`] that forwards all events to other attached fuzzers on shared maps or via tcp, +//! using low-level message passing, [`libafl_bolts::llmp`]. #[cfg(feature = "std")] use alloc::string::ToString; @@ -15,7 +15,7 @@ use libafl_bolts::{ }; use libafl_bolts::{ current_time, - llmp::{LlmpClient, LlmpClientDescription}, + llmp::{LlmpClient, LlmpClientDescription, LLMP_FLAG_FROM_MM}, shmem::{NopShMemProvider, ShMemProvider}, tuples::Handle, ClientId, @@ -31,17 +31,16 @@ use serde::{Deserialize, Serialize}; use crate::events::llmp::COMPRESS_THRESHOLD; use crate::{ events::{ - hooks::EventManagerHooksTuple, llmp::{LLMP_TAG_EVENT_TO_BOTH, _LLMP_TAG_EVENT_TO_BROKER}, AdaptiveSerializer, CustomBufEventResult, CustomBufHandlerFn, Event, EventConfig, - EventFirer, EventManager, EventManagerId, EventProcessor, EventRestarter, - HasCustomBufHandlers, HasEventManagerId, ProgressReporter, + EventFirer, EventManager, EventManagerHooksTuple, EventManagerId, EventProcessor, + EventRestarter, HasCustomBufHandlers, HasEventManagerId, ProgressReporter, }, executors::{Executor, HasObservers}, fuzzer::{Evaluator, EvaluatorObservers, ExecutionProcessor}, inputs::{NopInput, UsesInput}, observers::{ObserversTuple, TimeObserver}, - state::{HasExecutions, HasLastReportTime, NopState, State, UsesState}, + state::{HasExecutions, HasImported, HasLastReportTime, NopState, State, UsesState}, Error, HasMetadata, }; @@ -168,10 +167,8 @@ impl LlmpEventManagerBuilder { }) } - /// Create an LLMP event manager on a port - /// - /// If the port is not yet bound, it will act as a broker; otherwise, it - /// will act as a client. + /// Create an LLMP event manager on a port. + /// It expects a broker to exist on this port. #[cfg(feature = "std")] pub fn build_on_port( self, @@ -370,7 +367,7 @@ where Ok(_) => (), Err(e) => log::error!("Failed to send tcp message {:#?}", e), } - log::info!("Asking he broker to be disconnected"); + log::debug!("Asking he broker to be disconnected"); Ok(()) } @@ -390,7 +387,7 @@ where impl LlmpEventManager where EMH: EventManagerHooksTuple, - S: State + HasExecutions + HasMetadata, + S: State + HasExecutions + HasMetadata + HasImported, SP: ShMemProvider, { // Handle arriving events in the client @@ -404,31 +401,33 @@ where event: Event, ) -> Result<(), Error> where - E: Executor + HasObservers, + E: Executor + HasObservers, + E::Observers: ObserversTuple + Serialize, for<'a> E::Observers: Deserialize<'a>, - Z: ExecutionProcessor - + EvaluatorObservers + Z: ExecutionProcessor + + EvaluatorObservers + Evaluator, { if !self.hooks.pre_exec_all(state, client_id, &event)? { return Ok(()); } + let evt_name = event.name_detailed(); match event { Event::NewTestcase { input, client_config, exit_kind, - corpus_size: _, observers_buf, - time: _, - executions: _, + #[cfg(feature = "std")] forward_id, + .. 
} => { - log::info!("Received new Testcase from {client_id:?} ({client_config:?}, forward {forward_id:?})"); + #[cfg(feature = "std")] + log::debug!("[{}] Received new Testcase {evt_name} from {client_id:?} ({client_config:?}, forward {forward_id:?})", std::process::id()); if self.always_interesting { let item = fuzzer.add_input(state, executor, self, input)?; - log::info!("Added received Testcase as item #{item}"); + log::debug!("Added received Testcase as item #{item}"); } else { let res = if client_config.match_with(&self.configuration) && observers_buf.is_some() @@ -443,20 +442,22 @@ where { state.scalability_monitor_mut().testcase_with_observers += 1; } - fuzzer.execute_and_process( - state, self, input, &observers, &exit_kind, false, - )? + fuzzer + .evaluate_execution(state, self, input, &observers, &exit_kind, false)? } else { #[cfg(feature = "scalability_introspection")] { state.scalability_monitor_mut().testcase_without_observers += 1; } - fuzzer.evaluate_input_with_observers::( + fuzzer.evaluate_input_with_observers::( state, executor, self, input, false, )? }; if let Some(item) = res.1 { - log::info!("Added received Testcase as item #{item}"); + *state.imported_mut() += 1; + log::debug!("Added received Testcase {evt_name} as item #{item}"); + } else { + log::debug!("Testcase {evt_name} was discarded"); } } } @@ -467,6 +468,9 @@ where } } } + Event::Stop => { + state.request_stop(); + } _ => { return Err(Error::unknown(format!( "Received illegal message that message should not have arrived: {:?}.", @@ -474,6 +478,7 @@ where ))); } } + self.hooks.post_exec_all(state, client_id)?; Ok(()) } @@ -548,7 +553,7 @@ where fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> where - OT: ObserversTuple + Serialize, + OT: ObserversTuple + Serialize, { const SERIALIZE_TIME_FACTOR: u32 = 2; const SERIALIZE_PERCENTAGE_THRESHOLD: usize = 80; @@ -580,12 +585,13 @@ where impl EventProcessor for LlmpEventManager where EMH: EventManagerHooksTuple, - S: State + HasExecutions + HasMetadata, + S: State + HasExecutions + HasMetadata + HasImported, SP: ShMemProvider, - E: HasObservers + Executor, + E: HasObservers + Executor, + E::Observers: ObserversTuple + Serialize, for<'a> E::Observers: Deserialize<'a>, - Z: ExecutionProcessor - + EvaluatorObservers + Z: ExecutionProcessor + + EvaluatorObservers + Evaluator, { fn process( @@ -597,7 +603,7 @@ where // TODO: Get around local event copy by moving handle_in_client let self_id = self.llmp.sender().id(); let mut count = 0; - while let Some((client_id, tag, _flags, msg)) = self.llmp.recv_buf_with_flags()? { + while let Some((client_id, tag, flags, msg)) = self.llmp.recv_buf_with_flags()? { assert!( tag != _LLMP_TAG_EVENT_TO_BROKER, "EVENT_TO_BROKER parcel should not have arrived in the client!" @@ -611,29 +617,42 @@ where #[cfg(feature = "llmp_compression")] let compressed; #[cfg(feature = "llmp_compression")] - let event_bytes = if _flags & LLMP_FLAG_COMPRESSED == LLMP_FLAG_COMPRESSED { + let event_bytes = if flags & LLMP_FLAG_COMPRESSED == LLMP_FLAG_COMPRESSED { compressed = self.compressor.decompress(msg)?; &compressed } else { msg }; let event: Event = postcard::from_bytes(event_bytes)?; + log::debug!("Received event in normal llmp {}", event.name_detailed()); + + // If the message comes from another machine, do not + // consider other events than new testcase. 
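+            // (LLMP_FLAG_FROM_MM marks messages relayed from another machine, presumably attached
+            // by the multi-machine hooks; everything except NewTestcase is skipped here so that
+            // remote stats/log events are not re-processed by this client.)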
+ if !event.is_new_testcase() && (flags & LLMP_FLAG_FROM_MM == LLMP_FLAG_FROM_MM) { + continue; + } + self.handle_in_client(fuzzer, executor, state, client_id, event)?; count += 1; } Ok(count) } + + fn on_shutdown(&mut self) -> Result<(), Error> { + self.send_exiting() + } } impl EventManager for LlmpEventManager where - E: HasObservers + Executor, + E: HasObservers + Executor, + E::Observers: ObserversTuple + Serialize, for<'a> E::Observers: Deserialize<'a>, EMH: EventManagerHooksTuple, - S: State + HasExecutions + HasMetadata + HasLastReportTime, + S: State + HasExecutions + HasMetadata + HasLastReportTime + HasImported, SP: ShMemProvider, - Z: ExecutionProcessor - + EvaluatorObservers + Z: ExecutionProcessor + + EvaluatorObservers + Evaluator, { } diff --git a/libafl/src/events/llmp/mod.rs b/libafl/src/events/llmp/mod.rs index 11686a7f90..bdbe32f4ba 100644 --- a/libafl/src/events/llmp/mod.rs +++ b/libafl/src/events/llmp/mod.rs @@ -20,7 +20,7 @@ use crate::{ executors::{Executor, HasObservers}, fuzzer::{EvaluatorObservers, ExecutionProcessor}, inputs::{Input, InputConverter, NopInput, NopInputConverter, UsesInput}, - state::{HasExecutions, NopState, State, UsesState}, + state::{HasExecutions, NopState, State, Stoppable, UsesState}, Error, HasMetadata, }; @@ -28,10 +28,6 @@ use crate::{ pub mod mgr; pub use mgr::*; -/// The llmp hooks -pub mod hooks; -pub use hooks::*; - /// The llmp restarting manager #[cfg(feature = "std")] pub mod restarting; @@ -39,17 +35,17 @@ pub mod restarting; pub use restarting::*; /// Forward this to the client -const _LLMP_TAG_EVENT_TO_CLIENT: Tag = Tag(0x2C11E471); +pub(crate) const _LLMP_TAG_EVENT_TO_CLIENT: Tag = Tag(0x2C11E471); /// Only handle this in the broker -const _LLMP_TAG_EVENT_TO_BROKER: Tag = Tag(0x2B80438); +pub(crate) const _LLMP_TAG_EVENT_TO_BROKER: Tag = Tag(0x2B80438); /// Handle in both /// -const LLMP_TAG_EVENT_TO_BOTH: Tag = Tag(0x2B0741); -const _LLMP_TAG_RESTART: Tag = Tag(0x8357A87); -const _LLMP_TAG_NO_RESTART: Tag = Tag(0x57A7EE71); +pub(crate) const LLMP_TAG_EVENT_TO_BOTH: Tag = Tag(0x2B0741); +pub(crate) const _LLMP_TAG_RESTART: Tag = Tag(0x8357A87); +pub(crate) const _LLMP_TAG_NO_RESTART: Tag = Tag(0x57A7EE71); /// The minimum buffer size at which to compress LLMP IPC messages. -#[cfg(any(feature = "llmp_compression", feature = "tcp_compression"))] +#[cfg(feature = "llmp_compression")] pub const COMPRESS_THRESHOLD: usize = 1024; /// Specify if the State must be persistent over restarts @@ -257,7 +253,7 @@ where impl LlmpEventConverter where - S: UsesInput + HasExecutions + HasMetadata, + S: UsesInput + HasExecutions + HasMetadata + Stoppable, SP: ShMemProvider, IC: InputConverter, ICB: InputConverter, @@ -297,29 +293,22 @@ where event: Event, ) -> Result<(), Error> where - E: Executor + HasObservers, + E: Executor + HasObservers, EM: UsesState + EventFirer, for<'a> E::Observers: Deserialize<'a>, - Z: ExecutionProcessor + EvaluatorObservers, + Z: ExecutionProcessor + EvaluatorObservers, { match event { Event::NewTestcase { - input, - client_config: _, - exit_kind: _, - corpus_size: _, - observers_buf: _, // Useless as we are converting between types - time: _, - executions: _, - forward_id, + input, forward_id, .. 
} => { - log::info!("Received new Testcase to convert from {client_id:?} (forward {forward_id:?}, forward {forward_id:?})"); + log::debug!("Received new Testcase to convert from {client_id:?} (forward {forward_id:?}, forward {forward_id:?})"); let Some(converter) = self.converter_back.as_mut() else { return Ok(()); }; - let res = fuzzer.evaluate_input_with_observers::( + let res = fuzzer.evaluate_input_with_observers::( state, executor, manager, @@ -340,6 +329,7 @@ where } Ok(()) } + Event::Stop => Ok(()), _ => Err(Error::unknown(format!( "Received illegal message that message should not have arrived: {:?}.", event.name() @@ -357,10 +347,10 @@ where manager: &mut EM, ) -> Result where - E: Executor + HasObservers, + E: Executor + HasObservers, EM: UsesState + EventFirer, for<'a> E::Observers: Deserialize<'a>, - Z: ExecutionProcessor + EvaluatorObservers, + Z: ExecutionProcessor + EvaluatorObservers, { // TODO: Get around local event copy by moving handle_in_client let self_id = self.llmp.sender().id(); @@ -387,6 +377,7 @@ where }; let event: Event = postcard::from_bytes(event_bytes)?; + log::debug!("Processor received message {}", event.name_detailed()); self.handle_in_client(fuzzer, executor, state, manager, client_id, event)?; count += 1; } @@ -440,8 +431,9 @@ where corpus_size, observers_buf, time, - executions, forward_id, + #[cfg(all(unix, feature = "std", feature = "multi_machine"))] + node_id, } => Event::NewTestcase { input: self.converter.as_mut().unwrap().convert(input)?, client_config, @@ -449,8 +441,9 @@ where corpus_size, observers_buf, time, - executions, forward_id, + #[cfg(all(unix, feature = "std", feature = "multi_machine"))] + node_id, }, Event::CustomBuf { buf, tag } => Event::CustomBuf { buf, tag }, _ => { @@ -495,8 +488,9 @@ where corpus_size, observers_buf, time, - executions, forward_id, + #[cfg(all(unix, feature = "std", feature = "multi_machine"))] + node_id, } => Event::NewTestcase { input: self.converter.as_mut().unwrap().convert(input)?, client_config, @@ -504,8 +498,9 @@ where corpus_size, observers_buf, time, - executions, forward_id, + #[cfg(all(unix, feature = "std", feature = "multi_machine"))] + node_id, }, Event::CustomBuf { buf, tag } => Event::CustomBuf { buf, tag }, _ => { diff --git a/libafl/src/events/llmp/restarting.rs b/libafl/src/events/llmp/restarting.rs index 989ed7e470..c76d89497d 100644 --- a/libafl/src/events/llmp/restarting.rs +++ b/libafl/src/events/llmp/restarting.rs @@ -3,9 +3,7 @@ //! When the target crashes, a watch process (the parent) will //! restart/refork it. 
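
For orientation, the usual entry point into this module is one of the `setup_restarting_mgr_*` helpers documented further below. A minimal sketch, assuming the long-standing `setup_restarting_mgr_std` helper and `SimpleMonitor` from the crate (both names lie outside this hunk):

    // Spawns a broker if the port is still free, otherwise (re)spawns a fuzzer
    // client that is restarted after every crash or timeout.
    let monitor = SimpleMonitor::new(|s| println!("{s}"));
    let (state, mut restarting_mgr) =
        setup_restarting_mgr_std(monitor, 1337, EventConfig::AlwaysUnique)?;
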
-use alloc::vec::Vec; -#[cfg(all(unix, not(miri), feature = "std"))] -use core::ptr::addr_of_mut; +use alloc::{boxed::Box, vec::Vec}; #[cfg(feature = "std")] use core::sync::atomic::{compiler_fence, Ordering}; #[cfg(feature = "std")] @@ -22,35 +20,35 @@ use libafl_bolts::os::startable_self; use libafl_bolts::os::unix_signals::setup_signal_handler; #[cfg(all(feature = "std", feature = "fork", unix))] use libafl_bolts::os::{fork, ForkResult}; -use libafl_bolts::{ - llmp::LlmpBroker, - shmem::ShMemProvider, - tuples::{tuple_list, Handle}, -}; #[cfg(feature = "std")] use libafl_bolts::{ llmp::LlmpConnection, os::CTRL_C_EXIT, shmem::StdShMemProvider, staterestore::StateRestorer, }; +use libafl_bolts::{ + llmp::{Broker, LlmpBroker}, + shmem::ShMemProvider, + tuples::{tuple_list, Handle}, +}; use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] use typed_builder::TypedBuilder; -#[cfg(feature = "std")] -use crate::events::AdaptiveSerializer; #[cfg(all(unix, feature = "std", not(miri)))] use crate::events::EVENTMGR_SIGHANDLER_STATE; +#[cfg(feature = "std")] +use crate::events::{AdaptiveSerializer, CustomBufEventResult, HasCustomBufHandlers}; use crate::{ events::{ - hooks::EventManagerHooksTuple, Event, EventConfig, EventFirer, EventManager, - EventManagerId, EventProcessor, EventRestarter, HasEventManagerId, LlmpEventManager, - LlmpShouldSaveState, ProgressReporter, StdLlmpEventHook, + launcher::ClientDescription, Event, EventConfig, EventFirer, EventManager, + EventManagerHooksTuple, EventManagerId, EventProcessor, EventRestarter, HasEventManagerId, + LlmpEventManager, LlmpShouldSaveState, ProgressReporter, StdLlmpEventHook, }, executors::{Executor, HasObservers}, fuzzer::{Evaluator, EvaluatorObservers, ExecutionProcessor}, inputs::UsesInput, monitors::Monitor, observers::{ObserversTuple, TimeObserver}, - state::{HasExecutions, HasLastReportTime, State, UsesState}, + state::{HasExecutions, HasImported, HasLastReportTime, State, UsesState}, Error, HasMetadata, }; @@ -149,7 +147,7 @@ where fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> where - OT: ObserversTuple + Serialize, + OT: ObserversTuple + Serialize, { self.llmp_mgr.serialize_observers(observers) } @@ -177,7 +175,7 @@ where fn on_restart(&mut self, state: &mut S) -> Result<(), Error> { state.on_restart()?; - // First, reset the page to 0 so the next iteration can read read from the beginning of this page + // First, reset the page to 0 so the next iteration can read from the beginning of this page self.staterestorer.reset(); self.staterestorer.save(&( if self.save_state.on_restart() { @@ -204,13 +202,14 @@ where #[cfg(feature = "std")] impl EventProcessor for LlmpRestartingEventManager where - E: HasObservers + Executor, Z>, + E: HasObservers + Executor, Z, State = S>, + E::Observers: ObserversTuple + Serialize, for<'a> E::Observers: Deserialize<'a>, EMH: EventManagerHooksTuple, - S: State + HasExecutions + HasMetadata, + S: State + HasExecutions + HasMetadata + HasImported, SP: ShMemProvider, - Z: ExecutionProcessor - + EvaluatorObservers + Z: ExecutionProcessor, E::Observers, State = S> + + EvaluatorObservers, E::Observers> + Evaluator>, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { @@ -218,18 +217,23 @@ where self.intermediate_save()?; Ok(res) } + + fn on_shutdown(&mut self) -> Result<(), Error> { + self.send_exiting() + } } #[cfg(feature = "std")] impl EventManager for LlmpRestartingEventManager where - E: HasObservers + Executor, Z>, + E: HasObservers + Executor, Z, 
State = S>, + E::Observers: ObserversTuple + Serialize, for<'a> E::Observers: Deserialize<'a>, EMH: EventManagerHooksTuple, - S: State + HasExecutions + HasMetadata + HasLastReportTime, + S: State + HasExecutions + HasMetadata + HasLastReportTime + HasImported, SP: ShMemProvider, - Z: ExecutionProcessor - + EvaluatorObservers + Z: ExecutionProcessor, E::Observers, State = S> + + EvaluatorObservers, E::Observers> + Evaluator>, { } @@ -245,6 +249,20 @@ where } } +#[cfg(feature = "std")] +impl HasCustomBufHandlers for LlmpRestartingEventManager +where + S: State, + SP: ShMemProvider, +{ + fn add_custom_buf_handler( + &mut self, + handler: Box Result>, + ) { + self.llmp_mgr.add_custom_buf_handler(handler); + } +} + /// The llmp connection from the actual fuzzer to the process supervising it const _ENV_FUZZER_SENDER: &str = "_AFL_ENV_FUZZER_SENDER"; const _ENV_FUZZER_RECEIVER: &str = "_AFL_ENV_FUZZER_RECEIVER"; @@ -304,20 +322,21 @@ where /// The kind of manager we're creating right now #[cfg(feature = "std")] -#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub enum ManagerKind { /// Any kind will do Any, /// A client, getting messages from a local broker. Client { - /// The CPU core ID of this client - cpu_core: Option, + /// The client description + client_description: ClientDescription, }, /// An [`LlmpBroker`], forwarding the packets of local clients. Broker, } /// Sets up a restarting fuzzer, using the [`StdShMemProvider`], and standard features. +/// /// The restarting mgr is a combination of restarter and runner, that can be used on systems with and without `fork` support. /// The restarter will spawn a new process each time the child crashes or timeouts. #[cfg(feature = "std")] @@ -348,6 +367,7 @@ where } /// Sets up a restarting fuzzer, using the [`StdShMemProvider`], and standard features. +/// /// The restarting mgr is a combination of restarter and runner, that can be used on systems with and without `fork` support. /// The restarter will spawn a new process each time the child crashes or timeouts. /// This one, additionally uses the timeobserver for the adaptive serialization @@ -380,20 +400,15 @@ where .launch() } -/// Provides a `builder` which can be used to build a [`RestartingMgr`], which is a combination of a +/// Provides a `builder` which can be used to build a [`RestartingMgr`]. +/// +/// The [`RestartingMgr`] is is a combination of a /// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The /// `restarter` will start a new process each time the child crashes or times out. #[cfg(feature = "std")] #[allow(clippy::default_trait_access, clippy::ignored_unit_patterns)] #[derive(TypedBuilder, Debug)] -pub struct RestartingMgr -where - EMH: EventManagerHooksTuple, - S: State, - SP: ShMemProvider, - MT: Monitor, - //CE: CustomEvent, -{ +pub struct RestartingMgr { /// The shared memory provider to use for the broker or client spawned by the restarting /// manager. 
shmem_provider: SP, @@ -444,7 +459,7 @@ where { /// Launch the broker and the clients and fuzz pub fn launch(&mut self) -> Result<(Option, LlmpRestartingEventManager), Error> { - // We start ourself as child process to actually fuzz + // We start ourselves as child process to actually fuzz let (staterestorer, new_shmem_provider, core_id) = if std::env::var(_ENV_FUZZER_SENDER) .is_err() { @@ -455,9 +470,7 @@ where }; if let Some(exit_cleanly_after) = self.exit_cleanly_after { - broker - .inner_mut() - .set_exit_cleanly_after(exit_cleanly_after); + broker.set_exit_after(exit_cleanly_after); } broker.loop_with_timeouts(Duration::from_secs(30), Some(Duration::from_millis(5))); @@ -468,7 +481,7 @@ where Err(Error::shutting_down()) }; // We get here if we are on Unix, or we are a broker on Windows (or without forks). - let (mgr, core_id) = match self.kind { + let (mgr, core_id) = match &self.kind { ManagerKind::Any => { let connection = LlmpConnection::on_port(self.shmem_provider.clone(), self.broker_port)?; @@ -515,7 +528,7 @@ where broker_things(broker, self.remote_broker_addr)?; unreachable!("The broker may never return normally, only on errors or when shutting down."); } - ManagerKind::Client { cpu_core } => { + ManagerKind::Client { client_description } => { // We are a client let mgr = LlmpEventManager::builder() .always_interesting(self.always_interesting) @@ -527,7 +540,7 @@ where self.time_ref.clone(), )?; - (mgr, cpu_core) + (mgr, Some(client_description.core_id())) } }; @@ -569,13 +582,18 @@ where handle.status() } ForkResult::Child => { + log::debug!( + "{} has been forked into {}", + std::os::unix::process::parent_id(), + std::process::id() + ); self.shmem_provider.post_fork(true)?; break (staterestorer, self.shmem_provider.clone(), core_id); } } }; - // If this guy wants to fork, then ignore sigit + // If this guy wants to fork, then ignore sigint #[cfg(any(windows, not(feature = "fork")))] unsafe { #[cfg(windows)] @@ -594,7 +612,7 @@ where #[cfg(any(windows, not(feature = "fork")))] let child_status = child_status.code().unwrap_or_default(); - compiler_fence(Ordering::SeqCst); + compiler_fence(Ordering::SeqCst); // really useful? if child_status == CTRL_C_EXIT || staterestorer.wants_to_exit() { // if ctrl-c is pressed, we end up in this branch @@ -633,7 +651,7 @@ where // At this point we are the fuzzer *NOT* the restarter. // We setup signal handlers to clean up shmem segments used by state restorer #[cfg(all(unix, not(miri)))] - if let Err(_e) = unsafe { setup_signal_handler(addr_of_mut!(EVENTMGR_SIGHANDLER_STATE)) } { + if let Err(_e) = unsafe { setup_signal_handler(&raw mut EVENTMGR_SIGHANDLER_STATE) } { // We can live without a proper ctrl+c signal handler. Print and ignore. log::error!("Failed to setup signal handlers: {_e}"); } diff --git a/libafl/src/events/mod.rs b/libafl/src/events/mod.rs index bc7cedd108..81ab7f7d32 100644 --- a/libafl/src/events/mod.rs +++ b/libafl/src/events/mod.rs @@ -1,7 +1,8 @@ //! An [`EventManager`] manages all events that go to other instances of the fuzzer. //! The messages are commonly information about new Testcases as well as stats and other [`Event`]s. 
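
Besides the module reshuffle (`events_hooks`, `broker_hooks`, the new `multi_machine` module), this events module gains a graceful-shutdown path in this patch: a new `Event::Stop` variant that receiving clients turn into `state.request_stop()`, and an `on_shutdown()` hook on event processors. A minimal sketch of requesting a fuzzer-wide stop, assuming an `EventFirer` named `mgr` and a mutable `state` in scope:

    // Every connected client handles Event::Stop by calling state.request_stop().
    mgr.fire(&mut state, Event::Stop)?;
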
-pub mod hooks; +pub mod events_hooks; +pub use events_hooks::*; pub mod simple; pub use simple::*; @@ -15,10 +16,11 @@ pub mod launcher; #[allow(clippy::ignored_unit_patterns)] pub mod llmp; pub use llmp::*; - #[cfg(feature = "tcp_manager")] #[allow(clippy::ignored_unit_patterns)] pub mod tcp; + +pub mod broker_hooks; use alloc::{borrow::Cow, boxed::Box, string::String, vec::Vec}; use core::{ fmt, @@ -28,14 +30,16 @@ use core::{ }; use ahash::RandomState; +pub use broker_hooks::*; #[cfg(feature = "std")] pub use launcher::*; #[cfg(all(unix, feature = "std"))] -use libafl_bolts::os::unix_signals::{siginfo_t, ucontext_t, Handler, Signal, CTRL_C_EXIT}; +use libafl_bolts::os::unix_signals::{siginfo_t, ucontext_t, Signal, SignalHandler}; +#[cfg(all(unix, feature = "std"))] +use libafl_bolts::os::CTRL_C_EXIT; use libafl_bolts::{ current_time, tuples::{Handle, MatchNameRef}, - ClientId, }; use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] @@ -57,11 +61,16 @@ use crate::{ state::HasScalabilityMonitor, }; +/// Multi-machine mode +#[cfg(all(unix, feature = "std", feature = "multi_machine"))] +pub mod multi_machine; + /// Check if ctrl-c is sent with this struct #[cfg(all(unix, feature = "std"))] pub static mut EVENTMGR_SIGHANDLER_STATE: ShutdownSignalData = ShutdownSignalData {}; -/// A signal handler for catching ctrl-c. +/// A signal handler for catching `ctrl-c`. +/// /// The purpose of this signal handler is solely for calling `exit()` with a specific exit code 100 /// In this way, the restarting manager can tell that we really want to exit #[cfg(all(unix, feature = "std"))] @@ -70,18 +79,18 @@ pub struct ShutdownSignalData {} /// Shutdown handler. `SigTerm`, `SigInterrupt`, `SigQuit` call this /// We can't handle SIGKILL in the signal handler, this means that you shouldn't kill your fuzzer with `kill -9` because then the shmem segments are never freed +/// +/// # Safety +/// This will exit the program #[cfg(all(unix, feature = "std"))] -impl Handler for ShutdownSignalData { - fn handle( +impl SignalHandler for ShutdownSignalData { + unsafe fn handle( &mut self, _signal: Signal, _info: &mut siginfo_t, _context: Option<&mut ucontext_t>, ) { - // println!("in handler! {}", std::process::id()); unsafe { - // println!("Exiting from the handler...."); - #[cfg(unix)] libc::_exit(CTRL_C_EXIT); @@ -104,10 +113,12 @@ pub struct EventManagerId( pub usize, ); +#[cfg(all(unix, feature = "std", feature = "multi_machine"))] +use crate::events::multi_machine::NodeId; #[cfg(feature = "introspection")] use crate::monitors::ClientPerfMonitor; use crate::{ - inputs::UsesInput, observers::TimeObserver, stages::HasCurrentStage, state::UsesState, + inputs::UsesInput, observers::TimeObserver, stages::HasCurrentStageId, state::UsesState, }; /// The log event severity @@ -201,7 +212,7 @@ impl EventConfig { } } - /// Match if the currenti [`EventConfig`] matches another given config + /// Match if the current [`EventConfig`] matches another given config #[must_use] pub fn match_with(&self, other: &EventConfig) -> bool { match self { @@ -275,10 +286,11 @@ where client_config: EventConfig, /// The time of generation of the event time: Duration, - /// The executions of this client - executions: u64, /// The original sender if, if forwarded - forward_id: Option, + forward_id: Option, + /// The (multi-machine) node from which the tc is from, if any + #[cfg(all(unix, feature = "std", feature = "multi_machine"))] + node_id: Option, }, /// New stats event to monitor. 
UpdateExecStats { @@ -315,8 +327,6 @@ where Objective { /// Objective corpus size objective_size: usize, - /// The total number of executions when this objective is found - executions: u64, /// The time when this event was created time: Duration, }, @@ -336,6 +346,8 @@ where /// Tag of this buffer tag: String, }, + /// Exit gracefully + Stop, /*/// A custom type Custom { // TODO: Allow custom events @@ -347,50 +359,51 @@ impl Event where I: Input, { - fn name(&self) -> &str { + /// Event's corresponding name + pub fn name(&self) -> &str { match self { - Event::NewTestcase { - input: _, - client_config: _, - corpus_size: _, - exit_kind: _, - observers_buf: _, - time: _, - executions: _, - forward_id: _, - } => "Testcase", - Event::UpdateExecStats { - time: _, - executions: _, - phantom: _, - } => "Client Heartbeat", - Event::UpdateUserStats { - name: _, - value: _, - phantom: _, - } => "UserStats", + Event::NewTestcase { .. } => "Testcase", + Event::UpdateExecStats { .. } => "Client Heartbeat", + Event::UpdateUserStats { .. } => "UserStats", #[cfg(feature = "introspection")] - Event::UpdatePerfMonitor { - time: _, - executions: _, - introspection_monitor: _, - phantom: _, - } => "PerfMonitor", + Event::UpdatePerfMonitor { .. } => "PerfMonitor", Event::Objective { .. } => "Objective", - Event::Log { - severity_level: _, - message: _, - phantom: _, - } => "Log", + Event::Log { .. } => "Log", Event::CustomBuf { .. } => "CustomBuf", /*Event::Custom { sender_id: _, /*custom_event} => custom_event.name()*/ } => "todo",*/ + Event::Stop => "Stop", + } + } + + /// Event's corresponding name with additional info + fn name_detailed(&self) -> Cow<'static, str> { + match self { + Event::NewTestcase { input, .. } => { + Cow::Owned(format!("Testcase {}", input.generate_name(None))) + } + Event::UpdateExecStats { .. } => Cow::Borrowed("Client Heartbeat"), + Event::UpdateUserStats { .. } => Cow::Borrowed("UserStats"), + #[cfg(feature = "introspection")] + Event::UpdatePerfMonitor { .. } => Cow::Borrowed("PerfMonitor"), + Event::Objective { .. } => Cow::Borrowed("Objective"), + Event::Log { .. } => Cow::Borrowed("Log"), + Event::CustomBuf { .. } => Cow::Borrowed("CustomBuf"), + Event::Stop => Cow::Borrowed("Stop"), + /*Event::Custom { + sender_id: _, /*custom_event} => custom_event.name()*/ + } => "todo",*/ } } + + /// Returns true if self is a new testcase, false otherwise. + pub fn is_new_testcase(&self) -> bool { + matches!(self, Event::NewTestcase { .. }) + } } -/// [`EventFirer`] fire an event. +/// [`EventFirer`] fires an event. pub trait EventFirer: UsesState { /// Send off an [`Event`] to the broker /// @@ -427,7 +440,7 @@ pub trait EventFirer: UsesState { /// Serialize all observers for this type and manager fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> where - OT: ObserversTuple + Serialize, + OT: ObserversTuple<::Input, Self::State> + Serialize, { Ok(Some(postcard::to_allocvec(observers)?)) } @@ -535,7 +548,7 @@ where /// Restartable trait pub trait EventRestarter: UsesState { /// For restarting event managers, implement a way to forward state to their next peers. - /// You *must* ensure that [`HasCurrentStage::on_restart`] will be invoked in this method, by you + /// You *must* ensure that [`HasCurrentStageId::on_restart`] will be invoked in this method, by you /// or an internal [`EventRestarter`], before the state is saved for recovery. 
#[inline] fn on_restart(&mut self, state: &mut Self::State) -> Result<(), Error> { @@ -565,6 +578,9 @@ pub trait EventProcessor: UsesState { state: &mut Self::State, executor: &mut E, ) -> Result; + + /// Shutdown gracefully; typically without saving state. + fn on_shutdown(&mut self) -> Result<(), Error>; } /// The id of this [`EventManager`]. /// For multi processed [`EventManager`]s, @@ -653,6 +669,10 @@ where ) -> Result { Ok(0) } + + fn on_shutdown(&mut self) -> Result<(), Error> { + Ok(()) + } } impl EventManager for NopEventManager where @@ -740,7 +760,7 @@ where #[inline] fn serialize_observers(&mut self, observers: &OT) -> Result>, Error> where - OT: ObserversTuple + Serialize, + OT: ObserversTuple<::Input, Self::State> + Serialize, { self.inner.serialize_observers(observers) } @@ -784,6 +804,10 @@ where ) -> Result { self.inner.process(fuzzer, state, executor) } + + fn on_shutdown(&mut self) -> Result<(), Error> { + self.inner.on_shutdown() + } } impl EventManager for MonitorTypedEventManager @@ -872,7 +896,7 @@ pub trait AdaptiveSerializer { percentage_threshold: usize, ) -> Result>, Error> where - OT: ObserversTuple + Serialize, + OT: ObserversTuple + Serialize, S: UsesInput, { match self.time_ref() { @@ -917,8 +941,6 @@ pub trait AdaptiveSerializer { #[cfg(test)] mod tests { - use core::ptr::addr_of_mut; - use libafl_bolts::{current_time, tuples::tuple_list, Named}; use tuple_list::tuple_list_type; @@ -933,8 +955,10 @@ mod tests { #[test] fn test_event_serde() { + let map_ptr = &raw const MAP; let obv = unsafe { - StdMapObserver::from_mut_ptr("test", addr_of_mut!(MAP) as *mut u32, MAP.len()) + let len = (*map_ptr).len(); + StdMapObserver::from_mut_ptr("test", &raw mut MAP as *mut u32, len) }; let map = tuple_list!(obv); let observers_buf = postcard::to_allocvec(&map).unwrap(); @@ -947,24 +971,16 @@ mod tests { corpus_size: 123, client_config: EventConfig::AlwaysUnique, time: current_time(), - executions: 0, forward_id: None, + #[cfg(all(unix, feature = "std", feature = "multi_machine"))] + node_id: None, }; let serialized = postcard::to_allocvec(&e).unwrap(); let d = postcard::from_bytes::>(&serialized).unwrap(); match d { - Event::NewTestcase { - input: _, - observers_buf, - corpus_size: _, - exit_kind: _, - client_config: _, - time: _, - executions: _, - forward_id: _, - } => { + Event::NewTestcase { observers_buf, .. 
} => { let o: tuple_list_type!(StdMapObserver::) = postcard::from_bytes(observers_buf.as_ref().unwrap()).unwrap(); assert_eq!("test", o.0.name()); diff --git a/libafl/src/events/multi_machine.rs b/libafl/src/events/multi_machine.rs new file mode 100644 index 0000000000..f55e41ea24 --- /dev/null +++ b/libafl/src/events/multi_machine.rs @@ -0,0 +1,620 @@ +use core::fmt::Display; +use std::{ + boxed::Box, + collections::HashMap, + io::ErrorKind, + process, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, OnceLock, + }, + time::Duration, + vec::Vec, +}; + +use enumflags2::{bitflags, BitFlags}; +#[cfg(feature = "llmp_compression")] +use libafl_bolts::compress::GzipCompressor; +use libafl_bolts::{current_time, ownedref::OwnedRef, Error}; +use serde::{Deserialize, Serialize}; +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::{TcpListener, TcpStream, ToSocketAddrs}, + runtime::Runtime, + sync::RwLock, + task::JoinHandle, + time, +}; +use typed_builder::TypedBuilder; + +use crate::{ + events::{Event, TcpMultiMachineLlmpReceiverHook, TcpMultiMachineLlmpSenderHook}, + inputs::{Input, NopInput}, +}; + +// const MAX_NB_RECEIVED_AT_ONCE: usize = 100; + +#[bitflags(default = SendToParent | SendToChildren)] +#[repr(u8)] +#[derive(Copy, Clone, Debug, PartialEq)] +/// The node policy. It represents flags that can be applied to the node to change how it behaves. +pub enum NodePolicy { + /// Send current node's interesting inputs to parent. + SendToParent, + /// Send current node's interesting inputs to children. + SendToChildren, +} + +const DUMMY_BYTE: u8 = 0x14; + +/// Use `OwnedRef` as much as possible here to avoid useless copies. +/// An owned TCP message for multi machine +#[derive(Clone, Debug)] +// #[serde(bound = "I: serde::de::DeserializeOwned")] +pub enum MultiMachineMsg<'a, I> +where + I: Input, +{ + /// A raw llmp message (not deserialized) + LlmpMsg(OwnedRef<'a, [u8]>), + + /// A `LibAFL` Event (already deserialized) + Event(OwnedRef<'a, Event>), +} + +/// We do not use raw pointers, so no problem with thead-safety +unsafe impl Send for MultiMachineMsg<'_, I> {} +unsafe impl Sync for MultiMachineMsg<'_, I> {} + +impl<'a, I> MultiMachineMsg<'a, I> +where + I: Input, +{ + /// Create a new [`MultiMachineMsg`] as event. + /// + /// # Safety + /// + /// `OwnedRef` should **never** be a raw pointer for thread-safety reasons. + /// We check this for debug builds, but not for release. + #[must_use] + pub unsafe fn event(event: OwnedRef<'a, Event>) -> Self { + debug_assert!(!event.is_raw()); + + MultiMachineMsg::Event(event) + } + + /// Create a new [`MultiMachineMsg`] from an llmp msg. + #[must_use] + pub fn llmp_msg(msg: OwnedRef<'a, [u8]>) -> Self { + MultiMachineMsg::LlmpMsg(msg) + } + + /// Get the message + #[must_use] + pub fn serialize_as_ref(&self) -> &[u8] { + match self { + MultiMachineMsg::LlmpMsg(msg) => msg.as_ref(), + MultiMachineMsg::Event(_) => { + panic!("Not supported") + } + } + } + + /// To owned message + #[must_use] + pub fn from_llmp_msg(msg: Box<[u8]>) -> MultiMachineMsg<'a, I> { + MultiMachineMsg::LlmpMsg(OwnedRef::Owned(msg)) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default, Serialize, Deserialize)] +/// A `NodeId` (unused for now) +pub struct NodeId(pub u64); + +impl NodeId { + /// Generate a unique [`NodeId`]. 
+ pub fn new() -> Self { + static CTR: OnceLock = OnceLock::new(); + let ctr = CTR.get_or_init(|| AtomicU64::new(0)); + NodeId(ctr.fetch_add(1, Ordering::Relaxed)) + } +} + +/// The state of the hook shared between the background threads and the main thread. +#[derive(Debug)] +#[allow(dead_code)] +pub struct TcpMultiMachineState { + node_descriptor: NodeDescriptor, + /// the parent to which the testcases should be forwarded when deemed interesting + parent: Option, + /// The children who connected during the fuzzing session. + children: HashMap, // The children who connected during the fuzzing session. + old_msgs: Vec>, + #[cfg(feature = "llmp_compression")] + compressor: GzipCompressor, +} + +/// The tree descriptor for the +#[derive(Debug, Clone, TypedBuilder)] +pub struct NodeDescriptor { + /// The parent address, if there is one. + pub parent_addr: Option, + + /// The node listening port. Defaults to 50000 + #[builder(default = Some(50000))] + pub node_listening_port: Option, + + #[builder(default = Duration::from_secs(60))] + /// The timeout for connecting to parent + pub timeout: Duration, + + /// Node flags + #[builder(default_code = "BitFlags::default()")] + pub flags: BitFlags, // The policy for shared messages between nodes. +} + +/// A set of multi-machine `broker_hooks`. +/// +/// Beware, the hooks should run in the same process as the one this function is called. +/// This is because we spawn a tokio runtime underneath. +/// Check `` for more details. +/// +/// Use `TcpMultiMachineHooks::builder()` to initialize the hooks. +/// +/// # Safety +/// The [`TcpMultiMachineLlmpReceiverHook`] assumes that the `msg` parameter +/// passed to the `on_new_message` method (or rather, the memory it points to), +/// lives sufficiently long for an async background task to process it. +#[derive(Debug)] +pub struct TcpMultiMachineHooks +where + I: Input, +{ + /// The sender hooks + pub sender: TcpMultiMachineLlmpSenderHook, + /// The hooks + pub receiver: TcpMultiMachineLlmpReceiverHook, +} + +impl TcpMultiMachineHooks<(), NopInput> { + /// Create the builder to build a new [`TcpMultiMachineHooks`] + /// containing a sender and a receiver from a [`NodeDescriptor`]. + #[must_use] + pub fn builder() -> TcpMultiMachineHooksBuilder<()> { + TcpMultiMachineHooksBuilder::<()> { + node_descriptor: None, + } + } +} + +/// A Multi-machine `broker_hooks` builder. +#[derive(Debug)] +pub struct TcpMultiMachineHooksBuilder { + node_descriptor: Option>, +} + +impl TcpMultiMachineHooksBuilder { + /// Set the multi machine [`NodeDescriptor`] used by the resulting [`TcpMultiMachineHooks`]. + pub fn node_descriptor( + self, + node_descriptor: NodeDescriptor, + ) -> TcpMultiMachineHooksBuilder + where + A2: Clone + Display + ToSocketAddrs + Send + Sync + 'static, + { + TcpMultiMachineHooksBuilder:: { + node_descriptor: Some(node_descriptor), + } + } +} + +impl TcpMultiMachineHooksBuilder +where + A: Clone + Display + ToSocketAddrs + Send + Sync + 'static, +{ + /// Build a new [`TcpMultiMachineHooks`] containing a sender and a receiver from a [`NodeDescriptor`]. + /// Everything is initialized and ready to be used. + /// Beware, the hooks should run in the same process as the one this function is called. + /// This is because we spawn a tokio runtime underneath. + /// Check `` for more details. 
+ /// + /// # Safety + /// The returned [`TcpMultiMachineLlmpReceiverHook`] assumes that the `msg` parameter + /// passed to the `on_new_message` method (or rather, the memory it points to), + /// lives sufficiently long for an async background task to process it. + pub unsafe fn build(mut self) -> Result, Error> + where + I: Input + Send + Sync + 'static, + { + let node_descriptor = self.node_descriptor.take().ok_or_else(|| { + Error::illegal_state( + "The node descriptor can never be `None` at this point in the code", + ) + })?; + + // Create the state of the hook. This will be shared with the background server, so we wrap + // it with concurrent-safe objects + let state = Arc::new(RwLock::new(TcpMultiMachineState { + node_descriptor, + parent: None, + children: HashMap::default(), + old_msgs: Vec::new(), + #[cfg(feature = "llmp_compression")] + compressor: GzipCompressor::new(), + })); + + let rt = + Arc::new(Runtime::new().map_err(|_| Error::unknown("Tokio runtime spawning failed"))?); + + unsafe { + TcpMultiMachineState::init::(&state.clone(), &rt.clone())?; + } + + Ok(TcpMultiMachineHooks { + sender: TcpMultiMachineLlmpSenderHook::new(state.clone(), rt.clone()), + receiver: TcpMultiMachineLlmpReceiverHook::new(state, rt), + }) + } +} + +impl TcpMultiMachineState +where + A: Clone + Display + ToSocketAddrs + Send + Sync + 'static, +{ + /// Initializes the Multi-Machine state. + /// + /// # Safety + /// + /// This should be run **only once**, in the same process as the llmp hooks, and before the hooks + /// are effectively used. + unsafe fn init( + self_mutex: &Arc>, + rt: &Arc, + ) -> Result<(), Error> { + let node_descriptor = + rt.block_on(async { self_mutex.read().await.node_descriptor.clone() }); + + // Try to connect to the parent if we should + rt.block_on(async { + let parent_mutex = self_mutex.clone(); + let mut parent_lock = parent_mutex.write().await; + + if let Some(parent_addr) = &parent_lock.node_descriptor.parent_addr { + let timeout = current_time() + parent_lock.node_descriptor.timeout; + + parent_lock.parent = loop { + log::debug!("Trying to connect to parent @ {}..", parent_addr); + match TcpStream::connect(parent_addr).await { + Ok(stream) => { + log::debug!("Connected to parent @ {}", parent_addr); + + break Some(stream); + } + Err(e) => { + if current_time() > timeout { + return Err(Error::os_error(e, "Unable to connect to parent")); + } + } + } + + time::sleep(Duration::from_secs(1)).await; + }; + } + + Ok(()) + })?; + + // Now, setup the background tasks for the children to connect to + if let Some(listening_port) = node_descriptor.node_listening_port { + let bg_state = self_mutex.clone(); + let _handle: JoinHandle> = rt.spawn(async move { + let addr = format!("0.0.0.0:{listening_port}"); + log::debug!("Starting background child task on {addr}..."); + let listener = TcpListener::bind(addr).await.map_err(|e| { + Error::os_error(e, format!("Error while binding to port {listening_port}")) + })?; + let state = bg_state; + + // The main listening loop. Should never fail. 
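+                // Each child that connects is first sent the backlog of previously relayed
+                // messages (send_old_events_to_stream below) so that late joiners catch up,
+                // and is then tracked in `children` under a freshly generated NodeId.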
+ 'listening: loop { + log::debug!("listening for children on {:?}...", listener); + match listener.accept().await { + Ok((mut stream, addr)) => { + log::debug!("{} joined the children.", addr); + let mut state_guard = state.write().await; + + if let Err(e) = state_guard + .send_old_events_to_stream::(&mut stream) + .await + { + log::error!("Error while send old messages: {e:?}."); + log::error!("The loop will resume"); + continue 'listening; + } + + state_guard.children.insert(NodeId::new(), stream); + log::debug!( + "[pid {}]{addr} added the child. nb children: {}", + process::id(), + state_guard.children.len() + ); + } + Err(e) => { + log::error!("Error while accepting child {e:?}."); + } + } + } + }); + } + + Ok(()) + } + + /// Add an event as past event. + pub fn add_past_msg(&mut self, msg: &[u8]) { + self.old_msgs.push(msg.to_vec()); + } + + /// The compressor + #[cfg(feature = "llmp_compression")] + pub fn compressor(&mut self) -> &GzipCompressor { + &self.compressor + } + + /// Read a [`TcpMultiMachineMsg`] from a stream. + /// Expects a message written by [`TcpMultiMachineState::write_msg`]. + /// If there is nothing to read from the stream, return asap with Ok(None). + #[allow(clippy::uninit_vec)] + async fn read_msg<'a, I: Input + 'a>( + stream: &mut TcpStream, + ) -> Result>, Error> { + // 0. Check if we should try to fetch something from the stream + let mut dummy_byte: [u8; 1] = [0u8]; + log::debug!("Starting read msg..."); + + let n_read = match stream.try_read(&mut dummy_byte) { + Ok(n) => n, + Err(e) if e.kind() == ErrorKind::WouldBlock => { + return Ok(None); + } + Err(e) => return Err(Error::os_error(e, "try read failed")), + }; + + log::debug!("msg read."); + + if n_read == 0 { + log::debug!("No dummy byte received..."); + return Ok(None); // Nothing to read from this stream + } + + log::debug!("Received dummy byte!"); + + // we should always read the dummy byte at this point. + assert_eq!(u8::from_le_bytes(dummy_byte), DUMMY_BYTE); + + // 1. Read msg size + let mut node_msg_len: [u8; 4] = [0; 4]; + log::debug!("Receiving msg len..."); + stream.read_exact(&mut node_msg_len).await?; + log::debug!("msg len received."); + let node_msg_len = u32::from_le_bytes(node_msg_len) as usize; + + // 2. Read msg + // do not store msg on the stack to avoid overflow issues + // TODO: optimize with less allocations... + let mut node_msg: Vec = Vec::with_capacity(node_msg_len); + unsafe { + node_msg.set_len(node_msg_len); + } + log::debug!("Receiving msg..."); + stream.read_exact(node_msg.as_mut_slice()).await?; + log::debug!("msg received."); + let node_msg = node_msg.into_boxed_slice(); + + Ok(Some(MultiMachineMsg::from_llmp_msg(node_msg))) + } + + /// Write an [`OwnedTcpMultiMachineMsg`] to a stream. + /// Can be read back using [`TcpMultiMachineState::read_msg`]. + async fn write_msg<'a, I: Input>( + stream: &mut TcpStream, + msg: &MultiMachineMsg<'a, I>, + ) -> Result<(), Error> { + let serialized_msg = msg.serialize_as_ref(); + let msg_len = u32::to_le_bytes(serialized_msg.len() as u32); + + // 0. Write the dummy byte + log::debug!("Sending dummy byte..."); + stream.write_all(&[DUMMY_BYTE]).await?; + log::debug!("dummy byte sent."); + + // 1. Write msg size + log::debug!("Sending msg len..."); + stream.write_all(&msg_len).await?; + log::debug!("msg len sent."); + + // 2. 
Write msg + log::debug!("Sending msg..."); + stream.write_all(serialized_msg).await?; + log::debug!("msg sent."); + + Ok(()) + } + + pub(crate) async fn send_old_events_to_stream( + &mut self, + stream: &mut TcpStream, + ) -> Result<(), Error> { + log::debug!("Send old events to new child..."); + + for old_msg in &self.old_msgs { + let event_ref: MultiMachineMsg = + MultiMachineMsg::llmp_msg(OwnedRef::Ref(old_msg.as_slice())); + log::debug!("Sending an old message..."); + Self::write_msg(stream, &event_ref).await?; + log::debug!("Old message sent."); + } + + log::debug!("Sent {} old messages.", self.old_msgs.len()); + + Ok(()) + } + + pub(crate) async fn send_interesting_event_to_nodes<'a, I: Input>( + &mut self, + msg: &MultiMachineMsg<'a, I>, + ) -> Result<(), Error> { + log::debug!("Sending interesting events to nodes..."); + + if self + .node_descriptor + .flags + .intersects(NodePolicy::SendToParent) + { + if let Some(parent) = &mut self.parent { + log::debug!("Sending to parent..."); + if let Err(e) = Self::write_msg(parent, msg).await { + log::error!( + "The parent disconnected. We won't try to communicate with it again." + ); + log::error!("Error: {e:?}"); + self.parent.take(); + } + } + } + + if self + .node_descriptor + .flags + .intersects(NodePolicy::SendToChildren) + { + let mut ids_to_remove: Vec = Vec::new(); + for (child_id, child_stream) in &mut self.children { + log::debug!("Sending to child {child_id:?}..."); + if let Err(err) = Self::write_msg(child_stream, msg).await { + // most likely the child disconnected. drop the connection later on and continue. + log::debug!( + "The child disconnected. We won't try to communicate with it again. Error: {err:?}" + ); + ids_to_remove.push(*child_id); + } + } + + // Garbage collect disconnected children + for id_to_remove in &ids_to_remove { + log::debug!("Child {:?} has been garbage collected.", id_to_remove); + self.children.remove(id_to_remove); + } + } + + Ok(()) + } + + /// Flush the message queue from other nodes and add incoming events to the + /// centralized event manager queue. + pub(crate) async fn receive_new_messages_from_nodes<'a, I: Input>( + &mut self, + msgs: &mut Vec>, + ) -> Result<(), Error> { + log::debug!("Checking for new events from other nodes..."); + // let mut nb_received = 0usize; + + // Our (potential) parent could have something for us + if let Some(parent) = &mut self.parent { + loop { + // Exit if received a lot of inputs at once. + // TODO: this causes problems in some cases, it could freeze all fuzzer instances. + // if nb_received > MAX_NB_RECEIVED_AT_ONCE { + // log::debug!("hitting MAX_NB_RECEIVED_AT_ONCE limit..."); + // return Ok(()); + // } + + log::debug!("Receiving from parent..."); + match Self::read_msg(parent).await { + Ok(Some(msg)) => { + log::debug!("Received event from parent"); + // The parent has something for us, we store it + msgs.push(msg); + // nb_received += 1; + } + + Ok(None) => { + // nothing from the parent, we continue + log::debug!("Nothing from parent"); + break; + } + + Err(Error::OsError(_, _, _)) => { + // most likely the parent disconnected. drop the connection + log::debug!( + "The parent disconnected. We won't try to communicate with it again." + ); + self.parent.take(); + break; + } + + Err(e) => { + log::debug!("An error occurred and was not expected."); + return Err(e); + } + } + } + } + + // What about the (potential) children? 
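// Editor's sketch, not part of this patch: the wire format that `write_msg` emits
// and `read_msg` expects, spelled out over a plain byte buffer. `DUMMY_BYTE` is
// the marker constant defined elsewhere in this module.
fn frame_message(payload: &[u8]) -> Vec<u8> {
    let mut framed = Vec::with_capacity(1 + 4 + payload.len());
    framed.push(DUMMY_BYTE); // 0. presence marker
    framed.extend_from_slice(&(payload.len() as u32).to_le_bytes()); // 1. length as LE u32
    framed.extend_from_slice(payload); // 2. the serialized message itself
    framed
}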
+ let mut ids_to_remove: Vec = Vec::new(); + log::debug!( + "[pid {}] Nb children: {}", + process::id(), + self.children.len() + ); + for (child_id, child_stream) in &mut self.children { + loop { + // Exit if received a lot of inputs at once. + // if nb_received > MAX_NB_RECEIVED_AT_ONCE { + // return Ok(()); + //} + + log::debug!("Receiving from child {child_id:?}..."); + match Self::read_msg(child_stream).await { + Ok(Some(msg)) => { + // The parent has something for us, we store it + log::debug!("Received event from child!"); + msgs.push(msg); + // nb_received += 1; + } + + Ok(None) => { + // nothing from the parent, we continue + log::debug!("Nothing from child"); + break; + } + + Err(Error::OsError(e, _, _)) => { + // most likely the parent disconnected. drop the connection + log::error!( + "The child disconnected. We won't try to communicate with it again." + ); + log::error!("Error: {e:?}"); + ids_to_remove.push(*child_id); + break; + } + + Err(e) => { + // Other error + log::debug!("An error occurred and was not expected."); + return Err(e); + } + } + } + } + + // Garbage collect disconnected children + for id_to_remove in &ids_to_remove { + log::debug!("Child {:?} has been garbage collected.", id_to_remove); + self.children.remove(id_to_remove); + } + + Ok(()) + } +} diff --git a/libafl/src/events/simple.rs b/libafl/src/events/simple.rs index d37bac7a2b..5289eaa2f7 100644 --- a/libafl/src/events/simple.rs +++ b/libafl/src/events/simple.rs @@ -1,8 +1,6 @@ //! A very simple event manager, that just supports log outputs, but no multiprocessing use alloc::{boxed::Box, vec::Vec}; -#[cfg(all(unix, not(miri), feature = "std"))] -use core::ptr::addr_of_mut; use core::{fmt::Debug, marker::PhantomData}; #[cfg(feature = "std")] use core::{ @@ -32,7 +30,7 @@ use crate::{ }, inputs::UsesInput, monitors::Monitor, - state::{HasExecutions, HasLastReportTime, State, UsesState}, + state::{HasExecutions, HasLastReportTime, State, Stoppable, UsesState}, Error, HasMetadata, }; #[cfg(feature = "std")] @@ -50,7 +48,7 @@ const _ENV_FUZZER_BROKER_CLIENT_INITIAL: &str = "_AFL_ENV_FUZZER_BROKER_CLIENT"; /// A simple, single-threaded event manager that just logs pub struct SimpleEventManager where - S: UsesInput, + S: UsesInput + Stoppable, { /// The monitor monitor: MT, @@ -64,7 +62,7 @@ where impl Debug for SimpleEventManager where MT: Debug, - S: UsesInput, + S: UsesInput + Stoppable, { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { f.debug_struct("SimpleEventManager") @@ -128,6 +126,10 @@ where } Ok(count) } + + fn on_shutdown(&mut self) -> Result<(), Error> { + self.send_exiting() + } } impl EventManager for SimpleEventManager @@ -163,7 +165,7 @@ where impl HasEventManagerId for SimpleEventManager where MT: Monitor, - S: UsesInput, + S: UsesInput + Stoppable, { fn mgr_id(&self) -> EventManagerId { EventManagerId(0) @@ -173,7 +175,7 @@ where #[cfg(feature = "std")] impl SimpleEventManager where - S: UsesInput, + S: UsesInput + Stoppable, { /// Creates a [`SimpleEventManager`] that just prints to `stdout`. #[must_use] @@ -185,7 +187,7 @@ where impl SimpleEventManager where MT: Monitor, //TODO CE: CustomEvent, - S: UsesInput, + S: UsesInput + Stoppable, { /// Creates a new [`SimpleEventManager`]. 
pub fn new(monitor: MT) -> Self { @@ -204,30 +206,16 @@ where event: &Event, ) -> Result { match event { - Event::NewTestcase { - input: _, - client_config: _, - exit_kind: _, - corpus_size, - observers_buf: _, - time, - executions, - forward_id: _, - } => { + Event::NewTestcase { corpus_size, .. } => { monitor.client_stats_insert(ClientId(0)); monitor .client_stats_mut_for(ClientId(0)) .update_corpus_size(*corpus_size as u64); - monitor - .client_stats_mut_for(ClientId(0)) - .update_executions(*executions, *time); monitor.display(event.name(), ClientId(0)); Ok(BrokerEventResult::Handled) } Event::UpdateExecStats { - time, - executions, - phantom: _, + time, executions, .. } => { // TODO: The monitor buffer should be added on client add. monitor.client_stats_insert(ClientId(0)); @@ -238,11 +226,7 @@ where monitor.display(event.name(), ClientId(0)); Ok(BrokerEventResult::Handled) } - Event::UpdateUserStats { - name, - value, - phantom: _, - } => { + Event::UpdateUserStats { name, value, .. } => { monitor.client_stats_insert(ClientId(0)); monitor .client_stats_mut_for(ClientId(0)) @@ -256,7 +240,7 @@ where time, executions, introspection_monitor, - phantom: _, + .. } => { // TODO: The monitor buffer should be added on client add. monitor.client_stats_insert(ClientId(0)); @@ -266,52 +250,52 @@ where monitor.display(event.name(), ClientId(0)); Ok(BrokerEventResult::Handled) } - Event::Objective { - objective_size, - executions, - time, - } => { + Event::Objective { objective_size, .. } => { monitor.client_stats_insert(ClientId(0)); monitor .client_stats_mut_for(ClientId(0)) .update_objective_size(*objective_size as u64); - monitor - .client_stats_mut_for(ClientId(0)) - .update_executions(*executions, *time); monitor.display(event.name(), ClientId(0)); Ok(BrokerEventResult::Handled) } Event::Log { severity_level, message, - phantom: _, + .. } => { let (_, _) = (message, severity_level); log::log!((*severity_level).into(), "{message}"); Ok(BrokerEventResult::Handled) } Event::CustomBuf { .. } => Ok(BrokerEventResult::Forward), - //_ => Ok(BrokerEventResult::Forward), + Event::Stop => Ok(BrokerEventResult::Forward), } } // Handle arriving events in the client #[allow(clippy::needless_pass_by_value, clippy::unused_self)] fn handle_in_client(&mut self, state: &mut S, event: Event) -> Result<(), Error> { - if let Event::CustomBuf { tag, buf } = &event { - for handler in &mut self.custom_buf_handlers { - handler(state, tag, buf)?; + match event { + Event::CustomBuf { buf, tag } => { + for handler in &mut self.custom_buf_handlers { + handler(state, &tag, &buf)?; + } + Ok(()) } - Ok(()) - } else { - Err(Error::unknown(format!( + Event::Stop => { + state.request_stop(); + Ok(()) + } + _ => Err(Error::unknown(format!( "Received illegal message that message should not have arrived: {event:?}." - ))) + ))), } } } -/// Provides a `builder` which can be used to build a [`SimpleRestartingEventManager`], which is a combination of a +/// Provides a `builder` which can be used to build a [`SimpleRestartingEventManager`]. +/// +/// The [`SimpleRestartingEventManager`] is a combination of a /// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The /// `restarter` will start a new process each time the child crashes or times out. 
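// Editor's note, not part of this patch: with the new `Stoppable` bound, the event
// manager translates `Event::Stop` into `state.request_stop()` as handled above.
// On the sending side, a client only has to fire the (input-generic) event through
// the usual `EventFirer` method:
mgr.fire(state, Event::Stop)?;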
#[cfg(feature = "std")] @@ -319,7 +303,7 @@ where #[derive(Debug)] pub struct SimpleRestartingEventManager where - S: UsesInput, + S: UsesInput + Stoppable, SP: ShMemProvider, //CE: CustomEvent, { /// The actual simple event mgr @@ -398,6 +382,9 @@ where ) -> Result { self.simple_event_mgr.process(fuzzer, state, executor) } + fn on_shutdown(&mut self) -> Result<(), Error> { + self.send_exiting() + } } #[cfg(feature = "std")] @@ -437,7 +424,7 @@ where impl HasEventManagerId for SimpleRestartingEventManager where MT: Monitor, - S: UsesInput, + S: UsesInput + Stoppable, SP: ShMemProvider, { fn mgr_id(&self) -> EventManagerId { @@ -449,7 +436,7 @@ where #[allow(clippy::type_complexity, clippy::too_many_lines)] impl SimpleRestartingEventManager where - S: UsesInput, + S: UsesInput + Stoppable, SP: ShMemProvider, MT: Monitor, //TODO CE: CustomEvent, { @@ -554,7 +541,7 @@ where // At this point we are the fuzzer *NOT* the restarter. // We setup signal handlers to clean up shmem segments used by state restorer #[cfg(all(unix, not(miri)))] - if let Err(_e) = unsafe { setup_signal_handler(addr_of_mut!(EVENTMGR_SIGHANDLER_STATE)) } { + if let Err(_e) = unsafe { setup_signal_handler(&raw mut EVENTMGR_SIGHANDLER_STATE) } { // We can live without a proper ctrl+c signal handler. Print and ignore. log::error!("Failed to setup signal handlers: {_e}"); } diff --git a/libafl/src/events/tcp.rs b/libafl/src/events/tcp.rs index eb0010241f..977ad9de85 100644 --- a/libafl/src/events/tcp.rs +++ b/libafl/src/events/tcp.rs @@ -1,8 +1,6 @@ //! TCP-backed event manager for scalable multi-processed fuzzing use alloc::{boxed::Box, vec::Vec}; -#[cfg(all(unix, feature = "std", not(miri)))] -use core::ptr::addr_of_mut; use core::{ marker::PhantomData, num::NonZeroUsize, @@ -31,7 +29,7 @@ use libafl_bolts::os::{fork, ForkResult}; use libafl_bolts::{shmem::ShMemProvider, tuples::tuple_list, ClientId}; #[cfg(feature = "std")] use libafl_bolts::{shmem::StdShMemProvider, staterestore::StateRestorer}; -use serde::{de::DeserializeOwned, Deserialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use tokio::{ io::{AsyncReadExt, AsyncWriteExt}, sync::{broadcast, broadcast::error::RecvError, mpsc}, @@ -45,15 +43,16 @@ use super::{CustomBufEventResult, CustomBufHandlerFn}; use crate::events::EVENTMGR_SIGHANDLER_STATE; use crate::{ events::{ - hooks::EventManagerHooksTuple, BrokerEventResult, Event, EventConfig, EventFirer, - EventManager, EventManagerId, EventProcessor, EventRestarter, HasCustomBufHandlers, - HasEventManagerId, ProgressReporter, + BrokerEventResult, Event, EventConfig, EventFirer, EventManager, EventManagerHooksTuple, + EventManagerId, EventProcessor, EventRestarter, HasCustomBufHandlers, HasEventManagerId, + ProgressReporter, }, executors::{Executor, HasObservers}, fuzzer::{EvaluatorObservers, ExecutionProcessor}, inputs::{Input, UsesInput}, monitors::Monitor, - state::{HasExecutions, HasLastReportTime, State, UsesState}, + observers::ObserversTuple, + state::{HasExecutions, HasImported, HasLastReportTime, State, UsesState}, Error, HasMetadata, }; @@ -112,8 +111,9 @@ where } /// Run in the broker until all clients exit + // TODO: remove allow(clippy::needless_return) when clippy is fixed #[tokio::main(flavor = "current_thread")] - #[allow(clippy::too_many_lines)] + #[allow(clippy::too_many_lines, clippy::needless_return)] pub async fn broker_loop(&mut self) -> Result<(), Error> { let (tx_bc, rx) = broadcast::channel(65536); let (tx, mut rx_mpsc) = mpsc::channel(65536); @@ -318,14 +318,9 @@ where ) -> 
Result { match &event { Event::NewTestcase { - input: _, - client_config: _, - exit_kind: _, corpus_size, - observers_buf: _, - time, - executions, forward_id, + .. } => { let id = if let Some(id) = *forward_id { id @@ -335,7 +330,6 @@ where monitor.client_stats_insert(id); let client = monitor.client_stats_mut_for(id); client.update_corpus_size(*corpus_size as u64); - client.update_executions(*executions, *time); monitor.display(event.name(), id); Ok(BrokerEventResult::Forward) } @@ -388,15 +382,10 @@ where // Correctly handled the event Ok(BrokerEventResult::Handled) } - Event::Objective { - objective_size, - executions, - time, - } => { + Event::Objective { objective_size, .. } => { monitor.client_stats_insert(client_id); let client = monitor.client_stats_mut_for(client_id); client.update_objective_size(*objective_size as u64); - client.update_executions(*executions, *time); monitor.display(event.name(), client_id); Ok(BrokerEventResult::Handled) } @@ -410,7 +399,7 @@ where log::log!((*severity_level).into(), "{message}"); Ok(BrokerEventResult::Handled) } - Event::CustomBuf { .. } => Ok(BrokerEventResult::Forward), + Event::CustomBuf { .. } | Event::Stop => Ok(BrokerEventResult::Forward), //_ => Ok(BrokerEventResult::Forward), } } @@ -594,7 +583,7 @@ where impl TcpEventManager where EMH: EventManagerHooksTuple, - S: State + HasExecutions + HasMetadata, + S: State + HasExecutions + HasMetadata + HasImported, { /// Write the client id for a client [`EventManager`] to env vars pub fn to_env(&self, env_name: &str) { @@ -612,9 +601,11 @@ where event: Event, ) -> Result<(), Error> where - E: Executor + HasObservers, + E: Executor + HasObservers, + E::Observers: Serialize + ObserversTuple, for<'a> E::Observers: Deserialize<'a>, - Z: ExecutionProcessor + EvaluatorObservers, + Z: ExecutionProcessor + + EvaluatorObservers, { if !self.hooks.pre_exec_all(state, client_id, &event)? { return Ok(()); @@ -624,11 +615,9 @@ where input, client_config, exit_kind, - corpus_size: _, observers_buf, - time: _, - executions: _, forward_id, + .. } => { log::info!("Received new Testcase from {client_id:?} ({client_config:?}, forward {forward_id:?})"); @@ -641,17 +630,17 @@ where { state.scalability_monitor_mut().testcase_with_observers += 1; } - fuzzer.execute_and_process(state, self, input, &observers, &exit_kind, false)? + fuzzer.evaluate_execution(state, self, input, &observers, &exit_kind, false)? } else { #[cfg(feature = "scalability_introspection")] { state.scalability_monitor_mut().testcase_without_observers += 1; } - fuzzer.evaluate_input_with_observers::( - state, executor, self, input, false, - )? + fuzzer + .evaluate_input_with_observers::(state, executor, self, input, false)? 
}; if let Some(item) = _res.1 { + *state.imported_mut() += 1; log::info!("Added received Testcase as item #{item}"); } } @@ -662,6 +651,9 @@ where } } } + Event::Stop => { + state.request_stop(); + } _ => { return Err(Error::unknown(format!( "Received illegal message that message should not have arrived: {:?}.", @@ -749,11 +741,13 @@ where impl EventProcessor for TcpEventManager where - E: HasObservers + Executor, + E: HasObservers + Executor, + E::Observers: Serialize + ObserversTuple, for<'a> E::Observers: Deserialize<'a>, EMH: EventManagerHooksTuple, - S: State + HasExecutions + HasMetadata, - Z: EvaluatorObservers + ExecutionProcessor, + S: State + HasExecutions + HasMetadata + HasImported, + Z: EvaluatorObservers + + ExecutionProcessor, { fn process( &mut self, @@ -814,15 +808,21 @@ where Ok(count) } + + fn on_shutdown(&mut self) -> Result<(), Error> { + self.send_exiting() + } } impl EventManager for TcpEventManager where - E: HasObservers + Executor, + E: HasObservers + Executor, + E::Observers: Serialize + ObserversTuple, for<'a> E::Observers: Deserialize<'a>, EMH: EventManagerHooksTuple, - S: State + HasExecutions + HasMetadata + HasLastReportTime, - Z: EvaluatorObservers + ExecutionProcessor, + S: State + HasExecutions + HasMetadata + HasLastReportTime + HasImported, + Z: EvaluatorObservers + + ExecutionProcessor, { } @@ -962,27 +962,35 @@ where #[cfg(feature = "std")] impl EventProcessor for TcpRestartingEventManager where - E: HasObservers + Executor, Z>, + E: HasObservers + Executor, Z, State = S>, for<'a> E::Observers: Deserialize<'a>, + E::Observers: ObserversTuple + Serialize, EMH: EventManagerHooksTuple, - S: State + HasExecutions + HasMetadata, + S: State + HasExecutions + HasMetadata + HasImported, SP: ShMemProvider + 'static, - Z: EvaluatorObservers + ExecutionProcessor, //CE: CustomEvent, + Z: EvaluatorObservers, E::Observers, State = S> + + ExecutionProcessor, E::Observers>, //CE: CustomEvent, { fn process(&mut self, fuzzer: &mut Z, state: &mut S, executor: &mut E) -> Result { self.tcp_mgr.process(fuzzer, state, executor) } + + fn on_shutdown(&mut self) -> Result<(), Error> { + self.send_exiting() + } } #[cfg(feature = "std")] impl EventManager for TcpRestartingEventManager where - E: HasObservers + Executor, Z>, + E: HasObservers + Executor, Z, State = S>, + E::Observers: ObserversTuple + Serialize, for<'a> E::Observers: Deserialize<'a>, EMH: EventManagerHooksTuple, - S: State + HasExecutions + HasMetadata + HasLastReportTime, + S: State + HasExecutions + HasMetadata + HasLastReportTime + HasImported, SP: ShMemProvider + 'static, - Z: EvaluatorObservers + ExecutionProcessor, //CE: CustomEvent, + Z: EvaluatorObservers, E::Observers, State = S> + + ExecutionProcessor, E::Observers>, //CE: CustomEvent, { } @@ -1061,7 +1069,8 @@ pub enum TcpManagerKind { } /// Sets up a restarting fuzzer, using the [`StdShMemProvider`], and standard features. -/// The restarting mgr is a combination of restarter and runner, that can be used on systems with and without `fork` support. +/// +/// The [`TcpRestartingEventManager`] is a combination of restarter and runner, that can be used on systems with and without `fork` support. /// The restarter will spawn a new process each time the child crashes or timeouts. 
#[cfg(feature = "std")] #[allow(clippy::type_complexity)] @@ -1078,7 +1087,7 @@ pub fn setup_restarting_mgr_tcp( > where MT: Monitor + Clone, - S: State + HasExecutions + HasMetadata, + S: State + HasExecutions + HasMetadata + HasImported, { TcpRestartingMgr::builder() .shmem_provider(StdShMemProvider::new()?) @@ -1090,7 +1099,9 @@ where .launch() } -/// Provides a `builder` which can be used to build a [`TcpRestartingMgr`], which is a combination of a +/// Provides a `builder` which can be used to build a [`TcpRestartingMgr`]. +/// +/// The [`TcpRestartingMgr`] is a combination of a /// `restarter` and `runner`, that can be used on systems both with and without `fork` support. The /// `restarter` will start a new process each time the child crashes or times out. #[cfg(feature = "std")] @@ -1143,7 +1154,7 @@ impl TcpRestartingMgr where EMH: EventManagerHooksTuple + Copy + Clone, SP: ShMemProvider, - S: State + HasExecutions + HasMetadata, + S: State + HasExecutions + HasMetadata + HasImported, MT: Monitor + Clone, { /// Launch the restarting manager @@ -1313,7 +1324,7 @@ where // At this point we are the fuzzer *NOT* the restarter. // We setup signal handlers to clean up shmem segments used by state restorer #[cfg(all(unix, not(miri)))] - if let Err(_e) = unsafe { setup_signal_handler(addr_of_mut!(EVENTMGR_SIGHANDLER_STATE)) } { + if let Err(_e) = unsafe { setup_signal_handler(&raw mut EVENTMGR_SIGHANDLER_STATE) } { // We can live without a proper ctrl+c signal handler. Print and ignore. log::error!("Failed to setup signal handlers: {_e}"); } diff --git a/libafl/src/executors/combined.rs b/libafl/src/executors/combined.rs index 245327106b..639f1f1528 100644 --- a/libafl/src/executors/combined.rs +++ b/libafl/src/executors/combined.rs @@ -1,13 +1,13 @@ //! A `CombinedExecutor` wraps a primary executor and a secondary one //! In comparison to the [`crate::executors::DiffExecutor`] it does not run the secondary executor in `run_target`. -use core::fmt::Debug; +use core::{fmt::Debug, time::Duration}; use libafl_bolts::tuples::RefIndexable; +use super::HasTimeout; use crate::{ executors::{Executor, ExitKind, HasObservers}, - observers::UsesObservers, state::{HasExecutions, UsesState}, Error, }; @@ -57,12 +57,31 @@ where mgr: &mut EM, input: &Self::Input, ) -> Result { - *state.executions_mut() += 1; - self.primary.run_target(fuzzer, state, mgr, input) } } +impl HasTimeout for CombinedExecutor +where + A: HasTimeout, + B: HasTimeout, +{ + #[inline] + fn set_timeout(&mut self, timeout: Duration) { + self.primary.set_timeout(timeout); + self.secondary.set_timeout(timeout); + } + + #[inline] + fn timeout(&self) -> Duration { + assert!( + self.primary.timeout() == self.secondary.timeout(), + "Primary and Secondary Executors have different timeouts!" 
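// Editor's sketch, not part of this patch: a typical call site for the helper
// above. The parameter order (monitor, broker port, event config) mirrors the
// LLMP restarting helpers and is an assumption; the port value is hypothetical.
let (state, mut restarting_mgr) = setup_restarting_mgr_tcp(
    SimpleMonitor::new(|s| println!("{s}")),
    1337,
    EventConfig::AlwaysUnique,
)?;
// `state` is `None` on the first spawn and `Some(restored_state)` after a restart.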
+ ); + self.primary.timeout() + } +} + impl UsesState for CombinedExecutor where A: UsesState, @@ -70,17 +89,12 @@ where type State = A::State; } -impl UsesObservers for CombinedExecutor -where - A: UsesObservers, -{ - type Observers = A::Observers; -} - impl HasObservers for CombinedExecutor where A: HasObservers, { + type Observers = A::Observers; + #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { self.primary.observers() diff --git a/libafl/src/executors/command.rs b/libafl/src/executors/command.rs index 8f571f82ab..946c45a695 100644 --- a/libafl/src/executors/command.rs +++ b/libafl/src/executors/command.rs @@ -5,40 +5,49 @@ use core::{ marker::PhantomData, ops::IndexMut, }; -#[cfg(unix)] -use std::os::unix::ffi::OsStrExt; -#[cfg(feature = "std")] -use std::process::Child; +#[cfg(target_os = "linux")] +use std::{ + ffi::{CStr, CString}, + os::fd::AsRawFd, +}; use std::{ ffi::{OsStr, OsString}, io::{Read, Write}, + os::unix::ffi::OsStrExt, path::{Path, PathBuf}, - process::{Command, Stdio}, + process::{Child, Command, Stdio}, time::Duration, }; +#[cfg(target_os = "linux")] +use libafl_bolts::core_affinity::CoreId; use libafl_bolts::{ fs::{get_unique_std_input_file, InputFile}, tuples::{Handle, MatchName, RefIndexable}, AsSlice, }; +#[cfg(target_os = "linux")] +use libc::STDIN_FILENO; +#[cfg(target_os = "linux")] +use nix::unistd::Pid; +#[cfg(target_os = "linux")] +use typed_builder::TypedBuilder; -#[cfg(all(feature = "std", unix))] -use crate::executors::{Executor, ExitKind}; +use super::HasTimeout; use crate::{ - executors::HasObservers, - inputs::{HasTargetBytes, UsesInput}, - observers::{ObserversTuple, StdErrObserver, StdOutObserver, UsesObservers}, - state::{HasExecutions, State, UsesState}, + corpus::Corpus, + executors::{hooks::ExecutorHooksTuple, Executor, ExitKind, HasObservers}, + inputs::{HasTargetBytes, Input, UsesInput}, + observers::{ObserversTuple, StdErrObserver, StdOutObserver}, + state::{HasCorpus, HasExecutions, State, UsesState}, std::borrow::ToOwned, + Error, }; -#[cfg(feature = "std")] -use crate::{inputs::Input, Error}; /// How to deliver input to an external program /// `StdIn`: The target reads from stdin /// `File`: The target reads from the specified [`InputFile`] -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq, Default)] pub enum InputLocation { /// Mutate a commandline argument to deliver an input Arg { @@ -46,6 +55,7 @@ pub enum InputLocation { argnum: usize, }, /// Deliver input via `StdIn` + #[default] StdIn, /// Deliver the input via the specified [`InputFile`] /// You can use specify [`InputFile::create(INPUTFILE_STD)`] to use a default filename. @@ -105,7 +115,7 @@ where for (i, arg) in args.enumerate() { if i == *argnum { - debug_assert_eq!(arg, "DUMMY"); + debug_assert_eq!(arg, "PLACEHOLDER"); #[cfg(unix)] cmd.arg(OsStr::from_bytes(input.target_bytes().as_slice())); // There is an issue here that the chars on Windows are 16 bit wide. @@ -151,17 +161,121 @@ where fn exec_timeout(&self) -> Duration { self.timeout } + fn exec_timeout_mut(&mut self) -> &mut Duration { + &mut self.timeout + } } -/// A `CommandExecutor` is a wrapper around [`std::process::Command`] to execute a target as a child process. 
+/// Linux specific [`CommandConfigurator`] that leverages `ptrace` +/// +/// This configurator was primarly developed to be used in conjunction with +/// [`crate::executors::hooks::intel_pt::IntelPTHook`] +#[cfg(target_os = "linux")] +#[derive(Debug, Clone, PartialEq, Eq, TypedBuilder)] +pub struct PTraceCommandConfigurator { + #[builder(setter(into))] + path: CString, + #[builder(default)] + args: Vec, + #[builder(default)] + env: Vec, + #[builder(default)] + input_location: InputLocation, + #[builder(default, setter(strip_option))] + cpu: Option, + #[builder(default = 5 * 60, setter(transform = |t: Duration| t.as_secs() as u32))] + timeout: u32, +} + +#[cfg(target_os = "linux")] +impl CommandConfigurator for PTraceCommandConfigurator +where + I: HasTargetBytes, +{ + fn spawn_child(&mut self, input: &I) -> Result { + use nix::{ + sys::{ + personality, ptrace, + signal::{raise, Signal}, + }, + unistd::{alarm, dup2, execve, fork, pipe, write, ForkResult}, + }; + + match unsafe { fork() } { + Ok(ForkResult::Parent { child }) => Ok(child), + Ok(ForkResult::Child) => { + ptrace::traceme().unwrap(); + + if let Some(c) = self.cpu { + c.set_affinity_forced().unwrap(); + } + + // Disable Address Space Layout Randomization (ASLR) for consistent memory + // addresses between executions + let pers = personality::get().unwrap(); + personality::set(pers | personality::Persona::ADDR_NO_RANDOMIZE).unwrap(); + + match &mut self.input_location { + InputLocation::Arg { argnum } => { + // self.args[argnum] will be overwritten if already present. + assert!( + *argnum <= self.args.len(), + "If you want to fuzz arg {argnum}, you have to specify the other {argnum} (static) args." + ); + let terminated_input = [&input.target_bytes() as &[u8], &[0]].concat(); + let cstring_input = + CString::from(CStr::from_bytes_until_nul(&terminated_input).unwrap()); + if *argnum == self.args.len() { + self.args.push(cstring_input); + } else { + self.args[*argnum] = cstring_input; + } + } + InputLocation::StdIn => { + let (pipe_read, pipe_write) = pipe().unwrap(); + write(pipe_write, &input.target_bytes()).unwrap(); + dup2(pipe_read.as_raw_fd(), STDIN_FILENO).unwrap(); + } + InputLocation::File { out_file } => { + out_file.write_buf(input.target_bytes().as_slice()).unwrap(); + } + } + + // After this STOP, the process is traced with PTrace (no hooks yet) + raise(Signal::SIGSTOP).unwrap(); + + alarm::set(self.timeout); + + // Just before this returns, hooks pre_execs are called + execve(&self.path, &self.args, &self.env).unwrap(); + unreachable!("execve returns only on error and its result is unwrapped"); + } + Err(e) => Err(Error::unknown(format!("Fork failed: {e}"))), + } + } + + fn exec_timeout(&self) -> Duration { + Duration::from_secs(u64::from(self.timeout)) + } + + /// Use [`PTraceCommandConfigurator::builder().timeout`] instead + fn exec_timeout_mut(&mut self) -> &mut Duration { + unimplemented!("Use [`PTraceCommandConfigurator::builder().timeout`] instead") + } +} + +/// A `CommandExecutor` is a wrapper around [`Command`] to execute a target as a child process. +/// /// Construct a `CommandExecutor` by implementing [`CommandConfigurator`] for a type of your choice and calling [`CommandConfigurator::into_executor`] on it. /// Instead, you can use [`CommandExecutor::builder()`] to construct a [`CommandExecutor`] backed by a [`StdCommandConfigurator`]. 
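// Editor's sketch, not part of this patch: building the ptrace-backed configurator
// above and turning it into an executor via `CommandConfigurator::into_executor`
// (defined further down). The target path and argument are hypothetical.
let configurator = PTraceCommandConfigurator::builder()
    .path(CString::new("/usr/local/bin/fuzz_target").unwrap())
    .args(vec![CString::new("--from-stdin").unwrap()])
    .input_location(InputLocation::StdIn)
    .cpu(CoreId(0)) // optional: pin the child to a core
    .timeout(Duration::from_secs(10)) // stored as whole seconds, enforced via alarm(2)
    .build();
let mut executor = configurator.into_executor(tuple_list!());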
-pub struct CommandExecutor { +pub struct CommandExecutor { /// The wrapped command configurer configurer: T, /// The observers used by this executor observers: OT, + hooks: HT, phantom: PhantomData, + phantom_child: PhantomData, } impl CommandExecutor<(), (), ()> { @@ -173,7 +287,7 @@ impl CommandExecutor<(), (), ()> { /// `arg`, `args`, `env`, and so on. /// /// By default, input is read from stdin, unless you specify a different location using - /// * `arg_input_arg` for input delivered _as_ an command line argument + /// * `arg_input_arg` for input delivered _as_ a command line argument /// * `arg_input_file` for input via a file of a specific name /// * `arg_input_file_std` for a file with default name (at the right location in the arguments) #[must_use] @@ -182,20 +296,22 @@ impl CommandExecutor<(), (), ()> { } } -impl Debug for CommandExecutor +impl Debug for CommandExecutor where T: Debug, OT: Debug, + HT: Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("CommandExecutor") .field("inner", &self.configurer) .field("observers", &self.observers) + .field("hooks", &self.hooks) .finish() } } -impl CommandExecutor +impl CommandExecutor where T: Debug, OT: Debug, @@ -207,24 +323,13 @@ where } // this only works on unix because of the reliance on checking the process signal for detecting OOM -#[cfg(all(feature = "std", unix))] -impl Executor for CommandExecutor +impl CommandExecutor where - EM: UsesState, - S: State + HasExecutions, - T: CommandConfigurator + Debug, - OT: Debug + MatchName + ObserversTuple, - Z: UsesState, + S: State + HasExecutions + UsesInput, + T: CommandConfigurator + Debug, + OT: Debug + ObserversTuple, { - fn run_target( - &mut self, - _fuzzer: &mut Z, - state: &mut Self::State, - _mgr: &mut EM, - input: &Self::Input, - ) -> Result { - use std::os::unix::prelude::ExitStatusExt; - + fn execute_input_with_command(&mut self, state: &mut S, input: &I) -> Result { use wait_timeout::ChildExt; *state.executions_mut() += 1; @@ -232,29 +337,21 @@ where let mut child = self.configurer.spawn_child(input)?; - let res = match child + let exit_kind = child .wait_timeout(self.configurer.exec_timeout()) .expect("waiting on child failed") - .map(|status| status.signal()) - { - // for reference: https://www.man7.org/linux/man-pages/man7/signal.7.html - Some(Some(9)) => Ok(ExitKind::Oom), - Some(Some(_)) => Ok(ExitKind::Crash), - Some(None) => Ok(ExitKind::Ok), - None => { + .map(|status| self.configurer.exit_kind_from_status(&status)) + .unwrap_or_else(|| { // if this fails, there is not much we can do. let's hope it failed because the process finished // in the meantime. drop(child.kill()); // finally, try to wait to properly clean up system resources. 
drop(child.wait()); - Ok(ExitKind::Timeout) - } - }; + ExitKind::Timeout + }); - if let Ok(exit_kind) = res { - self.observers - .post_exec_child_all(state, input, &exit_kind)?; - } + self.observers + .post_exec_child_all(state, input, &exit_kind)?; if let Some(h) = &mut self.configurer.stdout_observer() { let mut stdout = Vec::new(); @@ -278,31 +375,141 @@ where let obs = observers.index_mut(h); obs.observe_stderr(&stderr); } - res + Ok(exit_kind) } } -impl UsesState for CommandExecutor +impl Executor for CommandExecutor +where + EM: UsesState, + S: State + HasExecutions + UsesInput, + T: CommandConfigurator + Debug, + OT: Debug + MatchName + ObserversTuple, + Z: UsesState, +{ + fn run_target( + &mut self, + _fuzzer: &mut Z, + state: &mut Self::State, + _mgr: &mut EM, + input: &Self::Input, + ) -> Result { + self.execute_input_with_command(state, input) + } +} + +// this only works on unix because of the reliance on checking the process signal for detecting OOM +impl HasTimeout for CommandExecutor +where + S: HasCorpus, + T: CommandConfigurator<::Input>, +{ + #[inline] + fn set_timeout(&mut self, timeout: Duration) { + *self.configurer.exec_timeout_mut() = timeout; + } + + #[inline] + fn timeout(&self) -> Duration { + self.configurer.exec_timeout() + } +} + +#[cfg(target_os = "linux")] +impl Executor for CommandExecutor +where + EM: UsesState, + S: State + HasExecutions + UsesInput, + T: CommandConfigurator + Debug, + OT: Debug + MatchName + ObserversTuple, + Z: UsesState, + HT: ExecutorHooksTuple, +{ + /// Linux specific low level implementation, to directly handle `fork`, `exec` and use linux + /// `ptrace` + /// + /// Hooks' `pre_exec` and observers' `pre_exec_child` are called with the child process stopped + /// just before the `exec` return (after forking). + fn run_target( + &mut self, + _fuzzer: &mut Z, + state: &mut Self::State, + _mgr: &mut EM, + input: &Self::Input, + ) -> Result { + use nix::sys::{ + ptrace, + signal::Signal, + wait::{ + waitpid, WaitPidFlag, + WaitStatus::{Exited, PtraceEvent, Signaled, Stopped}, + }, + }; + + *state.executions_mut() += 1; + + let child = self.configurer.spawn_child(input)?; + + let wait_status = waitpid(child, Some(WaitPidFlag::WUNTRACED))?; + if !matches!(wait_status, Stopped(c, Signal::SIGSTOP) if c == child) { + return Err(Error::unknown("Unexpected state of child process")); + } + + ptrace::setoptions(child, ptrace::Options::PTRACE_O_TRACEEXEC)?; + ptrace::cont(child, None)?; + + let wait_status = waitpid(child, None)?; + if !matches!(wait_status, PtraceEvent(c, Signal::SIGTRAP, e) + if c == child && e == (ptrace::Event::PTRACE_EVENT_EXEC as i32) + ) { + return Err(Error::unknown("Unexpected state of child process")); + } + + self.observers.pre_exec_child_all(state, input)?; + if *state.executions() == 1 { + self.hooks.init_all::(state); + } + self.hooks.pre_exec_all(state, input); + + ptrace::detach(child, None)?; + let res = match waitpid(child, None)? { + Exited(pid, 0) if pid == child => ExitKind::Ok, + Exited(pid, _) if pid == child => ExitKind::Crash, + Signaled(pid, Signal::SIGALRM, _has_coredump) if pid == child => ExitKind::Timeout, + Signaled(pid, Signal::SIGABRT, _has_coredump) if pid == child => ExitKind::Crash, + Signaled(pid, Signal::SIGKILL, _has_coredump) if pid == child => ExitKind::Oom, + Stopped(pid, Signal::SIGALRM) if pid == child => ExitKind::Timeout, + Stopped(pid, Signal::SIGABRT) if pid == child => ExitKind::Crash, + Stopped(pid, Signal::SIGKILL) if pid == child => ExitKind::Oom, + s => { + // TODO other cases? 
+ return Err(Error::unsupported( + format!("Target program returned an unexpected state when waiting on it. {s:?} (waiting for pid {child})") + )); + } + }; + + self.hooks.post_exec_all(state, input); + self.observers.post_exec_child_all(state, input, &res)?; + Ok(res) + } +} + +impl UsesState for CommandExecutor where S: State, { type State = S; } -impl UsesObservers for CommandExecutor -where - OT: ObserversTuple, - S: State, -{ - type Observers = OT; -} - -impl HasObservers for CommandExecutor +impl HasObservers for CommandExecutor where S: State, T: Debug, - OT: ObserversTuple, + OT: ObserversTuple, { + type Observers = OT; + fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { RefIndexable::from(&self.observers) } @@ -378,7 +585,8 @@ impl CommandExecutorBuilder { pub fn arg_input_arg(&mut self) -> &mut Self { let argnum = self.args.len(); self.input(InputLocation::Arg { argnum }); - // self.arg("DUMMY"); + // Placeholder arg that gets replaced with the input name later. + self.arg("PLACEHOLDER"); self } @@ -481,7 +689,7 @@ impl CommandExecutorBuilder { observers: OT, ) -> Result, Error> where - OT: MatchName + ObserversTuple, + OT: MatchName + ObserversTuple, S: UsesInput, S::Input: Input + HasTargetBytes, { @@ -539,10 +747,9 @@ impl CommandExecutorBuilder { } } -/// A `CommandConfigurator` takes care of creating and spawning a [`std::process::Command`] for the [`CommandExecutor`]. +/// A `CommandConfigurator` takes care of creating and spawning a [`Command`] for the [`CommandExecutor`]. /// # Example -#[cfg_attr(all(feature = "std", unix), doc = " ```")] -#[cfg_attr(not(all(feature = "std", unix)), doc = " ```ignore")] +/// ``` /// use std::{io::Write, process::{Stdio, Command, Child}, time::Duration}; /// use libafl::{Error, inputs::{BytesInput, HasTargetBytes, Input, UsesInput}, executors::{Executor, command::CommandConfigurator}, state::{UsesState, HasExecutions}}; /// use libafl_bolts::AsSlice; @@ -569,6 +776,9 @@ impl CommandExecutorBuilder { /// fn exec_timeout(&self) -> Duration { /// Duration::from_secs(5) /// } +/// fn exec_timeout_mut(&mut self) -> &mut Duration { +/// todo!() +/// } /// } /// /// fn make_executor() -> impl Executor @@ -580,9 +790,7 @@ impl CommandExecutorBuilder { /// MyExecutor.into_executor(()) /// } /// ``` - -#[cfg(all(feature = "std", any(unix, doc)))] -pub trait CommandConfigurator: Sized { +pub trait CommandConfigurator: Sized { /// Get the stdout fn stdout_observer(&self) -> Option> { None @@ -593,20 +801,56 @@ pub trait CommandConfigurator: Sized { } /// Spawns a new process with the given configuration. - fn spawn_child(&mut self, input: &I) -> Result; + fn spawn_child(&mut self, input: &I) -> Result; /// Provides timeout duration for execution of the child process. fn exec_timeout(&self) -> Duration; + /// Set the timeout duration for execution of the child process. + fn exec_timeout_mut(&mut self) -> &mut Duration; + + /// Maps the exit status of the child process to an `ExitKind`. + #[inline] + fn exit_kind_from_status(&self, status: &std::process::ExitStatus) -> ExitKind { + use crate::std::os::unix::process::ExitStatusExt; + match status.signal() { + // for reference: https://www.man7.org/linux/man-pages/man7/signal.7.html + Some(9) => ExitKind::Oom, + Some(_) => ExitKind::Crash, + None => ExitKind::Ok, + } + } /// Create an `Executor` from this `CommandConfigurator`. 
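// Editor's note, not part of this patch: the default `exit_kind_from_status` above
// maps termination signals to `ExitKind` (SIGKILL is treated as an OOM kill). A
// `CommandConfigurator` whose target reports bugs through a special exit code
// (hypothetical value 42 here) could override it to compute something like:
fn custom_exit_kind(status: &std::process::ExitStatus) -> ExitKind {
    match status.code() {
        Some(42) => ExitKind::Crash, // hypothetical bug-oracle exit code
        Some(_) => ExitKind::Ok,     // any other clean exit
        None => ExitKind::Crash,     // terminated by a signal
    }
}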
- fn into_executor(self, observers: OT) -> CommandExecutor + fn into_executor(self, observers: OT) -> CommandExecutor where OT: MatchName, { CommandExecutor { configurer: self, observers, + hooks: (), phantom: PhantomData, + phantom_child: PhantomData, + } + } + + /// Create an `Executor` with hooks from this `CommandConfigurator`. + fn into_executor_with_hooks( + self, + observers: OT, + hooks: HT, + ) -> CommandExecutor + where + OT: MatchName, + HT: ExecutorHooksTuple, + S: UsesInput, + { + CommandExecutor { + configurer: self, + observers, + hooks, + phantom: PhantomData, + phantom_child: PhantomData, } } } @@ -619,14 +863,13 @@ mod tests { command::{CommandExecutor, InputLocation}, Executor, }, - fuzzer::test::NopFuzzer, + fuzzer::NopFuzzer, inputs::BytesInput, monitors::SimpleMonitor, state::NopState, }; #[test] - #[cfg(unix)] #[cfg_attr(miri, ignore)] fn test_builder() { let mut mgr = SimpleEventManager::new(SimpleMonitor::new(|status| { diff --git a/libafl/src/executors/differential.rs b/libafl/src/executors/differential.rs index 3cfaaa8e86..316295be11 100644 --- a/libafl/src/executors/differential.rs +++ b/libafl/src/executors/differential.rs @@ -1,8 +1,14 @@ //! Executor for differential fuzzing. +//! //! It wraps two executors that will be run after each other with the same input. //! In comparison to the [`crate::executors::CombinedExecutor`] it also runs the secondary executor in `run_target`. //! -use core::{cell::UnsafeCell, fmt::Debug, ptr}; +use core::{ + cell::UnsafeCell, + fmt::Debug, + ops::{Deref, DerefMut}, + ptr, +}; use libafl_bolts::{ ownedref::OwnedMutPtr, @@ -10,11 +16,13 @@ use libafl_bolts::{ }; use serde::{Deserialize, Serialize}; +use super::HasTimeout; use crate::{ + corpus::Corpus, executors::{Executor, ExitKind, HasObservers}, inputs::UsesInput, - observers::{DifferentialObserversTuple, ObserversTuple, UsesObservers}, - state::UsesState, + observers::{DifferentialObserversTuple, ObserversTuple}, + state::{HasCorpus, UsesState}, Error, }; @@ -32,9 +40,7 @@ impl DiffExecutor { where A: UsesState + HasObservers, B: UsesState::State> + HasObservers, - DOT: DifferentialObserversTuple::State>, - OTA: ObserversTuple<::State>, - OTB: ObserversTuple<::State>, + DOT: DifferentialObserversTuple, { Self { primary, @@ -63,7 +69,11 @@ where A: Executor + HasObservers, B: Executor::State> + HasObservers, EM: UsesState::State>, - DOT: DifferentialObserversTuple::State>, + ::Observers: + ObserversTuple<<::State as UsesInput>::Input, ::State>, + ::Observers: + ObserversTuple<<::State as UsesInput>::Input, ::State>, + DOT: DifferentialObserversTuple + MatchName, Z: UsesState::State>, { fn run_target( @@ -111,6 +121,27 @@ where } } +impl HasTimeout for DiffExecutor +where + A: HasTimeout, + B: HasTimeout, +{ + #[inline] + fn set_timeout(&mut self, timeout: core::time::Duration) { + self.primary.set_timeout(timeout); + self.secondary.set_timeout(timeout); + } + + #[inline] + fn timeout(&self) -> core::time::Duration { + assert!( + self.primary.timeout() == self.secondary.timeout(), + "Primary and Secondary Executors have different timeouts!" 
+ ); + self.primary.timeout() + } +} + /// Proxy the observers of the inner executors #[derive(Serialize, Deserialize, Debug)] #[serde( @@ -122,34 +153,35 @@ pub struct ProxyObserversTuple { differential: DOT, } -impl ObserversTuple for ProxyObserversTuple +impl ObserversTuple for ProxyObserversTuple where - A: ObserversTuple, - B: ObserversTuple, - DOT: DifferentialObserversTuple, - S: UsesInput, + A: ObserversTuple, + B: ObserversTuple, + DOT: DifferentialObserversTuple + MatchName, + S: HasCorpus, + S::Corpus: Corpus, { - fn pre_exec_all(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { + fn pre_exec_all(&mut self, state: &mut S, input: &I) -> Result<(), Error> { self.differential.pre_exec_all(state, input) } fn post_exec_all( &mut self, state: &mut S, - input: &S::Input, + input: &I, exit_kind: &ExitKind, ) -> Result<(), Error> { self.differential.post_exec_all(state, input, exit_kind) } - fn pre_exec_child_all(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { + fn pre_exec_child_all(&mut self, state: &mut S, input: &I) -> Result<(), Error> { self.differential.pre_exec_child_all(state, input) } fn post_exec_child_all( &mut self, state: &mut S, - input: &S::Input, + input: &I, exit_kind: &ExitKind, ) -> Result<(), Error> { self.differential @@ -157,6 +189,20 @@ where } } +impl Deref for ProxyObserversTuple { + type Target = DOT; + + fn deref(&self) -> &Self::Target { + &self.differential + } +} + +impl DerefMut for ProxyObserversTuple { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.differential + } +} + impl MatchName for ProxyObserversTuple where A: MatchName, @@ -188,22 +234,11 @@ where impl ProxyObserversTuple { fn set(&mut self, primary: &A, secondary: &B) { - self.primary = OwnedMutPtr::Ptr(ptr::from_ref(primary) as *mut A); - self.secondary = OwnedMutPtr::Ptr(ptr::from_ref(secondary) as *mut B); + self.primary = OwnedMutPtr::Ptr(ptr::from_ref(primary).cast_mut()); + self.secondary = OwnedMutPtr::Ptr(ptr::from_ref(secondary).cast_mut()); } } -impl UsesObservers for DiffExecutor -where - A: HasObservers, - B: HasObservers::State>, - OTA: ObserversTuple<::State>, - OTB: ObserversTuple<::State>, - DOT: DifferentialObserversTuple::State>, -{ - type Observers = ProxyObserversTuple; -} - impl UsesState for DiffExecutor where A: UsesState, @@ -213,12 +248,14 @@ where impl HasObservers for DiffExecutor where - A: HasObservers, - B: HasObservers::State>, - OTA: ObserversTuple<::State>, - OTB: ObserversTuple<::State>, - DOT: DifferentialObserversTuple::State>, + A: UsesState + HasObservers, + B: UsesState::State> + HasObservers, + DOT: DifferentialObserversTuple + MatchName, + OTA: ObserversTuple<::Input, ::State>, + OTB: ObserversTuple<::Input, ::State>, { + type Observers = ProxyObserversTuple; + #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { unsafe { diff --git a/libafl/src/executors/forkserver.rs b/libafl/src/executors/forkserver.rs index dd0b628afb..f3a403827b 100644 --- a/libafl/src/executors/forkserver.rs +++ b/libafl/src/executors/forkserver.rs @@ -9,7 +9,7 @@ use core::{ use std::{ env, ffi::{OsStr, OsString}, - io::{self, prelude::*, ErrorKind}, + io::{self, ErrorKind, Read, Write}, os::{ fd::{AsRawFd, BorrowedFd}, unix::{io::RawFd, process::CommandExt}, @@ -26,6 +26,7 @@ use libafl_bolts::{ tuples::{Handle, Handled, MatchNameRef, Prepend, RefIndexable}, AsSlice, AsSliceMut, Truncate, }; +use libc::RLIM_INFINITY; use nix::{ sys::{ select::{pselect, FdSet}, @@ -36,13 +37,18 @@ use nix::{ 
unistd::Pid, }; +use super::HasTimeout; #[cfg(feature = "regex")] -use crate::observers::{get_asan_runtime_flags_with_log_path, AsanBacktraceObserver}; +use crate::observers::{ + get_asan_runtime_flags, get_asan_runtime_flags_with_log_path, AsanBacktraceObserver, +}; use crate::{ executors::{Executor, ExitKind, HasObservers}, - inputs::{HasTargetBytes, Input, UsesInput}, + inputs::{ + BytesInput, HasTargetBytes, Input, NopTargetBytesConverter, TargetBytesConverter, UsesInput, + }, mutators::Tokens, - observers::{MapObserver, Observer, ObserversTuple, UsesObservers}, + observers::{MapObserver, Observer, ObserversTuple}, state::{HasExecutions, State, UsesState}, Error, }; @@ -53,12 +59,24 @@ const FS_NEW_ERROR: i32 = 0xeffe0000_u32 as i32; const FS_NEW_VERSION_MIN: u32 = 1; const FS_NEW_VERSION_MAX: u32 = 1; + +#[allow(clippy::cast_possible_wrap)] +const FS_OPT_ENABLED: i32 = 0x80000001_u32 as i32; + #[allow(clippy::cast_possible_wrap)] const FS_NEW_OPT_MAPSIZE: i32 = 1_u32 as i32; #[allow(clippy::cast_possible_wrap)] -const FS_NEW_OPT_SHDMEM_FUZZ: i32 = 2_u32 as i32; +const FS_OPT_MAPSIZE: i32 = 0x40000000_u32 as i32; + #[allow(clippy::cast_possible_wrap)] -const FS_NEW_OPT_AUTODICT: i32 = 0x00000800_u32 as i32; +const FS_OPT_SHDMEM_FUZZ: i32 = 0x01000000_u32 as i32; +#[allow(clippy::cast_possible_wrap)] +const FS_NEW_OPT_SHDMEM_FUZZ: i32 = 2_u32 as i32; + +#[allow(clippy::cast_possible_wrap)] +const FS_NEW_OPT_AUTODTCT: i32 = 0x00000800_u32 as i32; +#[allow(clippy::cast_possible_wrap)] +const FS_OPT_AUTODTCT: i32 = 0x10000000_u32 as i32; #[allow(clippy::cast_possible_wrap)] const FS_ERROR_MAP_SIZE: i32 = 1_u32 as i32; @@ -75,6 +93,9 @@ const FS_ERROR_OLD_CMPLOG: i32 = 32_u32 as i32; #[allow(clippy::cast_possible_wrap)] const FS_ERROR_OLD_CMPLOG_QEMU: i32 = 64_u32 as i32; +/// Forkserver message. We'll reuse it in a testcase. +const FAILED_TO_START_FORKSERVER_MSG: &str = "Failed to start forkserver"; + fn report_error_and_exit(status: i32) -> Result<(), Error> { /* Report on the error received via the forkserver controller and exit */ match status { @@ -114,6 +135,8 @@ pub trait ConfigTarget { fn setsid(&mut self) -> &mut Self; /// Sets a mem limit fn setlimit(&mut self, memlimit: u64) -> &mut Self; + /// enables core dumps (rlimit = infinity) + fn set_coredump(&mut self, enable: bool) -> &mut Self; /// Sets the stdin fn setstdin(&mut self, fd: RawFd, use_stdin: bool) -> &mut Self; /// Sets the AFL forkserver pipes @@ -200,19 +223,27 @@ impl ConfigTarget for Command { rlim_cur: memlimit, rlim_max: memlimit, }; - let r0 = libc::rlimit { - rlim_cur: 0, - rlim_max: 0, - }; - #[cfg(target_os = "openbsd")] - let mut ret = unsafe { libc::setrlimit(libc::RLIMIT_RSS, &r) }; + let ret = unsafe { libc::setrlimit(libc::RLIMIT_RSS, &r) }; #[cfg(not(target_os = "openbsd"))] - let mut ret = unsafe { libc::setrlimit(libc::RLIMIT_AS, &r) }; + let ret = unsafe { libc::setrlimit(libc::RLIMIT_AS, &r) }; if ret < 0 { return Err(io::Error::last_os_error()); } - ret = unsafe { libc::setrlimit(libc::RLIMIT_CORE, &r0) }; + Ok(()) + }; + // # Safety + // This calls our non-shady function from above. 
+ unsafe { self.pre_exec(func) } + } + + fn set_coredump(&mut self, enable: bool) -> &mut Self { + let func = move || { + let r0 = libc::rlimit { + rlim_cur: if enable { RLIM_INFINITY } else { 0 }, + rlim_max: if enable { RLIM_INFINITY } else { 0 }, + }; + let ret = unsafe { libc::setrlimit(libc::RLIMIT_CORE, &r0) }; if ret < 0 { return Err(io::Error::last_os_error()); } @@ -280,6 +311,10 @@ impl Drop for Forkserver { } } +const fn fs_opt_get_mapsize(x: i32) -> i32 { + ((x & 0x00fffffe) >> 1) + 1 +} + #[allow(clippy::fn_params_excessive_bools)] impl Forkserver { /// Create a new [`Forkserver`] @@ -293,6 +328,8 @@ impl Forkserver { memlimit: u64, is_persistent: bool, is_deferred_frksrv: bool, + dump_asan_logs: bool, + coverage_map_size: Option, debug_output: bool, ) -> Result { Self::with_kill_signal( @@ -304,6 +341,8 @@ impl Forkserver { memlimit, is_persistent, is_deferred_frksrv, + dump_asan_logs, + coverage_map_size, debug_output, KILL_SIGNAL_DEFAULT, ) @@ -322,17 +361,32 @@ impl Forkserver { memlimit: u64, is_persistent: bool, is_deferred_frksrv: bool, + dump_asan_logs: bool, + coverage_map_size: Option, debug_output: bool, kill_signal: Signal, ) -> Result { + let Some(coverage_map_size) = coverage_map_size else { + return Err(Error::unknown("Coverage map size unknown. Use coverage_map_size() to tell the forkserver about the map size.")); + }; + if env::var("AFL_MAP_SIZE").is_err() { log::warn!("AFL_MAP_SIZE not set. If it is unset, the forkserver may fail to start up"); } if env::var("__AFL_SHM_ID").is_err() { - log::warn!("__AFL_SHM_ID not set. It is necessary to set this env, otherwise the forkserver cannot communicate with the fuzzer"); + return Err(Error::unknown("__AFL_SHM_ID not set. It is necessary to set this env, otherwise the forkserver cannot communicate with the fuzzer".to_string())); } + let afl_debug = if let Ok(afl_debug) = env::var("AFL_DEBUG") { + if afl_debug != "1" && afl_debug != "0" { + return Err(Error::illegal_argument("AFL_DEBUG must be either 1 or 0")); + } + afl_debug == "1" + } else { + false + }; + let mut st_pipe = Pipe::new().unwrap(); let mut ctl_pipe = Pipe::new().unwrap(); @@ -343,7 +397,6 @@ impl Forkserver { }; let mut command = Command::new(target); - // Setup args, stdio command .args(args) @@ -351,6 +404,8 @@ impl Forkserver { .stdout(stdout) .stderr(stderr); + command.env("AFL_MAP_SIZE", format!("{coverage_map_size}")); + // Persistent, deferred forkserver if is_persistent { command.env("__AFL_PERSISTENT", "1"); @@ -361,12 +416,20 @@ impl Forkserver { } #[cfg(feature = "regex")] - command.env("ASAN_OPTIONS", get_asan_runtime_flags_with_log_path()); + { + let asan_options = if dump_asan_logs { + get_asan_runtime_flags_with_log_path() + } else { + get_asan_runtime_flags() + }; + command.env("ASAN_OPTIONS", asan_options); + } let fsrv_handle = match command .env("LD_BIND_NOW", "1") .envs(envs) .setlimit(memlimit) + .set_coredump(afl_debug) .setsid() .setstdin(input_filefd, use_stdin) .setpipe( @@ -452,27 +515,53 @@ impl Forkserver { } /// Read from the st pipe - pub fn read_st(&mut self) -> Result<(usize, i32), Error> { + pub fn read_st(&mut self) -> Result { let mut buf: [u8; 4] = [0_u8; 4]; - let rlen = self.st_pipe.read(&mut buf)?; - let val: i32 = i32::from_ne_bytes(buf); - Ok((rlen, val)) + if rlen == size_of::() { + Ok(i32::from_ne_bytes(buf)) + } else { + // NOTE: The underlying API does not guarantee that the read will return + // exactly four bytes, but the chance of this happening is very low. 
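// Editor's sketch, not part of this patch: how the legacy AFL++ option word ties
// the `FS_OPT_*` constants to `fs_opt_get_mapsize`. A forkserver announcing a
// 65536-entry map sets the flags and packs the size into bits 1..=23 as
// `(size - 1) << 1`:
let announced = FS_OPT_ENABLED | FS_OPT_MAPSIZE | ((65536 - 1) << 1);
assert_eq!(fs_opt_get_mapsize(announced), 65536);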
+ // This is a sacrifice of correctness for performance. + Err(Error::illegal_state(format!( + "Could not read from st pipe. Expected {} bytes, got {rlen} bytes", + size_of::() + ))) + } } /// Read bytes of any length from the st pipe - pub fn read_st_size(&mut self, size: usize) -> Result<(usize, Vec), Error> { - let mut buf = vec![0; size]; - - let rlen = self.st_pipe.read(&mut buf)?; - Ok((rlen, buf)) + pub fn read_st_of_len(&mut self, size: usize) -> Result, Error> { + let mut buf = Vec::with_capacity(size); + // SAFETY: `buf` will not be returned with `Ok` unless it is filled with `size` bytes. + // So it is ok to set the length to `size` such that the length of `&mut buf` is `size` + // and the `read_exact` call will try to read `size` bytes. + #[allow( + clippy::uninit_vec, + reason = "The vec will be filled right after setting the length." + )] + unsafe { + buf.set_len(size); + } + self.st_pipe.read_exact(&mut buf)?; + Ok(buf) } /// Write to the ctl pipe - pub fn write_ctl(&mut self, val: i32) -> Result { + pub fn write_ctl(&mut self, val: i32) -> Result<(), Error> { let slen = self.ctl_pipe.write(&val.to_ne_bytes())?; - - Ok(slen) + if slen == size_of::() { + Ok(()) + } else { + // NOTE: The underlying API does not guarantee that exactly four bytes + // are written, but the chance of this happening is very low. + // This is a sacrifice of correctness for performance. + Err(Error::illegal_state(format!( + "Could not write to ctl pipe. Expected {} bytes, wrote {slen} bytes", + size_of::() + ))) + } } /// Read a message from the child process. @@ -516,15 +605,17 @@ impl Forkserver { } /// This [`Executor`] can run binaries compiled for AFL/AFL++ that make use of a forkserver. +/// /// Shared memory feature is also available, but you have to set things up in your code. /// Please refer to AFL++'s docs. -pub struct ForkserverExecutor +pub struct ForkserverExecutor where SP: ShMemProvider, { target: OsString, args: Vec, input_file: InputFile, + target_bytes_converter: TC, uses_shmem_testcase: bool, forkserver: Forkserver, observers: OT, @@ -539,8 +630,9 @@ where crash_exitcode: Option, } -impl Debug for ForkserverExecutor +impl Debug for ForkserverExecutor where + TC: Debug, OT: Debug, SP: ShMemProvider, { @@ -549,6 +641,7 @@ where .field("target", &self.target) .field("args", &self.args) .field("input_file", &self.input_file) + .field("target_bytes_converter", &self.target_bytes_converter) .field("uses_shmem_testcase", &self.uses_shmem_testcase) .field("forkserver", &self.forkserver) .field("observers", &self.observers) @@ -557,19 +650,22 @@ where } } -impl ForkserverExecutor<(), (), UnixShMemProvider> { +impl ForkserverExecutor<(), (), (), UnixShMemProvider> { /// Builder for `ForkserverExecutor` #[must_use] - pub fn builder() -> ForkserverExecutorBuilder<'static, UnixShMemProvider> { + pub fn builder( + ) -> ForkserverExecutorBuilder<'static, NopTargetBytesConverter, UnixShMemProvider> + { ForkserverExecutorBuilder::new() } } -impl ForkserverExecutor +impl ForkserverExecutor where - OT: ObserversTuple, + OT: ObserversTuple, S: UsesInput, SP: ShMemProvider, + TC: TargetBytesConverter, { /// The `target` binary that's going to run. pub fn target(&self) -> &OsString { @@ -600,12 +696,118 @@ where pub fn coverage_map_size(&self) -> Option { self.map_size } + + /// Execute input and increase the execution counter. 
+ #[inline] + fn execute_input(&mut self, state: &mut S, input: &TC::Input) -> Result + where + S: HasExecutions, + { + *state.executions_mut() += 1; + + self.execute_input_uncounted(input) + } + + /// Execute input, but side-step the execution counter. + #[inline] + fn execute_input_uncounted(&mut self, input: &TC::Input) -> Result { + let mut exit_kind = ExitKind::Ok; + + let last_run_timed_out = self.forkserver.last_run_timed_out_raw(); + + let mut input_bytes = self.target_bytes_converter.to_target_bytes(input); + let mut input_size = input_bytes.as_slice().len(); + if input_size > self.max_input_size { + // Truncate like AFL++ does + input_size = self.max_input_size; + } else if input_size < self.min_input_size { + // Extend like AFL++ does + input_size = self.min_input_size; + let mut input_bytes_copy = Vec::with_capacity(input_size); + input_bytes_copy + .as_slice_mut() + .copy_from_slice(input_bytes.as_slice()); + input_bytes = OwnedSlice::from(input_bytes_copy); + } + let input_size_in_bytes = input_size.to_ne_bytes(); + if self.uses_shmem_testcase { + debug_assert!( + self.map.is_some(), + "The uses_shmem_testcase() bool can only exist when a map is set" + ); + // # Safety + // Struct can never be created when uses_shmem_testcase is true and map is none. + let map = unsafe { self.map.as_mut().unwrap_unchecked() }; + // The first four bytes declares the size of the shmem. + map.as_slice_mut()[..SHMEM_FUZZ_HDR_SIZE] + .copy_from_slice(&input_size_in_bytes[..SHMEM_FUZZ_HDR_SIZE]); + map.as_slice_mut()[SHMEM_FUZZ_HDR_SIZE..(SHMEM_FUZZ_HDR_SIZE + input_size)] + .copy_from_slice(&input_bytes.as_slice()[..input_size]); + } else { + self.input_file + .write_buf(&input_bytes.as_slice()[..input_size])?; + } + + self.forkserver.set_last_run_timed_out(false); + if let Err(err) = self.forkserver.write_ctl(last_run_timed_out) { + return Err(Error::unknown(format!( + "Unable to request new process from fork server (OOM?): {err:?}" + ))); + } + + let pid = self.forkserver.read_st().map_err(|err| { + Error::unknown(format!( + "Unable to request new process from fork server (OOM?): {err:?}" + )) + })?; + + if pid <= 0 { + return Err(Error::unknown( + "Fork server is misbehaving (OOM?)".to_string(), + )); + } + + self.forkserver.set_child_pid(Pid::from_raw(pid)); + + if let Some(status) = self.forkserver.read_st_timed(&self.timeout)? { + self.forkserver.set_status(status); + let exitcode_is_crash = if let Some(crash_exitcode) = self.crash_exitcode { + (libc::WEXITSTATUS(self.forkserver().status()) as i8) == crash_exitcode + } else { + false + }; + if libc::WIFSIGNALED(self.forkserver().status()) || exitcode_is_crash { + exit_kind = ExitKind::Crash; + #[cfg(feature = "regex")] + if let Some(asan_observer) = self.observers.get_mut(&self.asan_obs) { + asan_observer.parse_asan_output_from_asan_log_file(pid)?; + } + } + } else { + self.forkserver.set_last_run_timed_out(true); + + // We need to kill the child in case he has timed out, or we can't get the correct pid in the next call to self.executor.forkserver_mut().read_st()? 
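As an aside on the shared-memory testcase prepared earlier in this function: the layout is a 4-byte, native-endian length header followed by the testcase bytes, as the comment above the copy describes. A minimal sketch, assuming the 4-byte header (`SHMEM_FUZZ_HDR_SIZE` in this file) and an ordinary byte slice standing in for the shared map:

fn write_shmem_testcase(shmem: &mut [u8], input: &[u8]) {
    const HDR: usize = 4; // length prefix read by the target's forkserver shim
    assert!(HDR + input.len() <= shmem.len());
    // First four bytes: testcase length, native endianness (low bytes of usize).
    shmem[..HDR].copy_from_slice(&input.len().to_ne_bytes()[..HDR]);
    // Then the testcase itself.
    shmem[HDR..HDR + input.len()].copy_from_slice(input);
}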
+ let _ = kill(self.forkserver().child_pid(), self.forkserver.kill_signal); + if let Err(err) = self.forkserver.read_st() { + return Err(Error::unknown(format!( + "Could not kill timed-out child: {err:?}" + ))); + } + exit_kind = ExitKind::Timeout; + } + + if !libc::WIFSTOPPED(self.forkserver().status()) { + self.forkserver.reset_child_pid(); + } + + Ok(exit_kind) + } } /// The builder for `ForkserverExecutor` #[derive(Debug)] #[allow(clippy::struct_excessive_bools)] -pub struct ForkserverExecutorBuilder<'a, SP> { +pub struct ForkserverExecutorBuilder<'a, TC, SP> { program: Option, arguments: Vec, envs: Vec<(OsString, OsString)>, @@ -625,20 +827,25 @@ pub struct ForkserverExecutorBuilder<'a, SP> { #[cfg(feature = "regex")] asan_obs: Option>, crash_exitcode: Option, + target_bytes_converter: TC, } -impl<'a, SP> ForkserverExecutorBuilder<'a, SP> { +impl<'a, TC, SP> ForkserverExecutorBuilder<'a, TC, SP> +where + SP: ShMemProvider, +{ /// Builds `ForkserverExecutor`. /// This Forkserver will attempt to provide inputs over shared mem when `shmem_provider` is given. /// Else this forkserver will pass the input to the target via `stdin` /// in case no input file is specified. /// If `debug_child` is set, the child will print to `stdout`/`stderr`. #[allow(clippy::pedantic)] - pub fn build(&mut self, observers: OT) -> Result, Error> + pub fn build(mut self, observers: OT) -> Result, Error> where - OT: ObserversTuple, + OT: ObserversTuple, S: UsesInput, - S::Input: Input + HasTargetBytes, + S::Input: Input, + TC: TargetBytesConverter, SP: ShMemProvider, { let (forkserver, input_file, map) = self.build_helper()?; @@ -684,25 +891,27 @@ impl<'a, SP> ForkserverExecutorBuilder<'a, SP> { min_input_size: self.min_input_size, max_input_size: self.max_input_size, timeout, + #[cfg(feature = "regex")] asan_obs: self .asan_obs .clone() .unwrap_or(AsanBacktraceObserver::default().handle()), crash_exitcode: self.crash_exitcode, + target_bytes_converter: self.target_bytes_converter, }) } /// Builds `ForkserverExecutor` downsizing the coverage map to fit exaclty the AFL++ map size. #[allow(clippy::pedantic)] pub fn build_dynamic_map( - &mut self, + mut self, mut map_observer: A, other_observers: OT, - ) -> Result, Error> + ) -> Result, Error> where MO: MapObserver + Truncate, // TODO maybe enforce Entry = u8 for the cov map - A: Observer + AsRef + AsMut, - OT: ObserversTuple + Prepend, + A: Observer + AsMut, + OT: ObserversTuple + Prepend, S: UsesInput, S::Input: Input + HasTargetBytes, SP: ShMemProvider, @@ -748,11 +957,13 @@ impl<'a, SP> ForkserverExecutorBuilder<'a, SP> { min_input_size: self.min_input_size, max_input_size: self.max_input_size, timeout, + #[cfg(feature = "regex")] asan_obs: self .asan_obs .clone() .unwrap_or(AsanBacktraceObserver::default().handle()), crash_exitcode: self.crash_exitcode, + target_bytes_converter: self.target_bytes_converter, }) } @@ -794,6 +1005,8 @@ impl<'a, SP> ForkserverExecutorBuilder<'a, SP> { 0, self.is_persistent, self.is_deferred_frksrv, + self.has_asan_obs(), + self.map_size, self.debug_child, self.kill_signal.unwrap_or(KILL_SIGNAL_DEFAULT), )?, @@ -804,39 +1017,59 @@ impl<'a, SP> ForkserverExecutorBuilder<'a, SP> { } }; - let (rlen, version_status) = forkserver.read_st()?; // Initial handshake, read 4-bytes hello message from the forkserver. - - if rlen != 4 { - return Err(Error::unknown("Failed to start a forkserver".to_string())); - } + // Initial handshake, read 4-bytes hello message from the forkserver. 
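For orientation, the hello value read here is a plain 4-byte, native-endian i32. New-style forkservers report `0x41464cXX` (the bytes "AFL" plus a version byte), and the fuzzer acknowledges by echoing the status with all bits flipped, as the code below does with `xored_status`. A standalone sketch of just that encoding (the constants mirror the ones used in this patch; everything else is illustrative):

// 0x41464c00 is simply the bytes b"AFL\0" interpreted as a big-endian u32.
const _: () = assert!(u32::from_be_bytes(*b"AFL\0") == 0x41464c00);

fn decode_hello(raw: [u8; 4]) -> Option<u32> {
    let status = i32::from_ne_bytes(raw) as u32;
    // New protocol: the low byte carries the forkserver protocol version.
    (0x41464c00..=0x41464cff)
        .contains(&status)
        .then(|| status - 0x41464c00)
}

fn encode_ack(status: i32) -> [u8; 4] {
    // `!x` on the u32 view is the same as the `x ^ 0xffffffff` used below.
    (!(status as u32) as i32).to_ne_bytes()
}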
+ let version_status = forkserver.read_st().map_err(|err| { + Error::illegal_state(format!("{FAILED_TO_START_FORKSERVER_MSG}: {err:?}")) + })?; if (version_status & FS_NEW_ERROR) == FS_NEW_ERROR { report_error_and_exit(version_status & 0x0000ffff)?; } - let keep = version_status; - let version: u32 = version_status as u32 - 0x41464c00_u32; - if (0x41464c00..=0x41464cff).contains(&version_status) { - match version { - 0 => { - return Err(Error::unknown("Fork server version is not assigned, this should not happen. Recompile target.")); - } - FS_NEW_VERSION_MIN..=FS_NEW_VERSION_MAX => { - // good, do nothing - } - _ => { - return Err(Error::unknown( - "Fork server version is not supported. Recompile the target.", - )); - } + if Self::is_old_forkserver(version_status) { + log::info!("Old fork server model is used by the target, this still works though."); + self.initialize_old_forkserver(version_status, map.as_ref(), &mut forkserver)?; + } else { + self.initialize_forkserver(version_status, map.as_ref(), &mut forkserver)?; + } + Ok((forkserver, input_file, map)) + } + + fn is_old_forkserver(version_status: i32) -> bool { + !(0x41464c00..0x41464cff).contains(&version_status) + } + + /// Intialize forkserver > v4.20c + #[allow(clippy::cast_possible_wrap)] + #[allow(clippy::cast_sign_loss)] + fn initialize_forkserver( + &mut self, + status: i32, + map: Option<&SP::ShMem>, + forkserver: &mut Forkserver, + ) -> Result<(), Error> { + let keep = status; + let version: u32 = status as u32 - 0x41464c00_u32; + match version { + 0 => { + return Err(Error::illegal_state("Fork server version is not assigned, this should not happen. Recompile target.")); + } + FS_NEW_VERSION_MIN..=FS_NEW_VERSION_MAX => { + // good, do nothing + } + _ => { + return Err(Error::illegal_state( + "Fork server version is not supported. 
Recompile the target.", + )); } } - let xored_version_status = (version_status as u32 ^ 0xffffffff) as i32; + let xored_status = (status as u32 ^ 0xffffffff) as i32; - let send_len = forkserver.write_ctl(xored_version_status)?; - if send_len != 4 { - return Err(Error::unknown("Writing to forkserver failed.".to_string())); + if let Err(err) = forkserver.write_ctl(xored_status) { + return Err(Error::illegal_state(format!( + "Writing to forkserver failed: {err:?}" + ))); } log::info!( @@ -844,37 +1077,15 @@ impl<'a, SP> ForkserverExecutorBuilder<'a, SP> { version ); - let (read_len, status) = forkserver.read_st()?; - if read_len != 4 { - return Err(Error::unknown( - "Reading from forkserver failed.".to_string(), - )); - } + let status = forkserver.read_st().map_err(|err| { + Error::illegal_state(format!("Reading from forkserver failed: {err:?}")) + })?; if status & FS_NEW_OPT_MAPSIZE == FS_NEW_OPT_MAPSIZE { - // When 0, we assume that map_size was filled by the user or const - /* TODO autofill map size from the observer - - if map_size > 0 { - self.map_size = Some(map_size as usize); - } - */ - let (read_len, mut map_size) = forkserver.read_st()?; - if read_len != 4 { - return Err(Error::unknown( - "Failed to read map size from forkserver".to_string(), - )); - } - - if map_size % 64 != 0 { - map_size = ((map_size + 63) >> 6) << 6; - } - - // TODO set AFL_MAP_SIZE - assert!(self.map_size.is_none() || map_size as usize <= self.map_size.unwrap()); - - // we'll use this later when we truncate the observer - self.map_size = Some(map_size as usize); + let fsrv_map_size = forkserver.read_st().map_err(|err| { + Error::illegal_state(format!("Failed to read map size from forkserver: {err:?}")) + })?; + self.set_map_size(fsrv_map_size)?; } if status & FS_NEW_OPT_SHDMEM_FUZZ != 0 { @@ -882,51 +1093,160 @@ impl<'a, SP> ForkserverExecutorBuilder<'a, SP> { log::info!("Using SHARED MEMORY FUZZING feature."); self.uses_shmem_testcase = true; } else { - return Err(Error::unknown( + return Err(Error::illegal_state( "Target requested sharedmem fuzzing, but you didn't prepare shmem", )); } } - if status & FS_NEW_OPT_AUTODICT != 0 { + if status & FS_NEW_OPT_AUTODTCT != 0 { // Here unlike shmem input fuzzing, we are forced to read things // hence no self.autotokens.is_some() to check if we proceed - let (read_len, dict_size) = forkserver.read_st()?; - if read_len != 4 { - return Err(Error::unknown( - "Failed to read dictionary size from forkserver".to_string(), - )); - } + let autotokens_size = forkserver.read_st().map_err(|err| { + Error::illegal_state(format!( + "Failed to read autotokens size from forkserver: {err:?}", + )) + })?; - if !(2..=0xffffff).contains(&dict_size) { + let tokens_size_max = 0xffffff; + + if !(2..=tokens_size_max).contains(&autotokens_size) { return Err(Error::illegal_state( - "Dictionary has an illegal size".to_string(), + format!("Autotokens size is incorrect, expected 2 to {tokens_size_max} (inclusive), but got {autotokens_size}. 
Make sure your afl-cc verison is up to date."), )); } - log::info!("Autodict size {dict_size:x}"); - let (rlen, buf) = forkserver.read_st_size(dict_size as usize)?; - - if rlen != dict_size as usize { - return Err(Error::unknown("Failed to load autodictionary".to_string())); - } + log::info!("Autotokens size {autotokens_size:x}"); + let buf = forkserver + .read_st_of_len(autotokens_size as usize) + .map_err(|err| { + Error::illegal_state(format!("Failed to load autotokens: {err:?}")) + })?; if let Some(t) = &mut self.autotokens { - t.parse_autodict(&buf, dict_size as usize); + t.parse_autodict(&buf, autotokens_size as usize); } } - let (read_len, aflx) = forkserver.read_st()?; - if read_len != 4 { - return Err(Error::unknown("Reading from forkserver failed".to_string())); + let aflx = forkserver.read_st().map_err(|err| { + Error::illegal_state(format!("Reading from forkserver failed: {err:?}")) + })?; + + if aflx != keep { + return Err(Error::unknown(format!( + "Error in forkserver communication ({aflx:?}=>{keep:?})", + ))); + } + Ok(()) + } + + /// Intialize old forkserver. < v4.20c + #[allow(clippy::cast_possible_wrap)] + #[allow(clippy::cast_sign_loss)] + fn initialize_old_forkserver( + &mut self, + status: i32, + map: Option<&SP::ShMem>, + forkserver: &mut Forkserver, + ) -> Result<(), Error> { + if status & FS_OPT_ENABLED == FS_OPT_ENABLED && status & FS_OPT_MAPSIZE == FS_OPT_MAPSIZE { + let fsrv_map_size = fs_opt_get_mapsize(status); + self.set_map_size(fsrv_map_size)?; } - if aflx != version_status { - return Err(Error::unknown(format!( - "Error in forkserver communication ({:x}=>{:x})", - keep, aflx + // Only with SHMEM or AUTODTCT we can send send_status back or it breaks! + // If forkserver is responding, we then check if there's any option enabled. + // We'll send 4-bytes message back to the forkserver to tell which features to use + // The forkserver is listening to our response if either shmem fuzzing is enabled or auto dict is enabled + // + if status & FS_OPT_ENABLED == FS_OPT_ENABLED + && (status & FS_OPT_SHDMEM_FUZZ == FS_OPT_SHDMEM_FUZZ + || status & FS_OPT_AUTODTCT == FS_OPT_AUTODTCT) + { + let mut send_status = FS_OPT_ENABLED; + + if (status & FS_OPT_SHDMEM_FUZZ == FS_OPT_SHDMEM_FUZZ) && map.is_some() { + log::info!("Using SHARED MEMORY FUZZING feature."); + send_status |= FS_OPT_SHDMEM_FUZZ; + self.uses_shmem_testcase = true; + } + + if (status & FS_OPT_AUTODTCT == FS_OPT_AUTODTCT) && self.autotokens.is_some() { + log::info!("Using AUTODTCT feature"); + send_status |= FS_OPT_AUTODTCT; + } + + if send_status != FS_OPT_ENABLED { + // if send_status is not changed (Options are available but we didn't use any), then don't send the next write_ctl message. 
+ // This is important + + if let Err(err) = forkserver.write_ctl(send_status) { + return Err(Error::illegal_state(format!( + "Writing to forkserver failed: {err:?}" + ))); + } + + if (send_status & FS_OPT_AUTODTCT) == FS_OPT_AUTODTCT { + let dict_size = forkserver.read_st().map_err(|err| { + Error::illegal_state(format!("Reading from forkserver failed: {err:?}")) + })?; + + if !(2..=0xffffff).contains(&dict_size) { + return Err(Error::illegal_state( + "Dictionary has an illegal size".to_string(), + )); + } + + log::info!("Autodict size {dict_size:x}"); + + let buf = forkserver + .read_st_of_len(dict_size as usize) + .map_err(|err| { + Error::unknown(format!("Failed to load autodictionary: {err:?}")) + })?; + if let Some(t) = &mut self.autotokens { + t.parse_autodict(&buf, dict_size as usize); + } + } + } + } else { + log::warn!("Forkserver Options are not available."); + } + + Ok(()) + } + + #[allow(clippy::cast_sign_loss)] + fn set_map_size(&mut self, fsrv_map_size: i32) -> Result { + // When 0, we assume that map_size was filled by the user or const + /* TODO autofill map size from the observer + + if fsrv_map_size > 0 { + self.map_size = Some(fsrv_map_size as usize); + } + */ + let mut actual_map_size = fsrv_map_size; + if actual_map_size % 64 != 0 { + actual_map_size = ((actual_map_size + 63) >> 6) << 6; + } + + // TODO set AFL_MAP_SIZE + if let Some(max_size) = self.map_size { + if actual_map_size as usize > max_size { + return Err(Error::illegal_state(format!( + "The target map size is {actual_map_size} but the allocated map size is {max_size}. \ + Increase the initial size of the forkserver map to at least that size using the forkserver builder's `coverage_map_size`." + ))); + } + } else { + return Err(Error::illegal_state(format!( + "The target map size is {actual_map_size} but we did not create a coverage map before launching the target! \ + Set an initial forkserver map to at least that size using the forkserver builder's `coverage_map_size`." ))); } - Ok((forkserver, input_file, map)) + // we'll use this later when we truncate the observer + self.map_size = Some(actual_map_size as usize); + + Ok(actual_map_size as usize) } /// Use autodict? @@ -1138,9 +1458,21 @@ impl<'a, SP> ForkserverExecutorBuilder<'a, SP> { self.kill_signal = Some(kill_signal); self } + + /// Determine if the asan observer is present (always false if feature "regex" is disabled) + #[cfg(feature = "regex")] + pub fn has_asan_obs(&self) -> bool { + self.asan_obs.is_some() + } + + /// Determine if the asan observer is present (always false if feature "regex" is disabled) + #[cfg(not(feature = "regex"))] + pub fn has_asan_obs(&self) -> bool { + false + } } -impl<'a> ForkserverExecutorBuilder<'a, UnixShMemProvider> { +impl<'a> ForkserverExecutorBuilder<'a, NopTargetBytesConverter, UnixShMemProvider> { /// Creates a new `AFL`-style [`ForkserverExecutor`] with the given target, arguments and observers. /// This is the builder for `ForkserverExecutor` /// This Forkserver will attempt to provide inputs over shared mem when `shmem_provider` is given. @@ -1148,7 +1480,8 @@ impl<'a> ForkserverExecutorBuilder<'a, UnixShMemProvider> { /// in case no input file is specified. /// If `debug_child` is set, the child will print to `stdout`/`stderr`. 
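A quick worked instance of the 64-byte rounding that `set_map_size` above applies to the map size reported by the target (same shift trick as in the patch, shown standalone):

const fn round_up_to_64(map_size: i32) -> i32 {
    // ((x + 63) >> 6) << 6 rounds up to the next multiple of 64;
    // values that are already aligned pass through unchanged.
    ((map_size + 63) >> 6) << 6
}

const _: () = assert!(round_up_to_64(65_537) == 65_600);
const _: () = assert!(round_up_to_64(65_536) == 65_536);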
#[must_use] - pub fn new() -> ForkserverExecutorBuilder<'a, UnixShMemProvider> { + pub fn new( + ) -> ForkserverExecutorBuilder<'a, NopTargetBytesConverter, UnixShMemProvider> { ForkserverExecutorBuilder { program: None, arguments: vec![], @@ -1166,17 +1499,24 @@ impl<'a> ForkserverExecutorBuilder<'a, UnixShMemProvider> { min_input_size: MIN_INPUT_SIZE_DEFAULT, kill_signal: None, timeout: None, + #[cfg(feature = "regex")] asan_obs: None, crash_exitcode: None, + target_bytes_converter: NopTargetBytesConverter::new(), } } +} +impl<'a, TC> ForkserverExecutorBuilder<'a, TC, UnixShMemProvider> { /// Shmem provider for forkserver's shared memory testcase feature. pub fn shmem_provider( self, shmem_provider: &'a mut SP, - ) -> ForkserverExecutorBuilder<'a, SP> { + ) -> ForkserverExecutorBuilder<'a, TC, SP> { ForkserverExecutorBuilder { + // Set the new provider + shmem_provider: Some(shmem_provider), + // Copy all other values from the old Builder program: self.program, arguments: self.arguments, envs: self.envs, @@ -1187,30 +1527,66 @@ impl<'a> ForkserverExecutorBuilder<'a, UnixShMemProvider> { is_deferred_frksrv: self.is_deferred_frksrv, autotokens: self.autotokens, input_filename: self.input_filename, - shmem_provider: Some(shmem_provider), map_size: self.map_size, - max_input_size: MAX_INPUT_SIZE_DEFAULT, - min_input_size: MIN_INPUT_SIZE_DEFAULT, - kill_signal: None, - timeout: None, - asan_obs: None, - crash_exitcode: None, + max_input_size: self.max_input_size, + min_input_size: self.min_input_size, + kill_signal: self.kill_signal, + timeout: self.timeout, + #[cfg(feature = "regex")] + asan_obs: self.asan_obs, + crash_exitcode: self.crash_exitcode, + target_bytes_converter: self.target_bytes_converter, } } } -impl<'a> Default for ForkserverExecutorBuilder<'a, UnixShMemProvider> { +impl<'a, TC, SP> ForkserverExecutorBuilder<'a, TC, SP> { + /// Shmem provider for forkserver's shared memory testcase feature. 
+ pub fn target_bytes_converter( + self, + target_bytes_converter: TC2, + ) -> ForkserverExecutorBuilder<'a, TC2, SP> { + ForkserverExecutorBuilder { + // Set the new provider + shmem_provider: self.shmem_provider, + // Copy all other values from the old Builder + program: self.program, + arguments: self.arguments, + envs: self.envs, + debug_child: self.debug_child, + use_stdin: self.use_stdin, + uses_shmem_testcase: self.uses_shmem_testcase, + is_persistent: self.is_persistent, + is_deferred_frksrv: self.is_deferred_frksrv, + autotokens: self.autotokens, + input_filename: self.input_filename, + map_size: self.map_size, + max_input_size: self.max_input_size, + min_input_size: self.min_input_size, + kill_signal: self.kill_signal, + timeout: self.timeout, + #[cfg(feature = "regex")] + asan_obs: self.asan_obs, + crash_exitcode: self.crash_exitcode, + target_bytes_converter, + } + } +} + +impl Default + for ForkserverExecutorBuilder<'_, NopTargetBytesConverter, UnixShMemProvider> +{ fn default() -> Self { Self::new() } } -impl Executor for ForkserverExecutor +impl Executor for ForkserverExecutor where - OT: ObserversTuple, + OT: ObserversTuple, SP: ShMemProvider, S: State + HasExecutions, - S::Input: HasTargetBytes, + TC: TargetBytesConverter, EM: UsesState, Z: UsesState, { @@ -1222,105 +1598,26 @@ where _mgr: &mut EM, input: &Self::Input, ) -> Result { - *state.executions_mut() += 1; - - let mut exit_kind = ExitKind::Ok; - - let last_run_timed_out = self.forkserver.last_run_timed_out_raw(); - - let mut input_bytes = input.target_bytes(); - let mut input_size = input_bytes.as_slice().len(); - if input_size > self.max_input_size { - // Truncate like AFL++ does - input_size = self.max_input_size; - } else if input_size < self.min_input_size { - // Extend like AFL++ does - input_size = self.min_input_size; - let mut input_bytes_copy = Vec::with_capacity(input_size); - input_bytes_copy - .as_slice_mut() - .copy_from_slice(input_bytes.as_slice()); - input_bytes = OwnedSlice::from(input_bytes_copy); - } - let input_size_in_bytes = input_size.to_ne_bytes(); - if self.uses_shmem_testcase { - debug_assert!( - self.map.is_some(), - "The uses_shmem_testcase() bool can only exist when a map is set" - ); - // # Safety - // Struct can never be created when uses_shmem_testcase is true and map is none. - let map = unsafe { self.map.as_mut().unwrap_unchecked() }; - // The first four bytes declares the size of the shmem. - map.as_slice_mut()[..SHMEM_FUZZ_HDR_SIZE] - .copy_from_slice(&input_size_in_bytes[..SHMEM_FUZZ_HDR_SIZE]); - map.as_slice_mut()[SHMEM_FUZZ_HDR_SIZE..(SHMEM_FUZZ_HDR_SIZE + input_size)] - .copy_from_slice(&input_bytes.as_slice()[..input_size]); - } else { - self.input_file - .write_buf(&input_bytes.as_slice()[..input_size])?; - } - - let send_len = self.forkserver.write_ctl(last_run_timed_out)?; - - self.forkserver.set_last_run_timed_out(false); - - if send_len != 4 { - return Err(Error::unknown( - "Unable to request new process from fork server (OOM?)".to_string(), - )); - } - - let (recv_pid_len, pid) = self.forkserver.read_st()?; - if recv_pid_len != 4 { - return Err(Error::unknown( - "Unable to request new process from fork server (OOM?)".to_string(), - )); - } - - if pid <= 0 { - return Err(Error::unknown( - "Fork server is misbehaving (OOM?)".to_string(), - )); - } - - self.forkserver.set_child_pid(Pid::from_raw(pid)); - - if let Some(status) = self.forkserver.read_st_timed(&self.timeout)? 
{ - self.forkserver.set_status(status); - let exitcode_is_crash = if let Some(crash_exitcode) = self.crash_exitcode { - (libc::WEXITSTATUS(self.forkserver().status()) as i8) == crash_exitcode - } else { - false - }; - if libc::WIFSIGNALED(self.forkserver().status()) || exitcode_is_crash { - exit_kind = ExitKind::Crash; - #[cfg(feature = "regex")] - if let Some(asan_observer) = self.observers.get_mut(&self.asan_obs) { - asan_observer.parse_asan_output_from_asan_log_file(pid)?; - } - } - } else { - self.forkserver.set_last_run_timed_out(true); - - // We need to kill the child in case he has timed out, or we can't get the correct pid in the next call to self.executor.forkserver_mut().read_st()? - let _ = kill(self.forkserver().child_pid(), self.forkserver.kill_signal); - let (recv_status_len, _) = self.forkserver.read_st()?; - if recv_status_len != 4 { - return Err(Error::unknown("Could not kill timed-out child".to_string())); - } - exit_kind = ExitKind::Timeout; - } - - if !libc::WIFSTOPPED(self.forkserver().status()) { - self.forkserver.reset_child_pid(); - } - - Ok(exit_kind) + self.execute_input(state, input) } } -impl UsesState for ForkserverExecutor +impl HasTimeout for ForkserverExecutor +where + SP: ShMemProvider, +{ + #[inline] + fn set_timeout(&mut self, timeout: Duration) { + self.timeout = TimeSpec::from_duration(timeout); + } + + #[inline] + fn timeout(&self) -> Duration { + self.timeout.into() + } +} + +impl UsesState for ForkserverExecutor where S: State, SP: ShMemProvider, @@ -1328,21 +1625,14 @@ where type State = S; } -impl UsesObservers for ForkserverExecutor +impl HasObservers for ForkserverExecutor where - OT: ObserversTuple, + OT: ObserversTuple, S: State, SP: ShMemProvider, { type Observers = OT; -} -impl HasObservers for ForkserverExecutor -where - OT: ObserversTuple, - S: State, - SP: ShMemProvider, -{ #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { RefIndexable::from(&self.observers) @@ -1366,7 +1656,7 @@ mod tests { use serial_test::serial; use crate::{ - executors::forkserver::ForkserverExecutor, + executors::forkserver::{ForkserverExecutor, FAILED_TO_START_FORKSERVER_MSG}, observers::{ConstMapObserver, HitcountsMapObserver}, Error, }; @@ -1383,7 +1673,7 @@ mod tests { let mut shmem = shmem_provider.new_shmem(MAP_SIZE).unwrap(); shmem.write_to_env("__AFL_SHM_ID").unwrap(); - let shmem_buf = shmem.as_slice_mut(); + let shmem_buf: &mut [u8; MAP_SIZE] = shmem.as_slice_mut().try_into().unwrap(); let edges_observer = HitcountsMapObserver::new(ConstMapObserver::<_, MAP_SIZE>::new( "shared_mem", @@ -1393,6 +1683,7 @@ mod tests { let executor = ForkserverExecutor::builder() .program(bin) .args(args) + .coverage_map_size(MAP_SIZE) .debug_child(false) .shmem_provider(&mut shmem_provider) .build::<_, ()>(tuple_list!(edges_observer)); @@ -1400,10 +1691,13 @@ mod tests { // Since /usr/bin/echo is not a instrumented binary file, the test will just check if the forkserver has failed at the initial handshake let result = match executor { Ok(_) => true, - Err(e) => match e { - Error::Unknown(s, _) => s == "Failed to start a forkserver", - _ => false, - }, + Err(e) => { + println!("Error: {e:?}"); + match e { + Error::IllegalState(s, _) => s.contains(FAILED_TO_START_FORKSERVER_MSG), + _ => false, + } + } }; assert!(result); } diff --git a/libafl/src/executors/hooks/inprocess.rs b/libafl/src/executors/hooks/inprocess.rs index 8733c37a36..f4a927c9c8 100644 --- a/libafl/src/executors/hooks/inprocess.rs +++ b/libafl/src/executors/hooks/inprocess.rs @@ 
-1,6 +1,6 @@ //! The hook for `InProcessExecutor` -#[cfg(any(unix, feature = "std"))] -use core::ptr::addr_of_mut; +#[cfg(all(target_os = "linux", feature = "std"))] +use core::mem::zeroed; #[cfg(any(unix, all(windows, feature = "std")))] use core::sync::atomic::{compiler_fence, Ordering}; use core::{ @@ -9,8 +9,6 @@ use core::{ ptr::{self, null_mut}, time::Duration, }; -#[cfg(all(target_os = "linux", feature = "std"))] -use core::{mem::zeroed, ptr::addr_of}; #[cfg(all(target_os = "linux", feature = "std"))] use libafl_bolts::current_time; @@ -28,19 +26,19 @@ use crate::executors::hooks::unix::unix_signal_handler; #[cfg(windows)] use crate::state::State; use crate::{ + corpus::Corpus, events::{EventFirer, EventRestarter}, executors::{hooks::ExecutorHook, inprocess::HasInProcessHooks, Executor, HasObservers}, feedbacks::Feedback, inputs::UsesInput, - state::{HasCorpus, HasExecutions, HasSolutions}, + observers::ObserversTuple, + state::{HasCorpus, HasExecutions, HasSolutions, UsesState}, Error, HasObjective, }; + /// The inmem executor's handlers. #[allow(missing_debug_implementations)] -pub struct InProcessHooks -where - S: UsesInput, -{ +pub struct InProcessHooks { /// On crash C function pointer #[cfg(feature = "std")] pub crash_handler: *const c_void, @@ -151,7 +149,7 @@ where libc::timer_settime( self.timer_mut().timerid, 0, - addr_of!(disarmed), + &raw const disarmed, null_mut(), ); } @@ -173,7 +171,7 @@ where libc::timer_settime( self.timer_mut().timerid, 0, - addr_of!(self.timer_mut().itimerspec), + &raw const self.timer_mut().itimerspec, null_mut(), ); } @@ -204,7 +202,7 @@ where fn pre_exec(&mut self, state: &mut S, input: &S::Input) { #[cfg(feature = "std")] unsafe { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; (*data).crash_handler = self.crash_handler; (*data).timeout_handler = self.timeout_handler; } @@ -217,6 +215,8 @@ where #[allow(clippy::unused_self)] fn post_exec(&mut self, _state: &mut S, _input: &S::Input) { // timeout stuff + // # Safety + // We're calling this only once per execution, in a single thread. #[cfg(all(feature = "std", not(all(miri, target_vendor = "apple"))))] self.timer_mut().unset_timer(); } @@ -232,31 +232,41 @@ where pub fn new(exec_tmout: Duration) -> Result where E: Executor + HasObservers + HasInProcessHooks, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: HasExecutions + HasSolutions + HasCorpus, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { - #[cfg_attr(miri, allow(unused_variables))] + // # Safety + // We get a pointer to `GLOBAL_STATE` that will be initialized at this point in time. + // This unsafe is needed in stable but not in nightly. Remove in the future(?) + #[allow(unused_unsafe)] + let data = unsafe { &raw mut GLOBAL_STATE }; + #[cfg(feature = "std")] + unix_signal_handler::setup_panic_hook::(); + // # Safety + // Setting up the signal handlers with a pointer to the `GLOBAL_STATE` which should not be NULL at this point. + // We are the sole users of `GLOBAL_STATE` right now, and only dereference it in case of Segfault/Panic. + // In that case we get the mutable borrow. Otherwise we don't use it. 
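A number of hunks in this file (and in the files below) swap `addr_of!`/`addr_of_mut!` for the raw-borrow operators. A minimal sketch of what the operator does, assuming a toolchain where `&raw` is stable (Rust 1.82+); the patch applies the same idea to statics such as `GLOBAL_STATE`:

fn raw_borrow_demo() {
    let mut value = 0u64;
    // `&raw mut place` yields a *mut without ever materializing an intermediate
    // `&mut`, which is exactly what core::ptr::addr_of_mut! was used for.
    let p: *mut u64 = &raw mut value;
    unsafe { *p += 1 };
    assert_eq!(value, 1);
}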
+ #[cfg(all(not(miri), unix, feature = "std"))] unsafe { - let data = addr_of_mut!(GLOBAL_STATE); - #[cfg(feature = "std")] - unix_signal_handler::setup_panic_hook::(); - #[cfg(all(not(miri), unix, feature = "std"))] setup_signal_handler(data)?; - compiler_fence(Ordering::SeqCst); - Ok(Self { - #[cfg(feature = "std")] - crash_handler: unix_signal_handler::inproc_crash_handler:: - as *const c_void, - #[cfg(feature = "std")] - timeout_handler: unix_signal_handler::inproc_timeout_handler:: - as *const _, - #[cfg(feature = "std")] - timer: TimerStruct::new(exec_tmout), - phantom: PhantomData, - }) } + compiler_fence(Ordering::SeqCst); + Ok(Self { + #[cfg(feature = "std")] + crash_handler: unix_signal_handler::inproc_crash_handler:: + as *const c_void, + #[cfg(feature = "std")] + timeout_handler: unix_signal_handler::inproc_timeout_handler:: + as *const _, + #[cfg(feature = "std")] + timer: TimerStruct::new(exec_tmout), + phantom: PhantomData, + }) } /// Create new [`InProcessHooks`]. @@ -265,15 +275,18 @@ where pub fn new(exec_tmout: Duration) -> Result where E: Executor + HasObservers + HasInProcessHooks, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: State + HasExecutions + HasSolutions + HasCorpus, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { let ret; #[cfg(feature = "std")] unsafe { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; crate::executors::hooks::windows::windows_exception_handler::setup_panic_hook::< E, EM, @@ -321,7 +334,7 @@ where where E: Executor + HasObservers + HasInProcessHooks, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: HasExecutions + HasSolutions + HasCorpus, Z: HasObjective, { @@ -381,28 +394,38 @@ unsafe impl Send for InProcessExecutorHandlerData {} unsafe impl Sync for InProcessExecutorHandlerData {} impl InProcessExecutorHandlerData { + /// # Safety + /// Only safe if not called twice and if the executor is not used from another borrow after this. #[cfg(any(unix, feature = "std"))] - pub(crate) fn executor_mut<'a, E>(&self) -> &'a mut E { + pub(crate) unsafe fn executor_mut<'a, E>(&self) -> &'a mut E { unsafe { (self.executor_ptr as *mut E).as_mut().unwrap() } } + /// # Safety + /// Only safe if not called twice and if the state is not used from another borrow after this. #[cfg(any(unix, feature = "std"))] - pub(crate) fn state_mut<'a, S>(&self) -> &'a mut S { + pub(crate) unsafe fn state_mut<'a, S>(&self) -> &'a mut S { unsafe { (self.state_ptr as *mut S).as_mut().unwrap() } } + /// # Safety + /// Only safe if not called twice and if the event manager is not used from another borrow after this. #[cfg(any(unix, feature = "std"))] - pub(crate) fn event_mgr_mut<'a, EM>(&self) -> &'a mut EM { + pub(crate) unsafe fn event_mgr_mut<'a, EM>(&self) -> &'a mut EM { unsafe { (self.event_mgr_ptr as *mut EM).as_mut().unwrap() } } + /// # Safety + /// Only safe if not called twice and if the fuzzer is not used from another borrow after this. #[cfg(any(unix, feature = "std"))] - pub(crate) fn fuzzer_mut<'a, Z>(&self) -> &'a mut Z { + pub(crate) unsafe fn fuzzer_mut<'a, Z>(&self) -> &'a mut Z { unsafe { (self.fuzzer_ptr as *mut Z).as_mut().unwrap() } } + /// # Safety + /// Only safe if not called concurrently. 
#[cfg(any(unix, feature = "std"))] - pub(crate) fn take_current_input<'a, I>(&mut self) -> &'a I { + pub(crate) unsafe fn take_current_input<'a, I>(&mut self) -> &'a I { let r = unsafe { (self.current_input_ptr as *const I).as_ref().unwrap() }; self.current_input_ptr = ptr::null(); r @@ -451,37 +474,54 @@ pub(crate) static mut GLOBAL_STATE: InProcessExecutorHandlerData = InProcessExec }; /// Get the inprocess [`crate::state::State`] +/// +/// # Safety +/// Only safe if not called twice and if the state is not accessed from another borrow while this one is alive. #[must_use] -pub fn inprocess_get_state<'a, S>() -> Option<&'a mut S> { +pub unsafe fn inprocess_get_state<'a, S>() -> Option<&'a mut S> { unsafe { (GLOBAL_STATE.state_ptr as *mut S).as_mut() } } /// Get the [`crate::events::EventManager`] +/// +/// # Safety +/// Only safe if not called twice and if the event manager is not accessed from another borrow while this one is alive. #[must_use] -pub fn inprocess_get_event_manager<'a, EM>() -> Option<&'a mut EM> { +pub unsafe fn inprocess_get_event_manager<'a, EM>() -> Option<&'a mut EM> { unsafe { (GLOBAL_STATE.event_mgr_ptr as *mut EM).as_mut() } } /// Gets the inprocess [`crate::fuzzer::Fuzzer`] +/// +/// # Safety +/// Only safe if not called twice and if the fuzzer is not accessed from another borrow while this one is alive. #[must_use] -pub fn inprocess_get_fuzzer<'a, F>() -> Option<&'a mut F> { +pub unsafe fn inprocess_get_fuzzer<'a, F>() -> Option<&'a mut F> { unsafe { (GLOBAL_STATE.fuzzer_ptr as *mut F).as_mut() } } /// Gets the inprocess [`Executor`] +/// +/// # Safety +/// Only safe if not called twice and if the executor is not accessed from another borrow while this one is alive. #[must_use] -pub fn inprocess_get_executor<'a, E>() -> Option<&'a mut E> { +pub unsafe fn inprocess_get_executor<'a, E>() -> Option<&'a mut E> { unsafe { (GLOBAL_STATE.executor_ptr as *mut E).as_mut() } } /// Gets the inprocess input +/// +/// # Safety +/// Only safe if not called concurrently and if the input is not used mutably while this reference is alive. #[must_use] -pub fn inprocess_get_input<'a, I>() -> Option<&'a I> { +pub unsafe fn inprocess_get_input<'a, I>() -> Option<&'a I> { unsafe { (GLOBAL_STATE.current_input_ptr as *const I).as_ref() } } -/// Know if we ar eexecuting in a crash/timeout handler +/// Returns if we are executing in a crash/timeout handler #[must_use] pub fn inprocess_in_handler() -> bool { + // # Safety + // Safe because the state is set up and the handler is a single bool. Worst case we read an old value. unsafe { GLOBAL_STATE.in_handler } } diff --git a/libafl/src/executors/hooks/inprocess_fork.rs b/libafl/src/executors/hooks/inprocess_fork.rs index 819bbe30df..16af31f8b2 100644 --- a/libafl/src/executors/hooks/inprocess_fork.rs +++ b/libafl/src/executors/hooks/inprocess_fork.rs @@ -3,14 +3,14 @@ use alloc::vec::Vec; use core::{ ffi::c_void, marker::PhantomData, - ptr::{addr_of_mut, null}, + ptr::null, sync::atomic::{compiler_fence, Ordering}, }; use std::intrinsics::transmute; #[cfg(not(miri))] use libafl_bolts::os::unix_signals::setup_signal_handler; -use libafl_bolts::os::unix_signals::{ucontext_t, Handler, Signal}; +use libafl_bolts::os::unix_signals::{ucontext_t, Signal, SignalHandler}; use libc::siginfo_t; use crate::{ @@ -21,6 +21,8 @@ use crate::{ HasObservers, }, inputs::UsesInput, + observers::ObserversTuple, + state::UsesState, Error, }; @@ -44,7 +46,7 @@ where /// Call before running a target. 
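The accessors above all follow the same pattern: a raw pointer parked in a global before the run is cast back to a typed reference inside the handler. A condensed sketch of that pattern, using `AtomicPtr` and a `u32` payload purely for brevity (the patch itself uses plain statics plus compiler fences and the real executor/state types):

use core::sync::atomic::{AtomicPtr, Ordering};

static EXECUTOR_SLOT: AtomicPtr<u32> = AtomicPtr::new(core::ptr::null_mut());

/// Stash a pointer to the executor before entering the target.
fn stash(executor: &mut u32) {
    EXECUTOR_SLOT.store(core::ptr::from_mut(executor), Ordering::SeqCst);
}

/// # Safety
/// Only valid while the stashed executor is alive and not otherwise borrowed.
unsafe fn recover<'a>() -> Option<&'a mut u32> {
    unsafe { EXECUTOR_SLOT.load(Ordering::SeqCst).as_mut() }
}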
fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) { unsafe { - let data = addr_of_mut!(FORK_EXECUTOR_GLOBAL_DATA); + let data = &raw mut FORK_EXECUTOR_GLOBAL_DATA; (*data).crash_handler = self.crash_handler; (*data).timeout_handler = self.timeout_handler; compiler_fence(Ordering::SeqCst); @@ -58,11 +60,12 @@ impl InChildProcessHooks { /// Create new [`InChildProcessHooks`]. pub fn new() -> Result where - E: HasObservers, + E: HasObservers + UsesState, + E::Observers: ObserversTuple<::Input, E::State>, { - #[cfg_attr(miri, allow(unused_variables))] + #[cfg_attr(miri, allow(unused_variables, unused_unsafe))] unsafe { - let data = addr_of_mut!(FORK_EXECUTOR_GLOBAL_DATA); + let data = &raw mut FORK_EXECUTOR_GLOBAL_DATA; // child_signal_handlers::setup_child_panic_hook::(); #[cfg(not(miri))] setup_signal_handler(data)?; @@ -107,19 +110,21 @@ unsafe impl Sync for InProcessForkExecutorGlobalData {} unsafe impl Send for InProcessForkExecutorGlobalData {} impl InProcessForkExecutorGlobalData { - pub(crate) fn executor_mut<'a, E>(&self) -> &'a mut E { + /// # Safety + /// Only safe if not called twice and if the executor is not used from another borrow after this. + pub(crate) unsafe fn executor_mut<'a, E>(&self) -> &'a mut E { unsafe { (self.executor_ptr as *mut E).as_mut().unwrap() } } - pub(crate) fn state_mut<'a, S>(&self) -> &'a mut S { + /// # Safety + /// Only safe if not called twice and if the state is not used from another borrow after this. + pub(crate) unsafe fn state_mut<'a, S>(&self) -> &'a mut S { unsafe { (self.state_ptr as *mut S).as_mut().unwrap() } } - /*fn current_input<'a, I>(&self) -> &'a I { - unsafe { (self.current_input_ptr as *const I).as_ref().unwrap() } - }*/ - - pub(crate) fn take_current_input<'a, I>(&mut self) -> &'a I { + /// # Safety + /// Only safe if not called concurrently. 
+ pub(crate) unsafe fn take_current_input<'a, I>(&mut self) -> &'a I { let r = unsafe { (self.current_input_ptr as *const I).as_ref().unwrap() }; self.current_input_ptr = null(); r @@ -131,7 +136,6 @@ impl InProcessForkExecutorGlobalData { } /// a static variable storing the global state - pub(crate) static mut FORK_EXECUTOR_GLOBAL_DATA: InProcessForkExecutorGlobalData = InProcessForkExecutorGlobalData { executor_ptr: null(), @@ -141,31 +145,26 @@ pub(crate) static mut FORK_EXECUTOR_GLOBAL_DATA: InProcessForkExecutorGlobalData timeout_handler: null(), }; -impl Handler for InProcessForkExecutorGlobalData { - fn handle(&mut self, signal: Signal, info: &mut siginfo_t, context: Option<&mut ucontext_t>) { +impl SignalHandler for InProcessForkExecutorGlobalData { + unsafe fn handle( + &mut self, + signal: Signal, + info: &mut siginfo_t, + context: Option<&mut ucontext_t>, + ) { match signal { Signal::SigUser2 | Signal::SigAlarm => unsafe { if !FORK_EXECUTOR_GLOBAL_DATA.timeout_handler.is_null() { let func: ForkHandlerFuncPtr = transmute(FORK_EXECUTOR_GLOBAL_DATA.timeout_handler); - (func)( - signal, - info, - context, - addr_of_mut!(FORK_EXECUTOR_GLOBAL_DATA), - ); + (func)(signal, info, context, &raw mut FORK_EXECUTOR_GLOBAL_DATA); } }, _ => unsafe { if !FORK_EXECUTOR_GLOBAL_DATA.crash_handler.is_null() { let func: ForkHandlerFuncPtr = transmute(FORK_EXECUTOR_GLOBAL_DATA.crash_handler); - (func)( - signal, - info, - context, - addr_of_mut!(FORK_EXECUTOR_GLOBAL_DATA), - ); + (func)(signal, info, context, &raw mut FORK_EXECUTOR_GLOBAL_DATA); } }, } diff --git a/libafl/src/executors/hooks/intel_pt.rs b/libafl/src/executors/hooks/intel_pt.rs new file mode 100644 index 0000000000..f4acce679e --- /dev/null +++ b/libafl/src/executors/hooks/intel_pt.rs @@ -0,0 +1,106 @@ +use core::fmt::Debug; +use std::{ + ptr::slice_from_raw_parts_mut, + string::{String, ToString}, +}; + +use libafl_intelpt::{error_from_pt_error, IntelPT}; +use libipt::{Asid, Image, SectionCache}; +use num_traits::SaturatingAdd; +use serde::Serialize; +use typed_builder::TypedBuilder; + +use crate::{ + executors::{hooks::ExecutorHook, HasObservers}, + inputs::UsesInput, + Error, +}; + +/// Info of a binary's section that can be used during `Intel PT` traces decoding +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Section { + /// Path of the binary + pub file_path: String, + /// Offset of the section in the file + pub file_offset: u64, + /// Size of the section + pub size: u64, + /// Start virtual address of the section once loaded in memory + pub virtual_address: u64, +} + +/// Hook to enable Intel Processor Trace (PT) tracing +#[derive(TypedBuilder)] +pub struct IntelPTHook { + #[builder(default = IntelPT::builder().build().unwrap())] + intel_pt: IntelPT, + #[builder(setter(transform = |sections: &[Section]| sections_to_image(sections).unwrap()))] + image: (Image<'static>, SectionCache<'static>), + map_ptr: *mut T, + map_len: usize, +} + +//fixme: just derive(Debug) once https://github.com/sum-catnip/libipt-rs/pull/4 will be on crates.io +impl Debug for IntelPTHook { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + f.debug_struct("IntelPTHook") + .field("intel_pt", &self.intel_pt) + .field("map_ptr", &self.map_ptr) + .field("map_len", &self.map_len) + .finish() + } +} + +impl ExecutorHook for IntelPTHook +where + S: UsesInput + Serialize, + T: SaturatingAdd + From + Debug, +{ + fn init(&mut self, _state: &mut S) {} + + fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) { + 
self.intel_pt.enable_tracing().unwrap(); + } + + fn post_exec(&mut self, _state: &mut S, _input: &S::Input) { + self.intel_pt.disable_tracing().unwrap(); + + let slice = unsafe { &mut *slice_from_raw_parts_mut(self.map_ptr, self.map_len) }; + let _ = self + .intel_pt + .decode_traces_into_map(&mut self.image.0, slice) + .inspect_err(|e| log::warn!("Intel PT trace decoding failed: {e}")); + } +} + +// It would be nice to have this as a `TryFrom>`, but Rust's orphan rule doesn't +// like this (and `TryFromIter` is not a thing atm) +fn sections_to_image( + sections: &[Section], +) -> Result<(Image<'static>, SectionCache<'static>), Error> { + let mut image_cache = SectionCache::new(Some("image_cache")).map_err(error_from_pt_error)?; + let mut image = Image::new(Some("image")).map_err(error_from_pt_error)?; + + for s in sections { + let isid = image_cache.add_file(&s.file_path, s.file_offset, s.size, s.virtual_address); + if let Err(e) = isid { + log::warn!( + "Error while caching {} {} - skipped", + s.file_path, + e.to_string() + ); + continue; + } + + if let Err(e) = image.add_cached(&mut image_cache, isid.unwrap(), Asid::default()) { + log::warn!( + "Error while adding cache to image {} {} - skipped", + s.file_path, + e.to_string() + ); + continue; + } + } + + Ok((image, image_cache)) +} diff --git a/libafl/src/executors/hooks/mod.rs b/libafl/src/executors/hooks/mod.rs index 35453192fa..4296c3a16d 100644 --- a/libafl/src/executors/hooks/mod.rs +++ b/libafl/src/executors/hooks/mod.rs @@ -22,6 +22,10 @@ pub mod inprocess; #[cfg(feature = "std")] pub mod timer; +/// Intel Processor Trace (PT) +#[cfg(all(feature = "intel_pt", target_os = "linux"))] +pub mod intel_pt; + /// The hook that runs before and after the executor runs the target pub trait ExecutorHook where diff --git a/libafl/src/executors/hooks/timer.rs b/libafl/src/executors/hooks/timer.rs index f81202f191..21d5541348 100644 --- a/libafl/src/executors/hooks/timer.rs +++ b/libafl/src/executors/hooks/timer.rs @@ -1,12 +1,7 @@ //! The struct `TimerStruct` will absorb all the difference in timeout implementation in various system. -#[cfg(any(windows, target_os = "linux"))] -use core::ptr::addr_of_mut; use core::time::Duration; #[cfg(target_os = "linux")] -use core::{ - mem::zeroed, - ptr::{addr_of, null_mut}, -}; +use core::{mem::zeroed, ptr::null_mut}; #[cfg(all(unix, not(target_os = "linux")))] pub(crate) const ITIMER_REAL: core::ffi::c_int = 0; @@ -187,7 +182,7 @@ impl TimerStruct { let ptp_timer = unsafe { CreateThreadpoolTimer( Some(timeout_handler), - Some(addr_of_mut!(GLOBAL_STATE) as *mut c_void), + Some(&raw mut GLOBAL_STATE as *mut c_void), Some(&TP_CALLBACK_ENVIRON_V3::default()), ) } @@ -227,7 +222,7 @@ impl TimerStruct { unsafe { #[cfg(not(miri))] // creates a new per-process interval timer - libc::timer_create(libc::CLOCK_MONOTONIC, null_mut(), addr_of_mut!(timerid)); + libc::timer_create(libc::CLOCK_MONOTONIC, null_mut(), &raw mut timerid); } Self { @@ -247,6 +242,11 @@ impl TimerStruct { #[cfg(target_os = "linux")] #[must_use] /// Constructor but use batch mode + /// More efficient timeout mechanism with imprecise timing. + /// + /// The timeout will trigger after t seconds and at most within 2*t seconds. + /// This means the actual timeout may occur anywhere in the range [t, 2*t], + /// providing a flexible but bounded execution time limit. 
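Put concretely for the doc comment above: with an `exec_tmout` of five seconds, batch mode flags a hanging run no earlier than five seconds and, in the worst case, only about ten seconds after it began. A trivial sketch of those bounds:

use core::time::Duration;

fn batch_mode_bounds(exec_tmout: Duration) -> (Duration, Duration) {
    // Earliest and latest point at which a hang can be reported.
    (exec_tmout, exec_tmout * 2)
}

// batch_mode_bounds(Duration::from_secs(5)) == (5s, 10s)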
pub fn batch_mode(exec_tmout: Duration) -> Self { let mut me = Self::new(exec_tmout); me.batch_mode = true; @@ -256,6 +256,8 @@ impl TimerStruct { #[cfg(all(unix, not(target_os = "linux")))] /// Set up timer pub fn set_timer(&mut self) { + // # Safety + // Safe because the variables are all alive at this time and don't contain pointers. unsafe { setitimer(ITIMER_REAL, &mut self.itimerval, core::ptr::null_mut()); } @@ -266,12 +268,12 @@ impl TimerStruct { /// Set timer pub fn set_timer(&mut self) { unsafe { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; - write_volatile(addr_of_mut!((*data).ptp_timer), Some(*self.ptp_timer())); + write_volatile(&raw mut (*data).ptp_timer, Some(*self.ptp_timer())); write_volatile( - addr_of_mut!((*data).critical), - addr_of_mut!(*self.critical_mut()) as *mut c_void, + &raw mut (*data).critical, + &raw mut (*self.critical_mut()) as *mut c_void, ); let tm: i64 = -self.milli_sec() * 10 * 1000; let ft = FILETIME { @@ -298,48 +300,50 @@ impl TimerStruct { unsafe { if self.batch_mode { if self.executions == 0 { - libc::timer_settime(self.timerid, 0, addr_of_mut!(self.itimerspec), null_mut()); + libc::timer_settime(self.timerid, 0, &raw mut self.itimerspec, null_mut()); self.tmout_start_time = current_time(); } self.start_time = current_time(); } else { #[cfg(not(miri))] - libc::timer_settime(self.timerid, 0, addr_of_mut!(self.itimerspec), null_mut()); + libc::timer_settime(self.timerid, 0, &raw mut self.itimerspec, null_mut()); } } } #[cfg(all(unix, not(target_os = "linux")))] - /// Disalarm timer + /// Disable the timer pub fn unset_timer(&mut self) { + // # Safety + // No user-provided values. unsafe { let mut itimerval_zero: Itimerval = core::mem::zeroed(); setitimer(ITIMER_REAL, &mut itimerval_zero, core::ptr::null_mut()); } } - /// Disalarm timer + /// Disable the timer #[cfg(target_os = "linux")] #[allow(unused_variables)] pub fn unset_timer(&mut self) { + // # Safety + // Just API calls, no user-provided inputs if self.batch_mode { unsafe { let elapsed = current_time().saturating_sub(self.tmout_start_time); - let elapsed_since_signal = current_time().saturating_sub(self.tmout_start_time); // elapsed may be > than tmout in case of received but ingored signal if elapsed > self.exec_tmout || self.exec_tmout.saturating_sub(elapsed) < self.avg_exec_time * self.avg_mul_k { let disarmed: libc::itimerspec = zeroed(); - libc::timer_settime(self.timerid, 0, addr_of!(disarmed), null_mut()); + libc::timer_settime(self.timerid, 0, &raw const disarmed, null_mut()); // set timer the next exec if self.executions > 0 { self.avg_exec_time = elapsed / self.executions; self.executions = 0; } // readjust K - if elapsed_since_signal > self.exec_tmout * self.avg_mul_k && self.avg_mul_k > 1 - { + if elapsed > self.exec_tmout * self.avg_mul_k && self.avg_mul_k > 1 { self.avg_mul_k -= 1; } } else { @@ -350,16 +354,18 @@ impl TimerStruct { unsafe { let disarmed: libc::itimerspec = zeroed(); #[cfg(not(miri))] - libc::timer_settime(self.timerid, 0, addr_of!(disarmed), null_mut()); + libc::timer_settime(self.timerid, 0, &raw const disarmed, null_mut()); } } } #[cfg(windows)] - /// Disalarm + /// Disable the timer pub fn unset_timer(&mut self) { + // # Safety + // The value accesses are guarded by a critical section. 
unsafe { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; compiler_fence(Ordering::SeqCst); EnterCriticalSection(self.critical_mut()); diff --git a/libafl/src/executors/hooks/unix.rs b/libafl/src/executors/hooks/unix.rs index 2f127ccd80..a63adb0563 100644 --- a/libafl/src/executors/hooks/unix.rs +++ b/libafl/src/executors/hooks/unix.rs @@ -2,13 +2,14 @@ #[cfg(unix)] pub mod unix_signal_handler { use alloc::{boxed::Box, string::String, vec::Vec}; - use core::{mem::transmute, ptr::addr_of_mut}; + use core::mem::transmute; use std::{io::Write, panic}; - use libafl_bolts::os::unix_signals::{ucontext_t, Handler, Signal}; + use libafl_bolts::os::unix_signals::{ucontext_t, Signal, SignalHandler}; use libc::siginfo_t; use crate::{ + corpus::Corpus, events::{EventFirer, EventRestarter}, executors::{ common_signals, @@ -19,7 +20,8 @@ pub mod unix_signal_handler { feedbacks::Feedback, fuzzer::HasObjective, inputs::{Input, UsesInput}, - state::{HasCorpus, HasExecutions, HasSolutions}, + observers::ObserversTuple, + state::{HasCorpus, HasExecutions, HasSolutions, UsesState}, }; pub(crate) type HandlerFuncPtr = unsafe fn( @@ -29,7 +31,7 @@ pub mod unix_signal_handler { data: *mut InProcessExecutorHandlerData, ); - /// A handler that does nothing. + // A handler that does nothing. /*pub fn nop_handler( _signal: Signal, _info: &mut siginfo_t, @@ -39,15 +41,17 @@ pub mod unix_signal_handler { }*/ #[cfg(unix)] - impl Handler for InProcessExecutorHandlerData { - fn handle( + impl SignalHandler for InProcessExecutorHandlerData { + /// # Safety + /// This will access global state. + unsafe fn handle( &mut self, signal: Signal, info: &mut siginfo_t, context: Option<&mut ucontext_t>, ) { unsafe { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; let in_handler = (*data).set_in_handler(true); match signal { Signal::SigUser2 | Signal::SigAlarm => { @@ -75,16 +79,19 @@ pub mod unix_signal_handler { /// invokes the `post_exec` hook on all observer in case of panic pub fn setup_panic_hook() where - E: HasObservers, + E: Executor + HasObservers, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: HasExecutions + HasSolutions + HasCorpus, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { let old_hook = panic::take_hook(); panic::set_hook(Box::new(move |panic_info| unsafe { old_hook(panic_info); - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; let in_handler = (*data).set_in_handler(true); if (*data).is_valid() { // We are fuzzing! 
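The panic hook installed just above chains the previously registered hook before doing its own crash handling. Reduced to its essentials as a standalone, std-only sketch (the crash-reporting body is replaced with a log line here):

use std::panic;

fn install_chained_panic_hook() {
    let old_hook = panic::take_hook();
    panic::set_hook(Box::new(move |panic_info| {
        // Run whatever hook was installed before us first...
        old_hook(panic_info);
        // ...then do our own handling, e.g. persist the crashing input.
        eprintln!("fuzzer panic hook fired: {panic_info}");
    }));
}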
@@ -122,11 +129,14 @@ pub mod unix_signal_handler { _context: Option<&mut ucontext_t>, data: &mut InProcessExecutorHandlerData, ) where - E: HasObservers + HasInProcessHooks, + E: Executor + HasInProcessHooks + HasObservers, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: HasExecutions + HasSolutions + HasCorpus, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { // this stuff is for batch timeout if !data.executor_ptr.is_null() @@ -178,10 +188,13 @@ pub mod unix_signal_handler { data: &mut InProcessExecutorHandlerData, ) where E: Executor + HasObservers, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: HasExecutions + HasSolutions + HasCorpus, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { #[cfg(all(target_os = "android", target_arch = "aarch64"))] let _context = _context.map(|p| { @@ -204,7 +217,7 @@ pub mod unix_signal_handler { let mut bsod = Vec::new(); { let mut writer = std::io::BufWriter::new(&mut bsod); - let _ = writeln!(writer, "input: {:?}", input.generate_name(0)); + let _ = writeln!(writer, "input: {:?}", input.generate_name(None)); let bsod = libafl_bolts::minibsod::generate_minibsod( &mut writer, signal, diff --git a/libafl/src/executors/hooks/windows.rs b/libafl/src/executors/hooks/windows.rs index d771f1d0ad..d4ec7829ae 100644 --- a/libafl/src/executors/hooks/windows.rs +++ b/libafl/src/executors/hooks/windows.rs @@ -2,16 +2,14 @@ #[cfg(all(windows, feature = "std"))] pub mod windows_asan_handler { use alloc::string::String; - use core::{ - ptr::addr_of_mut, - sync::atomic::{compiler_fence, Ordering}, - }; + use core::sync::atomic::{compiler_fence, Ordering}; use windows::Win32::System::Threading::{ EnterCriticalSection, LeaveCriticalSection, CRITICAL_SECTION, }; use crate::{ + corpus::Corpus, events::{EventFirer, EventRestarter}, executors::{ hooks::inprocess::GLOBAL_STATE, inprocess::run_observers_and_save_state, Executor, @@ -20,7 +18,8 @@ pub mod windows_asan_handler { feedbacks::Feedback, fuzzer::HasObjective, inputs::UsesInput, - state::{HasCorpus, HasExecutions, HasSolutions}, + observers::ObserversTuple, + state::{HasCorpus, HasExecutions, HasSolutions, UsesState}, }; /// # Safety @@ -29,11 +28,14 @@ pub mod windows_asan_handler { where E: Executor + HasObservers, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: HasExecutions + HasSolutions + HasCorpus, + E::Observers: ObserversTuple<::Input, E::State>, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; (*data).set_in_handler(true); // Have we set a timer_before? 
if (*data).ptp_timer.is_some() { @@ -109,7 +111,6 @@ pub mod windows_exception_handler { ffi::c_void, mem::transmute, ptr, - ptr::addr_of_mut, sync::atomic::{compiler_fence, Ordering}, }; #[cfg(feature = "std")] @@ -118,13 +119,15 @@ pub mod windows_exception_handler { use std::panic; use libafl_bolts::os::windows_exceptions::{ - ExceptionCode, Handler, CRASH_EXCEPTIONS, EXCEPTION_HANDLERS_SIZE, EXCEPTION_POINTERS, + ExceptionCode, ExceptionHandler, CRASH_EXCEPTIONS, EXCEPTION_HANDLERS_SIZE, + EXCEPTION_POINTERS, }; use windows::Win32::System::Threading::{ EnterCriticalSection, ExitProcess, LeaveCriticalSection, CRITICAL_SECTION, }; use crate::{ + corpus::Corpus, events::{EventFirer, EventRestarter}, executors::{ hooks::inprocess::{HasTimeout, InProcessExecutorHandlerData, GLOBAL_STATE}, @@ -134,7 +137,8 @@ pub mod windows_exception_handler { feedbacks::Feedback, fuzzer::HasObjective, inputs::{Input, UsesInput}, - state::{HasCorpus, HasExecutions, HasSolutions, State}, + observers::ObserversTuple, + state::{HasCorpus, HasExecutions, HasSolutions, State, UsesState}, }; pub(crate) type HandlerFuncPtr = @@ -147,11 +151,17 @@ pub mod windows_exception_handler { ) { }*/ - impl Handler for InProcessExecutorHandlerData { + impl ExceptionHandler for InProcessExecutorHandlerData { + /// # Safety + /// Will dereference `EXCEPTION_POINTERS` and access `GLOBAL_STATE`. #[allow(clippy::not_unsafe_ptr_arg_deref)] - fn handle(&mut self, _code: ExceptionCode, exception_pointers: *mut EXCEPTION_POINTERS) { + unsafe fn handle( + &mut self, + _code: ExceptionCode, + exception_pointers: *mut EXCEPTION_POINTERS, + ) { unsafe { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; let in_handler = (*data).set_in_handler(true); if !(*data).crash_handler.is_null() { let func: HandlerFuncPtr = transmute((*data).crash_handler); @@ -175,15 +185,18 @@ pub mod windows_exception_handler { #[cfg(feature = "std")] pub fn setup_panic_hook() where - E: HasObservers, + E: HasObservers + Executor, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: HasExecutions + HasSolutions + HasCorpus, + E::Observers: ObserversTuple<::Input, E::State>, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { let old_hook = panic::take_hook(); panic::set_hook(Box::new(move |panic_info| unsafe { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; let in_handler = (*data).set_in_handler(true); // Have we set a timer_before? 
if (*data).ptp_timer.is_some() { @@ -235,11 +248,14 @@ pub mod windows_exception_handler { global_state: *mut c_void, _p1: *mut u8, ) where - E: HasObservers + HasInProcessHooks, + E: HasObservers + HasInProcessHooks + Executor, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: State + HasExecutions + HasSolutions + HasCorpus, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { let data: &mut InProcessExecutorHandlerData = &mut *(global_state as *mut InProcessExecutorHandlerData); @@ -306,10 +322,13 @@ pub mod windows_exception_handler { data: &mut InProcessExecutorHandlerData, ) where E: Executor + HasObservers, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: HasExecutions + HasSolutions + HasCorpus, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { // Have we set a timer_before? if data.ptp_timer.is_some() { @@ -400,7 +419,7 @@ pub mod windows_exception_handler { let mut bsod = Vec::new(); { let mut writer = std::io::BufWriter::new(&mut bsod); - writeln!(writer, "input: {:?}", input.generate_name(0)).unwrap(); + writeln!(writer, "input: {:?}", input.generate_name(None)).unwrap(); libafl_bolts::minibsod::generate_minibsod(&mut writer, exception_pointers) .unwrap(); writer.flush().unwrap(); diff --git a/libafl/src/executors/inprocess/inner.rs b/libafl/src/executors/inprocess/inner.rs index e0d850ac93..cb6d38ae76 100644 --- a/libafl/src/executors/inprocess/inner.rs +++ b/libafl/src/executors/inprocess/inner.rs @@ -2,7 +2,7 @@ use core::{ ffi::c_void, fmt::{self, Debug, Formatter}, marker::PhantomData, - ptr::{self, addr_of_mut, null, write_volatile}, + ptr::{self, null, write_volatile}, sync::atomic::{compiler_fence, Ordering}, time::Duration, }; @@ -16,6 +16,7 @@ use crate::executors::hooks::inprocess::HasTimeout; #[cfg(all(windows, feature = "std"))] use crate::executors::hooks::inprocess::HasTimeout; use crate::{ + corpus::Corpus, events::{EventFirer, EventRestarter}, executors::{ hooks::{ @@ -28,18 +29,13 @@ use crate::{ feedbacks::Feedback, fuzzer::HasObjective, inputs::UsesInput, - observers::{ObserversTuple, UsesObservers}, + observers::ObserversTuple, state::{HasCorpus, HasExecutions, HasSolutions, State, UsesState}, Error, }; /// The internal state of `GenericInProcessExecutor`. 
-pub struct GenericInProcessExecutorInner -where - HT: ExecutorHooksTuple, - OT: ObserversTuple, - S: State, -{ +pub struct GenericInProcessExecutorInner { /// The observers, observing each run pub(super) observers: OT, // Crash and timeout hah @@ -49,9 +45,7 @@ where impl Debug for GenericInProcessExecutorInner where - HT: ExecutorHooksTuple, - OT: ObserversTuple + Debug, - S: State, + OT: Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("GenericInProcessExecutorState") @@ -62,28 +56,18 @@ where impl UsesState for GenericInProcessExecutorInner where - HT: ExecutorHooksTuple, - OT: ObserversTuple, S: State, { type State = S; } -impl UsesObservers for GenericInProcessExecutorInner +impl HasObservers for GenericInProcessExecutorInner where - HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State, { type Observers = OT; -} -impl HasObservers for GenericInProcessExecutorInner -where - HT: ExecutorHooksTuple, - OT: ObserversTuple, - S: State, -{ #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { RefIndexable::from(&self.observers) @@ -98,7 +82,6 @@ where impl GenericInProcessExecutorInner where HT: ExecutorHooksTuple, - OT: ObserversTuple, S: State, { /// This function marks the boundary between the fuzzer and the target @@ -116,24 +99,24 @@ where executor_ptr: *const c_void, ) { unsafe { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; write_volatile( - addr_of_mut!((*data).current_input_ptr), + &raw mut (*data).current_input_ptr, ptr::from_ref(input) as *const c_void, ); - write_volatile(addr_of_mut!((*data).executor_ptr), executor_ptr); + write_volatile(&raw mut (*data).executor_ptr, executor_ptr); // Direct raw pointers access /aliasing is pretty undefined behavior. 
// Since the state and event may have moved in memory, refresh them right before the signal may happen write_volatile( - addr_of_mut!((*data).state_ptr), + &raw mut ((*data).state_ptr), ptr::from_mut(state) as *mut c_void, ); write_volatile( - addr_of_mut!((*data).event_mgr_ptr), + &raw mut (*data).event_mgr_ptr, ptr::from_mut(mgr) as *mut c_void, ); write_volatile( - addr_of_mut!((*data).fuzzer_ptr), + &raw mut (*data).fuzzer_ptr, ptr::from_mut(fuzzer) as *mut c_void, ); compiler_fence(Ordering::SeqCst); @@ -150,9 +133,9 @@ where _input: &::Input, ) { unsafe { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; - write_volatile(addr_of_mut!((*data).current_input_ptr), null()); + write_volatile(&raw mut (*data).current_input_ptr, null()); compiler_fence(Ordering::SeqCst); } } @@ -161,8 +144,8 @@ where impl GenericInProcessExecutorInner where HT: ExecutorHooksTuple, - OT: ObserversTuple, - S: HasExecutions + HasSolutions + HasCorpus + State, + OT: ObserversTuple, + S: HasCorpus + HasExecutions + HasSolutions + UsesInput, { /// Create a new in mem executor with the default timeout (5 sec) pub fn generic( @@ -174,10 +157,13 @@ where ) -> Result where E: Executor + HasObservers + HasInProcessHooks, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { Self::with_timeout_generic::( user_hooks, @@ -201,10 +187,13 @@ where ) -> Result where E: Executor + HasObservers + HasInProcessHooks, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { let mut me = Self::with_timeout_generic::( user_hooks, observers, fuzzer, state, event_mgr, exec_tmout, @@ -231,10 +220,13 @@ where ) -> Result where E: Executor + HasObservers + HasInProcessHooks, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { let default = InProcessHooks::new::(timeout)?; let mut hooks = tuple_list!(default).merge(user_hooks); @@ -285,9 +277,7 @@ where impl HasInProcessHooks for GenericInProcessExecutorInner where - HT: ExecutorHooksTuple, - OT: ObserversTuple, - S: State + HasExecutions + HasSolutions + HasCorpus, + S: UsesInput, { /// the timeout handler #[inline] diff --git a/libafl/src/executors/inprocess/mod.rs b/libafl/src/executors/inprocess/mod.rs index 901dd31b56..e326743cce 100644 --- a/libafl/src/executors/inprocess/mod.rs +++ b/libafl/src/executors/inprocess/mod.rs @@ -5,8 +5,6 @@ #![allow(clippy::needless_pass_by_value)] use alloc::boxed::Box; -#[cfg(any(unix, feature = "std"))] -use core::ptr::addr_of_mut; use core::{ borrow::BorrowMut, ffi::c_void, @@ -31,10 +29,12 @@ use crate::{ feedbacks::Feedback, fuzzer::HasObjective, inputs::UsesInput, - observers::{ObserversTuple, UsesObservers}, + observers::ObserversTuple, state::{HasCorpus, HasCurrentTestcase, HasExecutions, HasSolutions, State, UsesState}, Error, HasMetadata, }; +#[cfg(any(unix, feature = "std"))] +use crate::{ExecutionProcessor, HasScheduler}; /// The inner structure of 
`InProcessExecutor`. pub mod inner; @@ -63,7 +63,7 @@ where H: FnMut(&S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State, { harness_fn: HB, @@ -76,7 +76,7 @@ where H: FnMut(&S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple + Debug, + OT: ObserversTuple + Debug, S: State, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { @@ -92,30 +92,19 @@ where H: FnMut(&S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State, { type State = S; } -impl UsesObservers for GenericInProcessExecutor -where - H: FnMut(&S::Input) -> ExitKind + ?Sized, - HB: BorrowMut, - HT: ExecutorHooksTuple, - OT: ObserversTuple, - S: State, -{ - type Observers = OT; -} - impl Executor for GenericInProcessExecutor where EM: UsesState, H: FnMut(&S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State + HasExecutions, Z: UsesState, { @@ -147,9 +136,11 @@ where H: FnMut(&S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State, { + type Observers = OT; + #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { self.inner.observers() @@ -164,8 +155,10 @@ where impl<'a, H, OT, S> InProcessExecutor<'a, H, OT, S> where H: FnMut(&S::Input) -> ExitKind + ?Sized, - OT: ObserversTuple, + OT: ObserversTuple, S: HasExecutions + HasSolutions + HasCorpus + State, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { /// Create a new in mem executor with the default timeout (5 sec) pub fn new( @@ -178,7 +171,7 @@ where where Self: Executor + HasObservers, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, { @@ -206,9 +199,11 @@ where where Self: Executor, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { let inner = GenericInProcessExecutorInner::batched_timeout_generic::( tuple_list!(), @@ -243,11 +238,13 @@ where timeout: Duration, ) -> Result where - Self: Executor + HasObservers, + Self: Executor, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { let inner = GenericInProcessExecutorInner::with_timeout_generic::( tuple_list!(), @@ -271,8 +268,10 @@ where H: FnMut(&S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State + HasExecutions + HasSolutions + HasCorpus, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { /// Create a new in mem executor with the default timeout (5 sec) pub fn generic( @@ -284,9 +283,9 @@ where event_mgr: &mut EM, ) -> Result where - Self: Executor + HasObservers, + Self: Executor, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, { @@ -313,11 +312,13 @@ where exec_tmout: Duration, ) -> Result where - Self: Executor + HasObservers, + Self: Executor, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { let inner = GenericInProcessExecutorInner::batched_timeout_generic::( 
user_hooks, observers, fuzzer, state, event_mgr, exec_tmout, @@ -348,11 +349,13 @@ where timeout: Duration, ) -> Result where - Self: Executor + HasObservers, + Self: Executor, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { let inner = GenericInProcessExecutorInner::with_timeout_generic::( user_hooks, observers, fuzzer, state, event_mgr, timeout, @@ -407,7 +410,7 @@ where H: FnMut(&::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State + HasExecutions + HasSolutions + HasCorpus, { /// the timeout handler @@ -429,16 +432,18 @@ where pub fn run_observers_and_save_state( executor: &mut E, state: &mut E::State, - input: &::Input, + input: &E::Input, fuzzer: &mut Z, event_mgr: &mut EM, exitkind: ExitKind, ) where - E: HasObservers, + E: Executor + HasObservers, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, - E::State: HasExecutions + HasSolutions + HasCorpus, + OF: Feedback, + E::State: HasExecutions + HasSolutions + HasCorpus + HasCurrentTestcase, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me { let mut observers = executor.observers_mut(); @@ -452,8 +457,7 @@ pub fn run_observers_and_save_state( .expect("In run_observers_and_save_state objective failure."); if interesting { - let executions = *state.executions(); - let mut new_testcase = Testcase::with_executions(input.clone(), executions); + let mut new_testcase = Testcase::from(input.clone()); new_testcase.add_metadata(exitkind); new_testcase.set_parent_id_optional(*state.corpus().current()); @@ -474,7 +478,6 @@ pub fn run_observers_and_save_state( state, Event::Objective { objective_size: state.solutions().count(), - executions, time: libafl_bolts::current_time(), }, ) @@ -496,12 +499,16 @@ pub fn run_observers_and_save_state( pub unsafe fn generic_inproc_crash_handler() where E: Executor + HasObservers, + E::Observers: ObserversTuple<::Input, E::State>, EM: EventFirer + EventRestarter, - OF: Feedback, - E::State: HasExecutions + HasSolutions + HasCorpus, - Z: HasObjective, + OF: Feedback, + E::State: HasExecutions + HasSolutions + HasCorpus + HasCurrentTestcase, + Z: HasObjective + + HasScheduler + + ExecutionProcessor, + <::State as HasSolutions>::Solutions: Corpus, //delete me { - let data = addr_of_mut!(GLOBAL_STATE); + let data = &raw mut GLOBAL_STATE; let in_handler = (*data).set_in_handler(true); if (*data).is_valid() { @@ -556,7 +563,7 @@ mod tests { let mut mgr = NopEventManager::new(); let mut state = StdState::new(rand, corpus, solutions, &mut feedback, &mut objective).unwrap(); - let mut fuzzer = StdFuzzer::<_, _, _, ()>::new(sche, feedback, objective); + let mut fuzzer = StdFuzzer::<_, _, _, _>::new(sche, feedback, objective); let mut in_process_executor = InProcessExecutor::new( &mut harness, diff --git a/libafl/src/executors/inprocess/stateful.rs b/libafl/src/executors/inprocess/stateful.rs index e18ece1351..27bd145836 100644 --- a/libafl/src/executors/inprocess/stateful.rs +++ b/libafl/src/executors/inprocess/stateful.rs @@ -11,6 +11,7 @@ use core::{ use libafl_bolts::tuples::{tuple_list, RefIndexable}; use crate::{ + corpus::Corpus, events::{EventFirer, EventRestarter}, executors::{ hooks::{inprocess::InProcessHooks, ExecutorHooksTuple}, @@ -20,7 +21,7 @@ use crate::{ feedbacks::Feedback, fuzzer::HasObjective, inputs::UsesInput, - 
observers::{ObserversTuple, UsesObservers}, + observers::ObserversTuple, state::{HasCorpus, HasExecutions, HasSolutions, State, UsesState}, Error, }; @@ -33,8 +34,8 @@ pub type StatefulInProcessExecutor<'a, H, OT, S, ES> = /// The process executor simply calls a target function, as boxed `FnMut` trait object /// The internal state of the executor is made available to the harness. pub type OwnedInProcessExecutor = StatefulGenericInProcessExecutor< - dyn FnMut(&::Input, &mut ES) -> ExitKind, - Box::Input, &mut ES) -> ExitKind>, + dyn FnMut(&mut ES, &::Input) -> ExitKind, + Box::Input) -> ExitKind>, (), OT, S, @@ -46,10 +47,10 @@ pub type OwnedInProcessExecutor = StatefulGenericInProcessExecutor< #[allow(dead_code)] pub struct StatefulGenericInProcessExecutor where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, + H: FnMut(&mut ES, &mut S, &S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State, { /// The harness function, being executed for each fuzzing loop execution @@ -63,10 +64,10 @@ where impl Debug for StatefulGenericInProcessExecutor where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, + H: FnMut(&mut ES, &mut S, &S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple + Debug, + OT: ObserversTuple + Debug, S: State, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { @@ -79,34 +80,23 @@ where impl UsesState for StatefulGenericInProcessExecutor where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, + H: FnMut(&mut ES, &mut S, &S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State, { type State = S; } -impl UsesObservers for StatefulGenericInProcessExecutor -where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, - HB: BorrowMut, - HT: ExecutorHooksTuple, - OT: ObserversTuple, - S: State, -{ - type Observers = OT; -} - impl Executor for StatefulGenericInProcessExecutor where EM: UsesState, - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, + H: FnMut(&mut ES, &mut S, &S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State + HasExecutions, Z: UsesState, { @@ -125,7 +115,7 @@ where } self.inner.hooks.pre_exec_all(state, input); - let ret = (self.harness_fn.borrow_mut())(input, &mut self.exposed_executor_state); + let ret = self.harness_fn.borrow_mut()(&mut self.exposed_executor_state, state, input); self.inner.hooks.post_exec_all(state, input); self.inner.leave_target(fuzzer, state, mgr, input); @@ -135,12 +125,13 @@ where impl HasObservers for StatefulGenericInProcessExecutor where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, + H: FnMut(&mut ES, &mut S, &S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State, { + type Observers = OT; #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { self.inner.observers() @@ -154,9 +145,11 @@ where impl<'a, H, OT, S, ES> StatefulInProcessExecutor<'a, H, OT, S, ES> where - H: FnMut(&::Input, &mut ES) -> ExitKind + ?Sized, - OT: ObserversTuple, + H: FnMut(&mut ES, &mut S, &::Input) -> ExitKind + ?Sized, + OT: ObserversTuple, S: HasExecutions + HasSolutions + HasCorpus + State, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { /// Create a new in mem executor with the default timeout (5 sec) pub fn new( @@ -170,7 +163,7 @@ where where Self: 
Executor, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, { @@ -200,9 +193,11 @@ where where Self: Executor, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { let inner = GenericInProcessExecutorInner::batched_timeout_generic::( tuple_list!(), @@ -241,9 +236,11 @@ where where Self: Executor, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { let inner = GenericInProcessExecutorInner::with_timeout_generic::( tuple_list!(), @@ -265,10 +262,10 @@ where impl StatefulGenericInProcessExecutor where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, + H: FnMut(&mut ES, &mut S, &S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State, { /// The executor state given to the harness @@ -284,11 +281,13 @@ where impl StatefulGenericInProcessExecutor where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, + H: FnMut(&mut ES, &mut S, &S::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State + HasExecutions + HasSolutions + HasCorpus, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { /// Create a new in mem executor with the default timeout (5 sec) pub fn generic( @@ -302,7 +301,7 @@ where ) -> Result where EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, { @@ -333,9 +332,11 @@ where ) -> Result where EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { let inner = GenericInProcessExecutorInner::batched_timeout_generic::( user_hooks, observers, fuzzer, state, event_mgr, exec_tmout, @@ -370,9 +371,11 @@ where ) -> Result where EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State, Z: HasObjective, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { let inner = GenericInProcessExecutorInner::with_timeout_generic::( user_hooks, observers, fuzzer, state, event_mgr, timeout, @@ -414,10 +417,10 @@ where impl HasInProcessHooks for StatefulGenericInProcessExecutor where - H: FnMut(&::Input, &mut ES) -> ExitKind + ?Sized, + H: FnMut(&mut ES, &mut S, &::Input) -> ExitKind + ?Sized, HB: BorrowMut, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, S: State + HasExecutions + HasSolutions + HasCorpus, { /// the timeout handler diff --git a/libafl/src/executors/inprocess_fork/inner.rs b/libafl/src/executors/inprocess_fork/inner.rs index 103bba4401..9c82fbf70c 100644 --- a/libafl/src/executors/inprocess_fork/inner.rs +++ b/libafl/src/executors/inprocess_fork/inner.rs @@ -2,7 +2,7 @@ use core::{ ffi::c_void, fmt::{self, Debug, Formatter}, marker::PhantomData, - ptr::{self, addr_of_mut, null_mut, write_volatile}, + ptr::{self, null_mut, write_volatile}, sync::atomic::{compiler_fence, Ordering}, time::Duration, }; @@ -29,21 +29,13 @@ use crate::{ ExitKind, HasObservers, }, inputs::UsesInput, - observers::{ObserversTuple, UsesObservers}, + observers::ObserversTuple, state::{State, UsesState}, Error, }; /// Inner state of GenericInProcessExecutor-like structures. 
-pub struct GenericInProcessForkExecutorInner -where - OT: ObserversTuple, - S: UsesInput, - SP: ShMemProvider, - HT: ExecutorHooksTuple, - EM: UsesState, - Z: UsesState, -{ +pub struct GenericInProcessForkExecutorInner { pub(super) hooks: (InChildProcessHooks, HT), pub(super) shmem_provider: SP, pub(super) observers: OT, @@ -56,12 +48,9 @@ where impl Debug for GenericInProcessForkExecutorInner where - OT: ObserversTuple + Debug, - S: UsesInput, - SP: ShMemProvider, - HT: ExecutorHooksTuple + Debug, - EM: UsesState, - Z: UsesState, + HT: Debug, + OT: Debug, + SP: Debug, { #[cfg(target_os = "linux")] fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { @@ -86,19 +75,48 @@ where impl UsesState for GenericInProcessForkExecutorInner where - OT: ObserversTuple, S: State, - SP: ShMemProvider, - HT: ExecutorHooksTuple, - EM: UsesState, - Z: UsesState, { type State = S; } +#[cfg(target_os = "linux")] +fn parse_itimerspec(timeout: Duration) -> libc::itimerspec { + let milli_sec = timeout.as_millis(); + let it_value = libc::timespec { + tv_sec: (milli_sec / 1000) as _, + tv_nsec: ((milli_sec % 1000) * 1000 * 1000) as _, + }; + let it_interval = libc::timespec { + tv_sec: 0, + tv_nsec: 0, + }; + libc::itimerspec { + it_interval, + it_value, + } +} + +#[cfg(not(target_os = "linux"))] +fn parse_itimerval(timeout: Duration) -> Itimerval { + let milli_sec = timeout.as_millis(); + let it_value = Timeval { + tv_sec: (milli_sec / 1000) as i64, + tv_usec: (milli_sec % 1000) as i64, + }; + let it_interval = Timeval { + tv_sec: 0, + tv_usec: 0, + }; + Itimerval { + it_interval, + it_value, + } +} + impl GenericInProcessForkExecutorInner where - OT: ObserversTuple + Debug, + OT: ObserversTuple + Debug, S: State + UsesInput, SP: ShMemProvider, HT: ExecutorHooksTuple, @@ -126,10 +144,10 @@ where let mut timerid: libc::timer_t = null_mut(); // creates a new per-process interval timer // we can't do this from the parent, timerid is unique to each process. - libc::timer_create(libc::CLOCK_MONOTONIC, null_mut(), addr_of_mut!(timerid)); + libc::timer_create(libc::CLOCK_MONOTONIC, null_mut(), &raw mut timerid); // log::info!("Set timer! {:#?} {timerid:#?}", self.itimerspec); - let _: i32 = libc::timer_settime(timerid, 0, addr_of_mut!(self.itimerspec), null_mut()); + let _: i32 = libc::timer_settime(timerid, 0, &raw mut self.itimerspec, null_mut()); } #[cfg(not(target_os = "linux"))] { @@ -194,10 +212,7 @@ impl GenericInProcessForkExecutorInner, S: State, - OT: ObserversTuple, - SP: ShMemProvider, - EM: EventFirer + EventRestarter, - Z: UsesState, + OT: ObserversTuple, { #[inline] /// This function marks the boundary between the fuzzer and the target. 
@@ -209,17 +224,17 @@ where input: &::Input, ) { unsafe { - let data = addr_of_mut!(FORK_EXECUTOR_GLOBAL_DATA); + let data = &raw mut FORK_EXECUTOR_GLOBAL_DATA; write_volatile( - addr_of_mut!((*data).executor_ptr), + &raw mut (*data).executor_ptr, ptr::from_ref(self) as *const c_void, ); write_volatile( - addr_of_mut!((*data).current_input_ptr), + &raw mut (*data).current_input_ptr, ptr::from_ref(input) as *const c_void, ); write_volatile( - addr_of_mut!((*data).state_ptr), + &raw mut ((*data).state_ptr), ptr::from_mut(state) as *mut c_void, ); compiler_fence(Ordering::SeqCst); @@ -253,21 +268,7 @@ where let default_hooks = InChildProcessHooks::new::()?; let mut hooks = tuple_list!(default_hooks).merge(userhooks); hooks.init_all::(state); - - let milli_sec = timeout.as_millis(); - let it_value = libc::timespec { - tv_sec: (milli_sec / 1000) as _, - tv_nsec: ((milli_sec % 1000) * 1000 * 1000) as _, - }; - let it_interval = libc::timespec { - tv_sec: 0, - tv_nsec: 0, - }; - let itimerspec = libc::itimerspec { - it_interval, - it_value, - }; - + let itimerspec = parse_itimerspec(timeout); Ok(Self { shmem_provider, observers, @@ -293,19 +294,7 @@ where let mut hooks = tuple_list!(default_hooks).merge(userhooks); hooks.init_all::(state); - let milli_sec = timeout.as_millis(); - let it_value = Timeval { - tv_sec: (milli_sec / 1000) as i64, - tv_usec: (milli_sec % 1000) as i64, - }; - let it_interval = Timeval { - tv_sec: 0, - tv_usec: 0, - }; - let itimerval = Itimerval { - it_interval, - it_value, - }; + let itimerval = parse_itimerval(timeout); Ok(Self { shmem_provider, @@ -317,27 +306,13 @@ where } } -impl UsesObservers for GenericInProcessForkExecutorInner -where - HT: ExecutorHooksTuple, - OT: ObserversTuple, - S: State, - SP: ShMemProvider, - EM: UsesState, - Z: UsesState, -{ - type Observers = OT; -} - impl HasObservers for GenericInProcessForkExecutorInner where - HT: ExecutorHooksTuple, + OT: ObserversTuple, S: State, - OT: ObserversTuple, - SP: ShMemProvider, - EM: UsesState, - Z: UsesState, { + type Observers = OT; + #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { RefIndexable::from(&self.observers) diff --git a/libafl/src/executors/inprocess_fork/mod.rs b/libafl/src/executors/inprocess_fork/mod.rs index e59e6744e4..fd5e7c3f6c 100644 --- a/libafl/src/executors/inprocess_fork/mod.rs +++ b/libafl/src/executors/inprocess_fork/mod.rs @@ -22,7 +22,7 @@ use crate::{ feedbacks::Feedback, fuzzer::HasObjective, inputs::UsesInput, - observers::{ObserversTuple, UsesObservers}, + observers::ObserversTuple, state::{HasExecutions, HasSolutions, State, UsesState}, Error, }; @@ -37,10 +37,12 @@ pub(crate) type ForkHandlerFuncPtr = unsafe fn( /// The inner structure of `InProcessForkExecutor`. pub mod inner; -/// A version of `InProcessForkExecutor` with a state accessible from the harness. pub mod stateful; -/// The `InProcessForkExecutor` with no user hooks +/// The `InProcessForkExecutor` with no user hooks. +/// +/// On Linux, when fuzzing a Rust target, set `panic = "abort"` in your `Cargo.toml` (see [Cargo documentation](https://doc.rust-lang.org/cargo/reference/profiles.html#panic)). +/// Else panics can not be caught by `LibAFL`. 
pub type InProcessForkExecutor<'a, H, OT, S, SP, EM, Z> = GenericInProcessForkExecutor<'a, H, (), OT, S, SP, EM, Z>; @@ -48,10 +50,10 @@ impl<'a, H, OT, S, SP, EM, Z, OF> InProcessForkExecutor<'a, H, OT, S, SP, EM, Z> where H: FnMut(&S::Input) -> ExitKind + ?Sized, S: State, - OT: ObserversTuple, + OT: ObserversTuple, SP: ShMemProvider, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: HasSolutions, Z: HasObjective, { @@ -80,10 +82,13 @@ where } /// [`GenericInProcessForkExecutor`] is an executor that forks the current process before each execution. +/// +/// On Linux, when fuzzing a Rust target, set `panic = "abort"` in your `Cargo.toml` (see [Cargo documentation](https://doc.rust-lang.org/cargo/reference/profiles.html#panic)). +/// Else panics can not be caught by `LibAFL`. pub struct GenericInProcessForkExecutor<'a, H, HT, OT, S, SP, EM, Z> where H: FnMut(&S::Input) -> ExitKind + ?Sized, - OT: ObserversTuple, + OT: ObserversTuple, S: UsesInput, SP: ShMemProvider, HT: ExecutorHooksTuple, @@ -94,11 +99,10 @@ where inner: GenericInProcessForkExecutorInner, } -impl<'a, H, HT, OT, S, SP, EM, Z> Debug - for GenericInProcessForkExecutor<'a, H, HT, OT, S, SP, EM, Z> +impl Debug for GenericInProcessForkExecutor<'_, H, HT, OT, S, SP, EM, Z> where H: FnMut(&S::Input) -> ExitKind + ?Sized, - OT: ObserversTuple + Debug, + OT: ObserversTuple + Debug, S: UsesInput, SP: ShMemProvider, HT: ExecutorHooksTuple + Debug, @@ -122,11 +126,11 @@ where } } -impl<'a, H, HT, OT, S, SP, EM, Z> UsesState - for GenericInProcessForkExecutor<'a, H, HT, OT, S, SP, EM, Z> +impl UsesState + for GenericInProcessForkExecutor<'_, H, HT, OT, S, SP, EM, Z> where H: FnMut(&S::Input) -> ExitKind + ?Sized, - OT: ObserversTuple, + OT: ObserversTuple, S: State, SP: ShMemProvider, HT: ExecutorHooksTuple, @@ -136,11 +140,11 @@ where type State = S; } -impl<'a, EM, H, HT, OT, S, SP, Z> Executor - for GenericInProcessForkExecutor<'a, H, HT, OT, S, SP, EM, Z> +impl Executor + for GenericInProcessForkExecutor<'_, H, HT, OT, S, SP, EM, Z> where H: FnMut(&S::Input) -> ExitKind + ?Sized, - OT: ObserversTuple + Debug, + OT: ObserversTuple + Debug, S: State + HasExecutions, SP: ShMemProvider, HT: ExecutorHooksTuple, @@ -182,10 +186,10 @@ impl<'a, H, HT, OT, S, SP, EM, Z, OF> GenericInProcessForkExecutor<'a, H, HT, OT where H: FnMut(&S::Input) -> ExitKind + ?Sized, HT: ExecutorHooksTuple, - OT: ObserversTuple, + OT: ObserversTuple, SP: ShMemProvider, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, S: State + HasSolutions, Z: HasObjective, { @@ -229,31 +233,18 @@ where { } } -impl<'a, H, HT, OT, S, SP, EM, Z> UsesObservers - for GenericInProcessForkExecutor<'a, H, HT, OT, S, SP, EM, Z> +impl HasObservers + for GenericInProcessForkExecutor<'_, H, HT, OT, S, SP, EM, Z> where H: FnMut(&S::Input) -> ExitKind + ?Sized, HT: ExecutorHooksTuple, - OT: ObserversTuple, S: State, + OT: ObserversTuple, SP: ShMemProvider, EM: UsesState, Z: UsesState, { type Observers = OT; -} - -impl<'a, H, HT, OT, S, SP, EM, Z> HasObservers - for GenericInProcessForkExecutor<'a, H, HT, OT, S, SP, EM, Z> -where - H: FnMut(&S::Input) -> ExitKind + ?Sized, - HT: ExecutorHooksTuple, - S: State, - OT: ObserversTuple, - SP: ShMemProvider, - EM: UsesState, - Z: UsesState, -{ #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { self.inner.observers() @@ -266,10 +257,8 @@ where } /// signal hooks and `panic_hooks` for the child process - pub mod child_signal_handlers { use alloc::boxed::Box; - use 
core::ptr::addr_of_mut; use std::panic; use libafl_bolts::os::unix_signals::{ucontext_t, Signal}; @@ -282,17 +271,19 @@ pub mod child_signal_handlers { }, inputs::UsesInput, observers::ObserversTuple, + state::UsesState, }; /// invokes the `post_exec_child` hook on all observer in case the child process panics pub fn setup_child_panic_hook() where - E: HasObservers, + E: HasObservers + UsesState, + E::Observers: ObserversTuple<::Input, E::State>, { let old_hook = panic::take_hook(); panic::set_hook(Box::new(move |panic_info| unsafe { old_hook(panic_info); - let data = addr_of_mut!(FORK_EXECUTOR_GLOBAL_DATA); + let data = &raw mut FORK_EXECUTOR_GLOBAL_DATA; if !data.is_null() && (*data).is_valid() { let executor = (*data).executor_mut::(); let mut observers = executor.observers_mut(); @@ -322,7 +313,8 @@ pub mod child_signal_handlers { _context: Option<&mut ucontext_t>, data: &mut InProcessForkExecutorGlobalData, ) where - E: HasObservers, + E: HasObservers + UsesState, + E::Observers: ObserversTuple<::Input, E::State>, { if data.is_valid() { let executor = data.executor_mut::(); @@ -345,7 +337,8 @@ pub mod child_signal_handlers { _context: Option<&mut ucontext_t>, data: &mut InProcessForkExecutorGlobalData, ) where - E: HasObservers, + E: HasObservers + UsesState, + E::Observers: ObserversTuple<::Input, E::State>, { if data.is_valid() { let executor = data.executor_mut::(); @@ -361,6 +354,7 @@ pub mod child_signal_handlers { } #[cfg(test)] +#[cfg(all(feature = "std", feature = "fork", unix))] mod tests { use libafl_bolts::tuples::tuple_list; use serial_test::serial; @@ -373,7 +367,6 @@ mod tests { #[test] #[serial] #[cfg_attr(miri, ignore)] - #[cfg(all(feature = "std", feature = "fork", unix))] fn test_inprocessfork_exec() { use core::marker::PhantomData; @@ -389,7 +382,7 @@ mod tests { hooks::inprocess_fork::InChildProcessHooks, inprocess_fork::GenericInProcessForkExecutor, }, - fuzzer::test::NopFuzzer, + fuzzer::NopFuzzer, state::NopState, }; diff --git a/libafl/src/executors/inprocess_fork/stateful.rs b/libafl/src/executors/inprocess_fork/stateful.rs index 118bdb0ec6..f5a5d854c2 100644 --- a/libafl/src/executors/inprocess_fork/stateful.rs +++ b/libafl/src/executors/inprocess_fork/stateful.rs @@ -1,4 +1,7 @@ -//! The `StatefulGenericInProcessForkExecutor` to do forking before executing the harness in-processly. Harness can access internal state. +//! A version of `InProcessForkExecutor` with a state accessible from the harness. +//! +//! The `StatefulGenericInProcessForkExecutor` to do forking before executing the harness in-process. +//! The harness can access internal state. 
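A minimal sketch, using hypothetical names (`MyEmuState`, `run_target`), of a harness closure matching the new `FnMut(&mut ES, &S::Input) -> ExitKind` shape that the stateful fork executors expect after this change; the exposed executor state is now passed first and the input second. This is only an illustration of the signature, not code from the patch:

```rust
use libafl::executors::ExitKind;
use libafl::inputs::{BytesInput, HasTargetBytes};
use libafl_bolts::AsSlice;

// Hypothetical per-executor state exposed to the harness.
struct MyEmuState {
    runs: usize,
}

// Stand-in for the real target invocation.
fn run_target(_data: &[u8]) -> bool {
    true
}

fn main() {
    // The closure takes the exposed executor state first, then the input.
    let mut harness = |emu: &mut MyEmuState, input: &BytesInput| -> ExitKind {
        emu.runs += 1;
        let bytes = input.target_bytes();
        if run_target(bytes.as_slice()) {
            ExitKind::Ok
        } else {
            ExitKind::Crash
        }
    };

    // In a fuzzer this closure would be handed to the stateful fork executor
    // together with the exposed state; here it is just called standalone.
    let mut emu = MyEmuState { runs: 0 };
    let _ = harness(&mut emu, &BytesInput::new(b"hello".to_vec()));
}
```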
use core::{ fmt::{self, Debug, Formatter}, marker::PhantomData, @@ -20,8 +23,8 @@ use crate::{ feedbacks::Feedback, fuzzer::HasObjective, inputs::UsesInput, - observers::{ObserversTuple, UsesObservers}, - state::{HasExecutions, HasSolutions, State, UsesState}, + observers::ObserversTuple, + state::{HasExecutions, State, UsesState}, Error, }; @@ -29,15 +32,11 @@ use crate::{ pub type StatefulInProcessForkExecutor<'a, H, OT, S, SP, ES, EM, Z> = StatefulGenericInProcessForkExecutor<'a, H, (), OT, S, SP, ES, EM, Z>; -impl<'a, H, OT, S, SP, ES, EM, Z, OF> StatefulInProcessForkExecutor<'a, H, OT, S, SP, ES, EM, Z> +impl<'a, H, OT, S, SP, ES, EM, Z> StatefulInProcessForkExecutor<'a, H, OT, S, SP, ES, EM, Z> where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, - OT: ObserversTuple, - SP: ShMemProvider, - EM: EventFirer + EventRestarter, - OF: Feedback, - S: State + HasSolutions, - Z: HasObjective, + H: FnMut(&mut ES, &S::Input) -> ExitKind + ?Sized, + OT: ObserversTuple, + S: State, { #[allow(clippy::too_many_arguments)] /// The constructor for `InProcessForkExecutor` @@ -68,30 +67,26 @@ where /// [`StatefulGenericInProcessForkExecutor`] is an executor that forks the current process before each execution. Harness can access some internal state. pub struct StatefulGenericInProcessForkExecutor<'a, H, HT, OT, S, SP, ES, EM, Z> where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, - OT: ObserversTuple, + H: FnMut(&mut ES, &S::Input) -> ExitKind + ?Sized, S: UsesInput, - SP: ShMemProvider, - HT: ExecutorHooksTuple, - EM: UsesState, - Z: UsesState, { + /// The harness function, being executed for each fuzzing loop execution harness_fn: &'a mut H, - exposed_executor_state: ES, - inner: GenericInProcessForkExecutorInner, + /// The state used as argument of the harness + pub exposed_executor_state: ES, + /// Inner state of the executor + pub inner: GenericInProcessForkExecutorInner, phantom: PhantomData, } -impl<'a, H, HT, OT, S, SP, ES, EM, Z> Debug - for StatefulGenericInProcessForkExecutor<'a, H, HT, OT, S, SP, ES, EM, Z> +impl Debug + for StatefulGenericInProcessForkExecutor<'_, H, HT, OT, S, SP, ES, EM, Z> where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, - OT: ObserversTuple + Debug, + H: FnMut(&mut ES, &S::Input) -> ExitKind + ?Sized, + HT: Debug, + OT: Debug, S: UsesInput, - SP: ShMemProvider, - HT: ExecutorHooksTuple + Debug, - EM: UsesState, - Z: UsesState, + SP: Debug, { #[cfg(target_os = "linux")] fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { @@ -110,31 +105,26 @@ where } } -impl<'a, H, HT, OT, S, SP, ES, EM, Z> UsesState - for StatefulGenericInProcessForkExecutor<'a, H, HT, OT, S, SP, ES, EM, Z> +impl UsesState + for StatefulGenericInProcessForkExecutor<'_, H, HT, OT, S, SP, ES, EM, Z> where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, - OT: ObserversTuple, + H: FnMut(&mut ES, &S::Input) -> ExitKind + ?Sized, S: State, - SP: ShMemProvider, - HT: ExecutorHooksTuple, - EM: UsesState, - Z: UsesState, { type State = S; } -impl<'a, EM, H, HT, OT, S, SP, Z, ES, OF> Executor - for StatefulGenericInProcessForkExecutor<'a, H, HT, OT, S, SP, ES, EM, Z> +impl Executor + for StatefulGenericInProcessForkExecutor<'_, H, HT, OT, S, SP, ES, EM, Z> where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, - OT: ObserversTuple + Debug, + EM: EventFirer + EventRestarter, + H: FnMut(&mut ES, &S::Input) -> ExitKind + ?Sized, + HT: ExecutorHooksTuple, + OF: Feedback, + OT: ObserversTuple + Debug, S: State + HasExecutions, SP: ShMemProvider, - HT: ExecutorHooksTuple, - EM: 
EventFirer + EventRestarter, Z: HasObjective, - OF: Feedback, { #[allow(unreachable_code)] #[inline] @@ -153,7 +143,7 @@ where Ok(ForkResult::Child) => { // Child self.inner.pre_run_target_child(fuzzer, state, mgr, input)?; - (self.harness_fn)(input, &mut self.exposed_executor_state); + (self.harness_fn)(&mut self.exposed_executor_state, input); self.inner.post_run_target_child(fuzzer, state, mgr, input); Ok(ExitKind::Ok) } @@ -167,18 +157,13 @@ where } } -impl<'a, H, HT, OT, S, SP, ES, EM, Z, OF> +impl<'a, H, HT, OT, S, SP, ES, EM, Z> StatefulGenericInProcessForkExecutor<'a, H, HT, OT, S, SP, ES, EM, Z> where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, + H: FnMut(&mut ES, &S::Input) -> ExitKind + ?Sized, HT: ExecutorHooksTuple, - OT: ObserversTuple, - SP: ShMemProvider, - Z: UsesState, - EM: EventFirer + EventRestarter, - OF: Feedback, - S: State + HasSolutions, - Z: HasObjective, + OT: ObserversTuple, + S: State, { /// Creates a new [`StatefulGenericInProcessForkExecutor`] with custom hooks #[allow(clippy::too_many_arguments)] @@ -222,31 +207,15 @@ where } } -impl<'a, H, HT, OT, S, SP, ES, EM, Z> UsesObservers - for StatefulGenericInProcessForkExecutor<'a, H, HT, OT, S, SP, ES, EM, Z> +impl HasObservers + for StatefulGenericInProcessForkExecutor<'_, H, HT, OT, S, SP, ES, EM, Z> where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, - HT: ExecutorHooksTuple, - OT: ObserversTuple, + H: FnMut(&mut ES, &S::Input) -> ExitKind + ?Sized, + OT: ObserversTuple, S: State, - SP: ShMemProvider, - EM: UsesState, - Z: UsesState, { type Observers = OT; -} -impl<'a, H, HT, OT, S, SP, ES, EM, Z> HasObservers - for StatefulGenericInProcessForkExecutor<'a, H, HT, OT, S, SP, ES, EM, Z> -where - H: FnMut(&S::Input, &mut ES) -> ExitKind + ?Sized, - HT: ExecutorHooksTuple, - S: State, - OT: ObserversTuple, - SP: ShMemProvider, - EM: UsesState, - Z: UsesState, -{ #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { self.inner.observers() diff --git a/libafl/src/executors/mod.rs b/libafl/src/executors/mod.rs index 5e9d462b36..000cb68081 100644 --- a/libafl/src/executors/mod.rs +++ b/libafl/src/executors/mod.rs @@ -2,10 +2,10 @@ #[cfg(unix)] use alloc::vec::Vec; -use core::fmt::Debug; +use core::{fmt::Debug, time::Duration}; pub use combined::CombinedExecutor; -#[cfg(all(feature = "std", any(unix, doc)))] +#[cfg(all(feature = "std", unix))] pub use command::CommandExecutor; pub use differential::DiffExecutor; #[cfg(all(feature = "std", feature = "fork", unix))] @@ -20,14 +20,10 @@ use serde::{Deserialize, Serialize}; pub use shadow::ShadowExecutor; pub use with_observers::WithObservers; -use crate::{ - observers::{ObserversTuple, UsesObservers}, - state::UsesState, - Error, -}; +use crate::{state::UsesState, Error}; pub mod combined; -#[cfg(all(feature = "std", any(unix, doc)))] +#[cfg(all(feature = "std", unix))] pub mod command; pub mod differential; #[cfg(all(feature = "std", feature = "fork", unix))] @@ -109,7 +105,10 @@ impl From for DiffExitKind { libafl_bolts::impl_serdeany!(DiffExitKind); /// Holds a tuple of Observers -pub trait HasObservers: UsesObservers { +pub trait HasObservers { + /// The observer + type Observers; + /// Get the linked observers fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers>; @@ -131,18 +130,15 @@ where mgr: &mut EM, input: &Self::Input, ) -> Result; +} - /// Wraps this Executor with the given [`ObserversTuple`] to implement [`HasObservers`]. 
- /// - /// If the executor already implements [`HasObservers`], then the original implementation will be overshadowed by - /// the implementation of this wrapper. - fn with_observers(self, observers: OT) -> WithObservers - where - Self: Sized, - OT: ObserversTuple, - { - WithObservers::new(self, observers) - } +/// A trait that allows to get/set an `Executor`'s timeout thresold +pub trait HasTimeout { + /// Get a timeout + fn timeout(&self) -> Duration; + + /// Set timeout + fn set_timeout(&mut self, timeout: Duration); } /// The common signals we want to handle @@ -165,7 +161,7 @@ pub fn common_signals() -> Vec { } #[cfg(test)] -pub mod test { +mod test { use core::marker::PhantomData; use libafl_bolts::{AsSlice, Error}; @@ -173,7 +169,7 @@ pub mod test { use crate::{ events::NopEventManager, executors::{Executor, ExitKind}, - fuzzer::test::NopFuzzer, + fuzzer::NopFuzzer, inputs::{BytesInput, HasTargetBytes}, state::{HasExecutions, NopState, State, UsesState}, }; @@ -186,6 +182,7 @@ pub mod test { } impl NopExecutor { + /// Creates a new [`NopExecutor`] #[must_use] pub fn new() -> Self { Self { diff --git a/libafl/src/executors/shadow.rs b/libafl/src/executors/shadow.rs index 876f98401b..0ec225b1ca 100644 --- a/libafl/src/executors/shadow.rs +++ b/libafl/src/executors/shadow.rs @@ -1,12 +1,17 @@ //! A `ShadowExecutor` wraps an executor to have shadow observer that will not be considered by the feedbacks and the manager -use core::fmt::{self, Debug, Formatter}; +use core::{ + fmt::{self, Debug, Formatter}, + time::Duration, +}; use libafl_bolts::tuples::RefIndexable; +use super::HasTimeout; use crate::{ executors::{Executor, ExitKind, HasObservers}, - observers::{ObserversTuple, UsesObservers}, + inputs::UsesInput, + observers::ObserversTuple, state::UsesState, Error, }; @@ -34,8 +39,8 @@ where impl ShadowExecutor where - E: HasObservers, - SOT: ObserversTuple<::State>, + E: HasObservers + UsesState, + SOT: ObserversTuple<::Input, ::State>, { /// Create a new `ShadowExecutor`, wrapping the given `executor`. 
pub fn new(executor: E, shadow_observers: SOT) -> Self { @@ -61,7 +66,7 @@ where impl Executor for ShadowExecutor where E: Executor + HasObservers, - SOT: ObserversTuple, + SOT: ObserversTuple, EM: UsesState, Z: UsesState, { @@ -76,6 +81,20 @@ where } } +impl HasTimeout for ShadowExecutor +where + E: HasTimeout, +{ + #[inline] + fn set_timeout(&mut self, timeout: Duration) { + self.executor.set_timeout(timeout); + } + #[inline] + fn timeout(&self) -> Duration { + self.executor.timeout() + } +} + impl UsesState for ShadowExecutor where E: UsesState, @@ -83,18 +102,12 @@ where type State = E::State; } -impl UsesObservers for ShadowExecutor -where - E: UsesObservers, -{ - type Observers = E::Observers; -} - impl HasObservers for ShadowExecutor where - E: HasObservers, - SOT: ObserversTuple, + E: HasObservers + UsesState, + SOT: ObserversTuple<::Input, ::State>, { + type Observers = E::Observers; #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { self.executor.observers() diff --git a/libafl/src/executors/with_observers.rs b/libafl/src/executors/with_observers.rs index 17cb0ba1e1..7b161cbb97 100644 --- a/libafl/src/executors/with_observers.rs +++ b/libafl/src/executors/with_observers.rs @@ -6,7 +6,8 @@ use libafl_bolts::tuples::RefIndexable; use crate::{ executors::{Executor, ExitKind, HasObservers}, - observers::{ObserversTuple, UsesObservers}, + inputs::UsesInput, + observers::ObserversTuple, state::UsesState, Error, }; @@ -42,19 +43,12 @@ where type State = E::State; } -impl UsesObservers for WithObservers -where - E: UsesState, - OT: ObserversTuple, -{ - type Observers = OT; -} - impl HasObservers for WithObservers where E: UsesState, - OT: ObserversTuple, + OT: ObserversTuple<::Input, ::State>, { + type Observers = OT; fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { RefIndexable::from(&self.observers) } diff --git a/libafl/src/feedbacks/capture_feedback.rs b/libafl/src/feedbacks/capture_feedback.rs new file mode 100644 index 0000000000..ea112e264e --- /dev/null +++ b/libafl/src/feedbacks/capture_feedback.rs @@ -0,0 +1,77 @@ +//! Feedback that captures Timeouts for re-running +use std::{borrow::Cow, cell::RefCell, fmt::Debug, rc::Rc}; + +use libafl_bolts::{Error, Named}; +use serde::{de::DeserializeOwned, Serialize}; + +use crate::{ + corpus::Testcase, + executors::ExitKind, + feedbacks::{Feedback, StateInitializer}, + stages::verify_timeouts::TimeoutsToVerify, + state::HasCorpus, + HasMetadata, +}; + +/// A Feedback that captures all timeouts and stores them in State for re-evaluation later. +/// Use in conjunction with `VerifyTimeoutsStage` +#[derive(Debug)] +pub struct CaptureTimeoutFeedback { + enabled: Rc>, +} + +impl CaptureTimeoutFeedback { + /// Create a new [`CaptureTimeoutFeedback`]. 
+ pub fn new(enabled: Rc>) -> Self { + Self { enabled } + } +} + +impl Named for CaptureTimeoutFeedback { + fn name(&self) -> &Cow<'static, str> { + static NAME: Cow<'static, str> = Cow::Borrowed("CaptureTimeoutFeedback"); + &NAME + } +} + +impl StateInitializer for CaptureTimeoutFeedback {} + +impl Feedback for CaptureTimeoutFeedback +where + S: HasCorpus + HasMetadata, + I: Debug + Serialize + DeserializeOwned + Default + 'static + Clone, +{ + #[allow(clippy::wrong_self_convention)] + #[inline] + fn is_interesting( + &mut self, + state: &mut S, + _manager: &mut EM, + input: &I, + _observers: &OT, + exit_kind: &ExitKind, + ) -> Result { + if *self.enabled.borrow() && matches!(exit_kind, ExitKind::Timeout) { + let timeouts = state.metadata_or_insert_with(|| TimeoutsToVerify::::new()); + timeouts.push(input.clone()); + return Ok(false); + } + Ok(matches!(exit_kind, ExitKind::Timeout)) + } + + fn append_metadata( + &mut self, + _state: &mut S, + _manager: &mut EM, + _observers: &OT, + _testcase: &mut Testcase, + ) -> Result<(), Error> { + Ok(()) + } + + #[cfg(feature = "track_hit_feedbacks")] + #[inline] + fn last_result(&self) -> Result { + Ok(false) + } +} diff --git a/libafl/src/feedbacks/concolic.rs b/libafl/src/feedbacks/concolic.rs index 4f613f8fd5..93cdd658f2 100644 --- a/libafl/src/feedbacks/concolic.rs +++ b/libafl/src/feedbacks/concolic.rs @@ -1,85 +1,50 @@ //! Concolic feedback for concolic fuzzing. +//! //! It is used to attach concolic tracing metadata to the testcase. //! This feedback should be used in combination with another feedback as this feedback always considers testcases //! to be not interesting. //! Requires a [`ConcolicObserver`] to observe the concolic trace. use alloc::borrow::Cow; -use core::{fmt::Debug, marker::PhantomData}; +use core::fmt::Debug; use libafl_bolts::{ - tuples::{Handle, Handled, MatchNameRef}, + tuples::{Handle, Handled, MatchName, MatchNameRef}, Named, }; use crate::{ corpus::Testcase, - events::EventFirer, - executors::ExitKind, - feedbacks::Feedback, - inputs::UsesInput, - observers::{concolic::ConcolicObserver, ObserversTuple}, - state::State, + feedbacks::{Feedback, StateInitializer}, + observers::concolic::ConcolicObserver, Error, HasMetadata, }; /// The concolic feedback. It is used to attach concolic tracing metadata to the testcase. +/// /// This feedback should be used in combination with another feedback as this feedback always considers testcases /// to be not interesting. /// Requires a [`ConcolicObserver`] to observe the concolic trace. 
#[derive(Debug)] -pub struct ConcolicFeedback<'map, S> { +pub struct ConcolicFeedback<'map> { observer_handle: Handle>, - phantom: PhantomData, } -impl<'map, S> ConcolicFeedback<'map, S> { +impl<'map> ConcolicFeedback<'map> { /// Creates a concolic feedback from an observer #[allow(unused)] #[must_use] pub fn from_observer(observer: &ConcolicObserver<'map>) -> Self { Self { observer_handle: observer.handle(), - phantom: PhantomData, } } -} -impl Named for ConcolicFeedback<'_, S> { - fn name(&self) -> &Cow<'static, str> { - self.observer_handle.name() - } -} - -impl Feedback for ConcolicFeedback<'_, S> -where - S: State, -{ - #[allow(clippy::wrong_self_convention)] - fn is_interesting( + fn add_concolic_feedback_to_metadata( &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &::Input, - _observers: &OT, - _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - Ok(false) - } - - fn append_metadata( - &mut self, - _state: &mut S, - _manager: &mut EM, observers: &OT, - testcase: &mut Testcase, - ) -> Result<(), Error> - where - OT: ObserversTuple, - EM: EventFirer, + testcase: &mut Testcase, + ) where + OT: MatchName, { if let Some(metadata) = observers .get(&self.observer_handle) @@ -87,19 +52,34 @@ where { testcase.metadata_map_mut().insert(metadata); } - Ok(()) } +} - fn discard_metadata( - &mut self, - _state: &mut S, - _input: &::Input, - ) -> Result<(), Error> { - Ok(()) +impl Named for ConcolicFeedback<'_> { + fn name(&self) -> &Cow<'static, str> { + self.observer_handle.name() } +} +impl StateInitializer for ConcolicFeedback<'_> {} + +impl Feedback for ConcolicFeedback<'_> +where + OT: MatchName, +{ #[cfg(feature = "track_hit_feedbacks")] fn last_result(&self) -> Result { Ok(false) } + + fn append_metadata( + &mut self, + _state: &mut S, + _manager: &mut EM, + observers: &OT, + testcase: &mut Testcase, + ) -> Result<(), Error> { + self.add_concolic_feedback_to_metadata(observers, testcase); + Ok(()) + } } diff --git a/libafl/src/feedbacks/custom_filename.rs b/libafl/src/feedbacks/custom_filename.rs index e3e4ea7633..df20ad23d4 100644 --- a/libafl/src/feedbacks/custom_filename.rs +++ b/libafl/src/feedbacks/custom_filename.rs @@ -1,134 +1,97 @@ use alloc::{borrow::Cow, string::String}; -use core::{ - fmt::{self, Debug, Formatter}, - marker::PhantomData, -}; +use core::fmt::{self, Debug, Formatter}; use libafl_bolts::Named; use serde::{Deserialize, Serialize}; use crate::{ corpus::Testcase, - events::EventFirer, - executors::ExitKind, - feedbacks::{Feedback, FeedbackFactory}, - inputs::Input, - observers::ObserversTuple, - state::State, + feedbacks::{Feedback, FeedbackFactory, StateInitializer}, Error, }; -/// A [`CustomFilenameToTestcaseFeedback`] takes a closure which returns a filename for the testcase. +/// Type which can generate a custom filename for a given input/state pair +pub trait CustomFilenameGenerator { + /// Sets the name of the provided [`Testcase`] based on the state and input + fn set_name(&mut self, state: &mut S, testcase: &mut Testcase) -> Result; +} + +// maintain compatibility with old impls +impl CustomFilenameGenerator for F +where + F: FnMut(&mut S, &mut Testcase) -> Result, +{ + fn set_name(&mut self, state: &mut S, testcase: &mut Testcase) -> Result { + self(state, testcase) + } +} + +/// A [`CustomFilenameToTestcaseFeedback`] takes a [`CustomFilenameGenerator`] which returns a +/// filename for the testcase. /// Is never interesting (use with an Eager OR). 
+/// /// Note: Use only in conjunction with a `Corpus` type that writes to disk. /// Note: If used as part of the `Objective` chain, then it will only apply to testcases which are /// `Objectives`, vice versa for `Feedback`. #[derive(Serialize, Deserialize)] -pub struct CustomFilenameToTestcaseFeedback -where - I: Input, - S: State, - F: FnMut(&mut S, &mut Testcase) -> Result, -{ - /// Closure that returns the filename. - func: F, - phantomm: PhantomData<(I, S)>, +pub struct CustomFilenameToTestcaseFeedback { + /// Generator that returns the filename. + generator: N, } -impl CustomFilenameToTestcaseFeedback -where - I: Input, - S: State, - F: FnMut(&mut S, &mut Testcase) -> Result, -{ +impl CustomFilenameToTestcaseFeedback { /// Create a new [`CustomFilenameToTestcaseFeedback`]. - pub fn new(func: F) -> Self { + pub fn new(generator: N) -> Self { + Self { generator } + } +} + +impl FeedbackFactory, T> + for CustomFilenameToTestcaseFeedback +where + N: Clone, +{ + fn create_feedback(&self, _ctx: &T) -> CustomFilenameToTestcaseFeedback { Self { - func, - phantomm: PhantomData, + generator: self.generator.clone(), } } } -impl FeedbackFactory, T> - for CustomFilenameToTestcaseFeedback -where - I: Input, - S: State, - F: FnMut(&mut S, &mut Testcase) -> Result + Clone, -{ - fn create_feedback(&self, _ctx: &T) -> CustomFilenameToTestcaseFeedback { - Self { - func: self.func.clone(), - phantomm: self.phantomm, - } - } -} - -impl Named for CustomFilenameToTestcaseFeedback -where - I: Input, - S: State, - F: FnMut(&mut S, &mut Testcase) -> Result, -{ +impl Named for CustomFilenameToTestcaseFeedback { fn name(&self) -> &Cow<'static, str> { static NAME: Cow<'static, str> = Cow::Borrowed("CustomFilenameToTestcaseFeedback"); &NAME } } -impl Debug for CustomFilenameToTestcaseFeedback -where - I: Input, - S: State, - F: FnMut(&mut S, &mut Testcase) -> Result, -{ +impl Debug for CustomFilenameToTestcaseFeedback { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("CustomFilenameToTestcaseFeedback") .finish_non_exhaustive() } } -impl Feedback for CustomFilenameToTestcaseFeedback +impl StateInitializer for CustomFilenameToTestcaseFeedback {} + +impl Feedback for CustomFilenameToTestcaseFeedback where - S: State, - F: FnMut(&mut S, &mut Testcase) -> Result, - I: Input, + N: CustomFilenameGenerator, { - #[allow(clippy::wrong_self_convention)] - #[inline] - fn is_interesting( - &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &I, - _observers: &OT, - _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - { - Ok(false) - } - - fn append_metadata( - &mut self, - state: &mut S, - _manager: &mut EM, - _observers: &OT, - testcase: &mut Testcase<::Input>, - ) -> Result<(), Error> - where - OT: ObserversTuple, - EM: EventFirer, - { - *testcase.filename_mut() = Some((self.func)(state, testcase)?); - Ok(()) - } - #[cfg(feature = "track_hit_feedbacks")] #[inline] fn last_result(&self) -> Result { Ok(false) } + + fn append_metadata( + &mut self, + state: &mut S, + _manager: &mut EM, + _observers: &OT, + testcase: &mut Testcase, + ) -> Result<(), Error> { + *testcase.filename_mut() = Some(self.generator.set_name(state, testcase)?); + Ok(()) + } } diff --git a/libafl/src/feedbacks/differential.rs b/libafl/src/feedbacks/differential.rs index 708c729783..54c3f6ffc7 100644 --- a/libafl/src/feedbacks/differential.rs +++ b/libafl/src/feedbacks/differential.rs @@ -2,10 +2,7 @@ //! 
use alloc::borrow::Cow; -use core::{ - fmt::{self, Debug, Formatter}, - marker::PhantomData, -}; +use core::fmt::{self, Debug, Formatter}; use libafl_bolts::{ tuples::{Handle, Handled, MatchName, MatchNameRef}, @@ -16,13 +13,9 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "track_hit_feedbacks")] use crate::feedbacks::premature_last_result_err; use crate::{ - events::EventFirer, executors::ExitKind, - feedbacks::{Feedback, FeedbackFactory}, - inputs::Input, - observers::{Observer, ObserversTuple}, - state::State, - Error, HasMetadata, + feedbacks::{Feedback, FeedbackFactory, StateInitializer}, + Error, }; /// The result of a differential test between two observers. @@ -51,12 +44,24 @@ impl DiffResult { } } -/// A [`DiffFeedback`] compares the content of two [`Observer`]s using the given compare function. -#[derive(Serialize, Deserialize)] -pub struct DiffFeedback +/// Compares two [`crate::observers::Observer`]s to see if the result should be denoted as equal +pub trait DiffComparator { + /// Performs the comparison between two [`crate::observers::Observer`]s + fn compare(&mut self, first: &O1, second: &O2) -> DiffResult; +} + +impl DiffComparator for F where - F: FnMut(&O1, &O2) -> DiffResult, + F: Fn(&O1, &O2) -> DiffResult, { + fn compare(&mut self, first: &O1, second: &O2) -> DiffResult { + self(first, second) + } +} + +/// A [`DiffFeedback`] compares the content of two observers using the given compare function. +#[derive(Serialize, Deserialize)] +pub struct DiffFeedback { /// This feedback's name name: Cow<'static, str>, /// The first observer to compare against @@ -66,19 +71,17 @@ where // The previous run's result of `Self::is_interesting` #[cfg(feature = "track_hit_feedbacks")] last_result: Option, - /// The function used to compare the two observers - compare_fn: F, - phantomm: PhantomData<(I, S)>, + /// The comparator used to compare the two observers + comparator: C, } -impl DiffFeedback +impl DiffFeedback where - F: FnMut(&O1, &O2) -> DiffResult, O1: Named, O2: Named, { /// Create a new [`DiffFeedback`] using two observers and a test function. 
- pub fn new(name: &'static str, o1: &O1, o2: &O2, compare_fn: F) -> Result { + pub fn new(name: &'static str, o1: &O1, o2: &O2, comparator: C) -> Result { let o1_ref = o1.handle(); let o2_ref = o2.handle(); if o1_ref.name() == o2_ref.name() { @@ -93,51 +96,38 @@ where name: Cow::from(name), #[cfg(feature = "track_hit_feedbacks")] last_result: None, - compare_fn, - phantomm: PhantomData, + comparator, }) } } } -impl FeedbackFactory, T> - for DiffFeedback +impl FeedbackFactory, T> for DiffFeedback where - F: FnMut(&O1, &O2) -> DiffResult + Clone, - I: Input, - O1: Observer + Named, - O2: Observer + Named, - S: HasMetadata + State, + C: Clone, { - fn create_feedback(&self, _ctx: &T) -> DiffFeedback { + fn create_feedback(&self, _ctx: &T) -> DiffFeedback { Self { name: self.name.clone(), o1_ref: self.o1_ref.clone(), o2_ref: self.o2_ref.clone(), - compare_fn: self.compare_fn.clone(), + comparator: self.comparator.clone(), #[cfg(feature = "track_hit_feedbacks")] last_result: None, - phantomm: self.phantomm, } } } -impl Named for DiffFeedback -where - F: FnMut(&O1, &O2) -> DiffResult, - O1: Named, - O2: Named, -{ +impl Named for DiffFeedback { fn name(&self) -> &Cow<'static, str> { &self.name } } -impl Debug for DiffFeedback +impl Debug for DiffFeedback where - F: FnMut(&O1, &O2) -> DiffResult, - O1: Named, - O2: Named, + O1: Debug, + O2: Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("DiffFeedback") @@ -148,27 +138,22 @@ where } } -impl Feedback for DiffFeedback +impl StateInitializer for DiffFeedback {} + +impl Feedback for DiffFeedback where - F: FnMut(&O1, &O2) -> DiffResult, - I: Input, - S: HasMetadata + State, - O1: Observer, - O2: Observer, + OT: MatchName, + C: DiffComparator, { #[allow(clippy::wrong_self_convention)] - fn is_interesting( + fn is_interesting( &mut self, _state: &mut S, _manager: &mut EM, _input: &I, observers: &OT, _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple + MatchName, - { + ) -> Result { fn err(name: &str) -> Error { Error::illegal_argument(format!("DiffFeedback: observer {name} not found")) } @@ -178,7 +163,7 @@ where let o2: &O2 = observers .get(&self.o2_ref) .ok_or_else(|| err(self.o2_ref.name()))?; - let res = (self.compare_fn)(o1, o2) == DiffResult::Diff; + let res = self.comparator.compare(o1, o2) == DiffResult::Diff; #[cfg(feature = "track_hit_feedbacks")] { self.last_result = Some(res); @@ -195,25 +180,24 @@ where #[cfg(test)] mod tests { use alloc::borrow::Cow; - use core::marker::PhantomData; use libafl_bolts::{tuples::tuple_list, Named}; use crate::{ - events::EventFirer, + events::NopEventManager, executors::ExitKind, feedbacks::{differential::DiffResult, DiffFeedback, Feedback}, - inputs::{BytesInput, UsesInput}, + inputs::BytesInput, observers::Observer, - state::{NopState, State, UsesState}, + state::NopState, }; #[derive(Debug)] - struct NopObserver { + struct DummyObserver { name: Cow<'static, str>, value: bool, } - impl NopObserver { + impl DummyObserver { fn new(name: &'static str, value: bool) -> Self { Self { name: Cow::from(name), @@ -221,72 +205,45 @@ mod tests { } } } - impl Observer for NopObserver where S: UsesInput {} - impl PartialEq for NopObserver { + impl Observer for DummyObserver {} + impl PartialEq for DummyObserver { fn eq(&self, other: &Self) -> bool { self.value == other.value } } - impl Named for NopObserver { + impl Named for DummyObserver { fn name(&self) -> &Cow<'static, str> { &self.name } } - struct NopEventFirer { - phantom: PhantomData, - } - impl 
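The `DiffComparator` trait introduced above keeps closure call sites working through its blanket impl for `Fn(&O1, &O2) -> DiffResult`; a hand-written comparator becomes useful when the comparison needs mutable state, since `compare` takes `&mut self`. A minimal sketch (the `<O1, O2>` generic parameters are assumed, as this rendering drops angle-bracketed generics):

```rust
use libafl::feedbacks::differential::{DiffComparator, DiffResult};

/// Illustrative comparator that also counts how many divergences it has seen.
/// A plain `Fn` closure covered by the blanket impl cannot mutate captured state.
pub struct CountingComparator {
    pub diffs_seen: usize,
}

// Generic parameters `<O1, O2>` are assumed from the blanket impl above.
impl<O> DiffComparator<O, O> for CountingComparator
where
    O: PartialEq,
{
    fn compare(&mut self, first: &O, second: &O) -> DiffResult {
        if first == second {
            DiffResult::Equal
        } else {
            self.diffs_seen += 1;
            DiffResult::Diff
        }
    }
}
```

Such a value is passed to `DiffFeedback::new(...)` in place of the closure, exactly as the free `comparator` function is in the rewritten test module below.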
UsesState for NopEventFirer - where - S: State, - { - type State = S; - } - impl EventFirer for NopEventFirer - where - S: State, - { - fn should_send(&self) -> bool { - true - } - - fn fire( - &mut self, - _state: &mut S, - _event: crate::events::Event, - ) -> Result<(), crate::Error> { - Ok(()) + fn comparator(o1: &DummyObserver, o2: &DummyObserver) -> DiffResult { + if o1 == o2 { + DiffResult::Equal + } else { + DiffResult::Diff } } fn test_diff(should_equal: bool) { - let mut nop_state = NopState::new(); + let mut nop_state: NopState = NopState::new(); - let o1 = NopObserver::new("o1", true); - let o2 = NopObserver::new("o2", should_equal); + let o1 = DummyObserver::new("o1", true); + let o2 = DummyObserver::new("o2", should_equal); - let mut diff_feedback = DiffFeedback::new("diff_feedback", &o1, &o2, |o1, o2| { - if o1 == o2 { - DiffResult::Equal - } else { - DiffResult::Diff - } - }) - .unwrap(); + let mut diff_feedback = DiffFeedback::new("diff_feedback", &o1, &o2, comparator).unwrap(); let observers = tuple_list![o1, o2]; assert_eq!( !should_equal, - diff_feedback - .is_interesting( - &mut nop_state, - &mut NopEventFirer { - phantom: PhantomData - }, - &BytesInput::new(vec![0]), - &observers, - &ExitKind::Ok - ) - .unwrap() + DiffFeedback::<_, _, _>::is_interesting( + &mut diff_feedback, + &mut nop_state, + &mut NopEventManager::>::default(), + &BytesInput::new(vec![0]), + &observers, + &ExitKind::Ok + ) + .unwrap() ); } diff --git a/libafl/src/feedbacks/list.rs b/libafl/src/feedbacks/list.rs index 4c4f56a46f..d1590a957d 100644 --- a/libafl/src/feedbacks/list.rs +++ b/libafl/src/feedbacks/list.rs @@ -3,46 +3,34 @@ use core::{fmt::Debug, hash::Hash}; use hashbrown::HashSet; use libafl_bolts::{ - tuples::{Handle, Handled, MatchNameRef}, + tuples::{Handle, Handled, MatchName, MatchNameRef}, Error, HasRefCnt, Named, }; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ - events::EventFirer, executors::ExitKind, - feedbacks::Feedback, - observers::{ListObserver, ObserversTuple}, - state::State, + feedbacks::{Feedback, StateInitializer}, + observers::ListObserver, HasNamedMetadata, }; /// The metadata to remember past observed value -#[derive(Default, Serialize, Deserialize, Clone, Debug)] -#[serde(bound = "T: DeserializeOwned")] -#[cfg_attr( - any(not(feature = "serdeany_autoreg"), miri), - allow(clippy::unsafe_derive_deserialize) -)] -pub struct ListFeedbackMetadata -where - T: Default + Copy + 'static + Serialize + Eq + Hash, -{ +#[derive(Debug, Serialize, Deserialize)] +#[serde(bound = "T: Eq + Hash + for<'a> Deserialize<'a> + Serialize")] +pub struct ListFeedbackMetadata { /// Contains the information of past observed set of values. pub set: HashSet, /// A refcount used to know when we can remove this metadata pub tcref: isize, } -impl ListFeedbackMetadata -where - T: Default + Copy + 'static + Serialize + Eq + Hash, -{ +impl ListFeedbackMetadata { /// The constructor #[must_use] pub fn new() -> Self { Self { - set: HashSet::::new(), + set: HashSet::new(), tcref: 0, } } @@ -54,10 +42,14 @@ where } } -impl HasRefCnt for ListFeedbackMetadata -where - T: Default + Copy + 'static + Serialize + Eq + Hash, -{ +impl Default for ListFeedbackMetadata { + #[must_use] + fn default() -> Self { + Self::new() + } +} + +impl HasRefCnt for ListFeedbackMetadata { fn refcnt(&self) -> isize { self.tcref } @@ -69,43 +61,29 @@ where /// Consider interesting a testcase if the list in `ListObserver` is not empty. 
#[derive(Clone, Debug)] -pub struct ListFeedback -where - T: Hash + Eq, -{ +pub struct ListFeedback { observer_handle: Handle>, novelty: HashSet, } libafl_bolts::impl_serdeany!( - ListFeedbackMetadata, + ListFeedbackMetadata, ,,,,,,,,,, ); -impl Feedback for ListFeedback +impl ListFeedback where - S: State + HasNamedMetadata, - T: Debug + Serialize + Hash + Eq + DeserializeOwned + Default + Copy + 'static, + T: Debug + Eq + Hash + for<'a> Deserialize<'a> + Serialize + 'static + Copy, { - fn init_state(&mut self, state: &mut S) -> Result<(), Error> { - // eprintln!("self.name {:#?}", &self.name); - state.add_named_metadata(self.name(), ListFeedbackMetadata::::default()); - Ok(()) - } - #[allow(clippy::wrong_self_convention)] - fn is_interesting( + fn has_interesting_list_observer_feedback( &mut self, state: &mut S, - _manager: &mut EM, - _input: &S::Input, observers: &OT, - _exit_kind: &ExitKind, - ) -> Result + ) -> bool where - EM: EventFirer, - OT: ObserversTuple, + OT: MatchName, + S: HasNamedMetadata, { - // TODO Replace with match_name_type when stable let observer = observers.get(&self.observer_handle).unwrap(); // TODO register the list content in a testcase metadata self.novelty.clear(); @@ -119,20 +97,10 @@ where self.novelty.insert(*v); } } - Ok(!self.novelty.is_empty()) + !self.novelty.is_empty() } - fn append_metadata( - &mut self, - state: &mut S, - _manager: &mut EM, - _observers: &OT, - _testcase: &mut crate::corpus::Testcase<::Input>, - ) -> Result<(), Error> - where - OT: ObserversTuple, - EM: EventFirer, - { + fn append_list_observer_metadata(&mut self, state: &mut S) { let history_set = state .named_metadata_map_mut() .get_mut::>(self.name()) @@ -141,34 +109,69 @@ where for v in &self.novelty { history_set.set.insert(*v); } + } +} + +impl StateInitializer for ListFeedback +where + S: HasNamedMetadata, + T: Debug + Eq + Hash + for<'a> Deserialize<'a> + Serialize + Default + Copy + 'static, +{ + fn init_state(&mut self, state: &mut S) -> Result<(), Error> { + state.add_named_metadata(self.name(), ListFeedbackMetadata::::default()); Ok(()) } +} + +impl Feedback for ListFeedback +where + OT: MatchName, + S: HasNamedMetadata, + T: Debug + Eq + Hash + for<'a> Deserialize<'a> + Serialize + Default + Copy + 'static, +{ + #[allow(clippy::wrong_self_convention)] + fn is_interesting( + &mut self, + state: &mut S, + _manager: &mut EM, + _input: &I, + observers: &OT, + _exit_kind: &ExitKind, + ) -> Result { + Ok(self.has_interesting_list_observer_feedback(state, observers)) + } + #[cfg(feature = "track_hit_feedbacks")] fn last_result(&self) -> Result { Ok(!self.novelty.is_empty()) } + + fn append_metadata( + &mut self, + state: &mut S, + _manager: &mut EM, + _observers: &OT, + _testcase: &mut crate::corpus::Testcase, + ) -> Result<(), Error> { + self.append_list_observer_metadata(state); + Ok(()) + } } -impl Named for ListFeedback -where - T: Debug + Serialize + Hash + Eq + DeserializeOwned, -{ +impl Named for ListFeedback { #[inline] fn name(&self) -> &Cow<'static, str> { self.observer_handle.name() } } -impl ListFeedback -where - T: Debug + Serialize + Hash + Eq + DeserializeOwned, -{ +impl ListFeedback { /// Creates a new [`ListFeedback`], deciding if the given [`ListObserver`] value of a run is interesting. 
#[must_use] pub fn new(observer: &ListObserver) -> Self { Self { observer_handle: observer.handle(), - novelty: HashSet::::new(), + novelty: HashSet::new(), } } } diff --git a/libafl/src/feedbacks/map.rs b/libafl/src/feedbacks/map.rs index 97f36cf2fe..e54269a7cf 100644 --- a/libafl/src/feedbacks/map.rs +++ b/libafl/src/feedbacks/map.rs @@ -12,7 +12,7 @@ use core::{ #[rustversion::nightly] use libafl_bolts::AsSlice; use libafl_bolts::{ - tuples::{Handle, Handled, MatchNameRef}, + tuples::{Handle, Handled, MatchName, MatchNameRef}, AsIter, HasRefCnt, Named, }; use num_traits::PrimInt; @@ -24,37 +24,33 @@ use crate::{ corpus::Testcase, events::{Event, EventFirer}, executors::ExitKind, - feedbacks::{Feedback, HasObserverHandle}, + feedbacks::{Feedback, HasObserverHandle, StateInitializer}, inputs::UsesInput, monitors::{AggregatorOps, UserStats, UserStatsValue}, - observers::{CanTrack, MapObserver, Observer, ObserversTuple}, - state::State, + observers::{CanTrack, MapObserver}, Error, HasMetadata, HasNamedMetadata, }; /// A [`MapFeedback`] that implements the AFL algorithm using an [`OrReducer`] combining the bits for the history map and the bit from (`HitcountsMapObserver`)[`crate::observers::HitcountsMapObserver`]. -pub type AflMapFeedback = MapFeedback; +pub type AflMapFeedback = MapFeedback; /// A [`MapFeedback`] that strives to maximize the map contents. -pub type MaxMapFeedback = MapFeedback; +pub type MaxMapFeedback = MapFeedback; /// A [`MapFeedback`] that strives to minimize the map contents. -pub type MinMapFeedback = MapFeedback; +pub type MinMapFeedback = MapFeedback; /// A [`MapFeedback`] that always returns `true` for `is_interesting`. Useful for tracing all executions. -pub type AlwaysInterestingMapFeedback = MapFeedback; +pub type AlwaysInterestingMapFeedback = MapFeedback; /// A [`MapFeedback`] that strives to maximize the map contents, /// but only, if a value is larger than `pow2` of the previous. -pub type MaxMapPow2Feedback = MapFeedback; +pub type MaxMapPow2Feedback = MapFeedback; /// A [`MapFeedback`] that strives to maximize the map contents, -/// but only, if a value is larger than `pow2` of the previous. -pub type MaxMapOneOrFilledFeedback = MapFeedback; +/// but only, if a value is either `T::one()` or `T::max_value()`. +pub type MaxMapOneOrFilledFeedback = MapFeedback; /// A `Reducer` function is used to aggregate values for the novelty search -pub trait Reducer: 'static -where - T: Default + Copy + 'static, -{ +pub trait Reducer { /// Reduce two values to one value, with the current [`Reducer`]. 
fn reduce(first: T, second: T) -> T; } @@ -65,7 +61,7 @@ pub struct OrReducer {} impl Reducer for OrReducer where - T: BitOr + Default + Copy + 'static + PartialOrd, + T: BitOr, { #[inline] fn reduce(history: T, new: T) -> T { @@ -79,7 +75,7 @@ pub struct AndReducer {} impl Reducer for AndReducer where - T: BitAnd + Default + Copy + 'static + PartialOrd, + T: BitAnd, { #[inline] fn reduce(history: T, new: T) -> T { @@ -91,10 +87,7 @@ where #[derive(Clone, Debug)] pub struct NopReducer {} -impl Reducer for NopReducer -where - T: Default + Copy + 'static, -{ +impl Reducer for NopReducer { #[inline] fn reduce(_history: T, new: T) -> T { new @@ -107,7 +100,7 @@ pub struct MaxReducer {} impl Reducer for MaxReducer where - T: Default + Copy + 'static + PartialOrd, + T: PartialOrd, { #[inline] fn reduce(first: T, second: T) -> T { @@ -125,7 +118,7 @@ pub struct MinReducer {} impl Reducer for MinReducer where - T: Default + Copy + 'static + PartialOrd, + T: PartialOrd, { #[inline] fn reduce(first: T, second: T) -> T { @@ -138,10 +131,7 @@ where } /// A `IsNovel` function is used to discriminate if a reduced value is considered novel. -pub trait IsNovel: 'static -where - T: Default + Copy + 'static, -{ +pub trait IsNovel { /// If a new value in the [`MapFeedback`] was found, /// this filter can decide if the result is considered novel or not. fn is_novel(old: T, new: T) -> bool; @@ -226,10 +216,7 @@ where /// A testcase metadata holding a list of indexes of a map #[derive(Debug, Serialize, Deserialize)] -#[cfg_attr( - any(not(feature = "serdeany_autoreg"), miri), - allow(clippy::unsafe_derive_deserialize) -)] // for SerdeAny +#[allow(clippy::unsafe_derive_deserialize)] // for SerdeAny pub struct MapIndexesMetadata { /// The list of indexes. pub list: Vec, @@ -274,10 +261,7 @@ impl MapIndexesMetadata { /// A testcase metadata holding a list of indexes of a map #[derive(Debug, Serialize, Deserialize)] -#[cfg_attr( - any(not(feature = "serdeany_autoreg"), miri), - allow(clippy::unsafe_derive_deserialize) -)] // for SerdeAny +#[allow(clippy::unsafe_derive_deserialize)] // for SerdeAny pub struct MapNoveltiesMetadata { /// A `list` of novelties. 
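With the blanket `'static + Default + Copy + PartialOrd` bounds removed from `Reducer` and `IsNovel`, an implementation only states what its own body needs. A minimal sketch of a custom reducer, not one shipped by LibAFL; the import path assumes `Reducer` is re-exported from `feedbacks::map`:

```rust
use libafl::feedbacks::Reducer; // re-exported from `feedbacks::map`; adjust if the path differs

/// Hypothetical reducer that accumulates hit counts in the history map with saturation,
/// instead of OR-ing them (`OrReducer`) or keeping the maximum (`MaxReducer`).
#[derive(Clone, Debug)]
pub struct SaturatingAddReducer;

impl Reducer<u8> for SaturatingAddReducer {
    #[inline]
    fn reduce(history: u8, new: u8) -> u8 {
        history.saturating_add(new)
    }
}
```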
pub list: Vec, @@ -312,15 +296,8 @@ impl MapNoveltiesMetadata { /// The state of [`MapFeedback`] #[derive(Default, Serialize, Deserialize, Clone, Debug)] -#[serde(bound = "T: DeserializeOwned")] -#[cfg_attr( - any(not(feature = "serdeany_autoreg"), miri), - allow(clippy::unsafe_derive_deserialize) -)] // for SerdeAny -pub struct MapFeedbackMetadata -where - T: Default + Copy + 'static + Serialize, -{ +#[allow(clippy::unsafe_derive_deserialize)] // for SerdeAny +pub struct MapFeedbackMetadata { /// Contains information about untouched entries pub history_map: Vec, /// Tells us how many non-initial entries there are in `history_map` @@ -328,7 +305,7 @@ where } libafl_bolts::impl_serdeany!( - MapFeedbackMetadata, + MapFeedbackMetadata, ,,,,,,,,,,,, ); @@ -384,7 +361,7 @@ where /// The most common AFL-like feedback type #[derive(Clone, Debug)] -pub struct MapFeedback { +pub struct MapFeedback { /// New indexes observed in the last observation novelties: Option>, /// Name identifier of this instance @@ -397,39 +374,45 @@ pub struct MapFeedback { #[cfg(feature = "track_hit_feedbacks")] last_result: Option, /// Phantom Data of Reducer - phantom: PhantomData<(C, N, O, R, T)>, + #[allow(clippy::type_complexity)] + phantom: PhantomData (N, O, R)>, } -impl Feedback for MapFeedback +impl StateInitializer for MapFeedback where - N: IsNovel, - O: MapObserver + for<'it> AsIter<'it, Item = T>, - R: Reducer, - S: State + HasNamedMetadata, - T: Default + Copy + Serialize + for<'de> Deserialize<'de> + PartialEq + Debug + 'static, - C: CanTrack + AsRef + Observer, + O: MapObserver, + O::Entry: 'static + Default + Debug + DeserializeOwned + Serialize, + S: HasNamedMetadata, { fn init_state(&mut self, state: &mut S) -> Result<(), Error> { // Initialize `MapFeedbackMetadata` with an empty vector and add it to the state. 
// The `MapFeedbackMetadata` would be resized on-demand in `is_interesting` - state.add_named_metadata(&self.name, MapFeedbackMetadata::::default()); + state.add_named_metadata(&self.name, MapFeedbackMetadata::::default()); Ok(()) } +} +impl Feedback for MapFeedback +where + C: CanTrack + AsRef, + EM: EventFirer, + N: IsNovel, + O: MapObserver + for<'it> AsIter<'it, Item = O::Entry>, + O::Entry: 'static + Default + Debug + DeserializeOwned + Serialize, + OT: MatchName, + R: Reducer, + S: HasNamedMetadata + UsesInput, // delete me +{ #[rustversion::nightly] - default fn is_interesting( + default fn is_interesting( &mut self, state: &mut S, - manager: &mut EM, - input: &S::Input, + _manager: &mut EM, + _input: &I, observers: &OT, - exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - let res = self.is_interesting_default(state, manager, input, observers, exit_kind); + _exit_kind: &ExitKind, + ) -> Result { + let res = self.is_interesting_default(state, observers); #[cfg(feature = "track_hit_feedbacks")] { self.last_result = Some(res); @@ -438,19 +421,15 @@ where } #[rustversion::not(nightly)] - fn is_interesting( + fn is_interesting( &mut self, state: &mut S, - manager: &mut EM, - input: &::Input, + _manager: &mut EM, + _input: &I, observers: &OT, - exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - let res = self.is_interesting_default(state, manager, input, observers, exit_kind); + _exit_kind: &ExitKind, + ) -> Result { + let res = self.is_interesting_default(state, observers); #[cfg(feature = "track_hit_feedbacks")] { @@ -459,17 +438,18 @@ where Ok(res) } - fn append_metadata( + #[cfg(feature = "track_hit_feedbacks")] + fn last_result(&self) -> Result { + self.last_result.ok_or(premature_last_result_err()) + } + + fn append_metadata( &mut self, state: &mut S, manager: &mut EM, observers: &OT, - testcase: &mut Testcase, - ) -> Result<(), Error> - where - OT: ObserversTuple, - EM: EventFirer, - { + testcase: &mut Testcase, + ) -> Result<(), Error> { if let Some(novelties) = self.novelties.as_mut().map(core::mem::take) { let meta = MapNoveltiesMetadata::new(novelties); testcase.add_metadata(meta); @@ -478,7 +458,7 @@ where let initial = observer.initial(); let map_state = state .named_metadata_map_mut() - .get_mut::>(&self.name) + .get_mut::>(&self.name) .unwrap(); let len = observer.len(); if map_state.history_map.len() < len { @@ -551,34 +531,97 @@ where Ok(()) } +} - #[cfg(feature = "track_hit_feedbacks")] - fn last_result(&self) -> Result { - self.last_result.ok_or(premature_last_result_err()) +/// Specialize for the common coverage map size, maximization of u8s +#[rustversion::nightly] +impl Feedback for MapFeedback +where + C: CanTrack + AsRef, + EM: EventFirer, + O: MapObserver + for<'a> AsSlice<'a, Entry = u8> + for<'a> AsIter<'a, Item = u8>, + OT: MatchName, + S: HasNamedMetadata + UsesInput, +{ + #[allow(clippy::wrong_self_convention)] + #[allow(clippy::needless_range_loop)] + fn is_interesting( + &mut self, + state: &mut S, + _manager: &mut EM, + _input: &I, + observers: &OT, + _exit_kind: &ExitKind, + ) -> Result { + Ok(self.is_interesting_u8_simd_optimized(state, observers)) + } +} + +impl Named for MapFeedback { + #[inline] + fn name(&self) -> &Cow<'static, str> { + &self.name + } +} + +#[allow(clippy::ptr_arg)] +fn create_stats_name(name: &Cow<'static, str>) -> Cow<'static, str> { + if name.chars().all(char::is_lowercase) { + name.clone() + } else { + name.to_lowercase().into() + } +} + +impl 
MapFeedback +where + C: CanTrack + AsRef + Named, +{ + /// Create new `MapFeedback` + #[must_use] + pub fn new(map_observer: &C) -> Self { + Self { + novelties: if C::NOVELTIES { Some(vec![]) } else { None }, + name: map_observer.name().clone(), + map_ref: map_observer.handle(), + stats_name: create_stats_name(map_observer.name()), + #[cfg(feature = "track_hit_feedbacks")] + last_result: None, + phantom: PhantomData, + } + } + + /// Creating a new `MapFeedback` with a specific name. This is usefully whenever the same + /// feedback is needed twice, but with a different history. Using `new()` always results in the + /// same name and therefore also the same history. + #[must_use] + pub fn with_name(name: &'static str, map_observer: &C) -> Self { + let name = Cow::from(name); + Self { + novelties: if C::NOVELTIES { Some(vec![]) } else { None }, + map_ref: map_observer.handle(), + stats_name: create_stats_name(&name), + name, + #[cfg(feature = "track_hit_feedbacks")] + last_result: None, + phantom: PhantomData, + } } } /// Specialize for the common coverage map size, maximization of u8s #[rustversion::nightly] -impl Feedback for MapFeedback +impl MapFeedback where O: MapObserver + for<'a> AsSlice<'a, Entry = u8> + for<'a> AsIter<'a, Item = u8>, - S: State + HasNamedMetadata, - C: CanTrack + AsRef + Observer, + C: CanTrack + AsRef, { #[allow(clippy::wrong_self_convention)] #[allow(clippy::needless_range_loop)] - fn is_interesting( - &mut self, - state: &mut S, - _manager: &mut EM, - _input: &S::Input, - observers: &OT, - _exit_kind: &ExitKind, - ) -> Result + fn is_interesting_u8_simd_optimized(&mut self, state: &mut S, observers: &OT) -> bool where - EM: EventFirer, - OT: ObserversTuple, + S: HasNamedMetadata, + OT: MatchName, { // 128 bits vectors type VectorType = core::simd::u8x16; @@ -675,22 +718,11 @@ where { self.last_result = Some(interesting); } - Ok(interesting) + interesting } } -impl Named for MapFeedback { - #[inline] - fn name(&self) -> &Cow<'static, str> { - &self.name - } -} - -impl HasObserverHandle for MapFeedback -where - O: Named, - C: AsRef, -{ +impl HasObserverHandle for MapFeedback { type Observer = C; #[inline] @@ -699,70 +731,21 @@ where } } -#[allow(clippy::ptr_arg)] -fn create_stats_name(name: &Cow<'static, str>) -> Cow<'static, str> { - if name.chars().all(char::is_lowercase) { - name.clone() - } else { - name.to_lowercase().into() - } -} - -impl MapFeedback +impl MapFeedback where - T: PartialEq + Default + Copy + 'static + Serialize + DeserializeOwned + Debug, - R: Reducer, - O: MapObserver, - for<'it> O: AsIter<'it, Item = T>, - N: IsNovel, - C: CanTrack + AsRef + Named, + R: Reducer, + O: MapObserver + for<'it> AsIter<'it, Item = O::Entry>, + O::Entry: 'static + Debug + Serialize + DeserializeOwned, + N: IsNovel, + C: AsRef, { - /// Create new `MapFeedback` - #[must_use] - pub fn new(map_observer: &C) -> Self { - Self { - novelties: if C::NOVELTIES { Some(vec![]) } else { None }, - name: map_observer.name().clone(), - map_ref: map_observer.handle(), - stats_name: create_stats_name(map_observer.name()), - #[cfg(feature = "track_hit_feedbacks")] - last_result: None, - phantom: PhantomData, - } - } - - /// Creating a new `MapFeedback` with a specific name. This is usefully whenever the same - /// feedback is needed twice, but with a different history. Using `new()` always results in the - /// same name and therefore also the same history. 
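The relocated `new()`/`with_name()` constructors above differ only in which name keys the history metadata: `new()` reuses the observer's name, so two feedbacks built that way share one `MapFeedbackMetadata` entry, while `with_name()` gives a second instance its own history. A sketch under the assumption that the `MaxMapFeedback<C, O>` alias takes the tracking observer type and the inner map observer type in that order (the flattened rendering above drops the generic parameters, so verify the alias before copying this):

```rust
use libafl::feedbacks::MaxMapFeedback;
use libafl::observers::CanTrack;
use libafl_bolts::Named;

/// Build two independent histories over the same map observer.
/// ASSUMPTION: alias arity and parameter order `MaxMapFeedback<C, O>`.
fn two_histories<C, O>(observer: &C) -> (MaxMapFeedback<C, O>, MaxMapFeedback<C, O>)
where
    C: CanTrack + AsRef<O> + Named,
{
    (
        // Keyed by the observer's own name.
        MaxMapFeedback::new(observer),
        // Keyed by a distinct name, so it tracks its own novelty history.
        MaxMapFeedback::with_name("calibration_history", observer),
    )
}
```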
- #[must_use] - pub fn with_name(name: &'static str, map_observer: &C) -> Self { - let name = Cow::from(name); - Self { - novelties: if C::NOVELTIES { Some(vec![]) } else { None }, - map_ref: map_observer.handle(), - stats_name: create_stats_name(&name), - name, - #[cfg(feature = "track_hit_feedbacks")] - last_result: None, - phantom: PhantomData, - } - } - #[allow(clippy::wrong_self_convention)] #[allow(clippy::needless_range_loop)] #[allow(clippy::trivially_copy_pass_by_ref)] - fn is_interesting_default( - &mut self, - state: &mut S, - _manager: &mut EM, - _input: &S::Input, - observers: &OT, - _exit_kind: &ExitKind, - ) -> bool + fn is_interesting_default(&mut self, state: &mut S, observers: &OT) -> bool where - EM: EventFirer, - OT: ObserversTuple, - S: UsesInput + HasNamedMetadata, + S: HasNamedMetadata, + OT: MatchName, { let mut interesting = false; // TODO Replace with match_name_type when stable @@ -770,7 +753,7 @@ where let map_state = state .named_metadata_map_mut() - .get_mut::>(&self.name) + .get_mut::>(&self.name) .unwrap(); let len = observer.len(); if map_state.history_map.len() < len { @@ -822,7 +805,7 @@ mod tests { #[test] fn test_map_is_novel() { - // sanity check + // This should always hold assert!(AllIsNovel::is_novel(0_u8, 0)); assert!(!NextPow2IsNovel::is_novel(0_u8, 0)); diff --git a/libafl/src/feedbacks/mod.rs b/libafl/src/feedbacks/mod.rs index 3cd3457835..5a2e463583 100644 --- a/libafl/src/feedbacks/mod.rs +++ b/libafl/src/feedbacks/mod.rs @@ -7,16 +7,13 @@ use alloc::borrow::Cow; #[cfg(feature = "track_hit_feedbacks")] use alloc::vec::Vec; -use core::{ - fmt::{self, Debug, Formatter}, - marker::PhantomData, -}; +use core::{fmt::Debug, marker::PhantomData}; #[cfg(feature = "std")] pub use concolic::ConcolicFeedback; pub use differential::DiffFeedback; use libafl_bolts::{ - tuples::{Handle, Handled, MatchNameRef}, + tuples::{Handle, Handled, MatchName, MatchNameRef}, Named, }; pub use list::*; @@ -29,14 +26,11 @@ pub use new_hash_feedback::NewHashFeedback; pub use new_hash_feedback::NewHashFeedbackMetadata; use serde::{Deserialize, Serialize}; -use crate::{ - corpus::Testcase, - events::EventFirer, - executors::ExitKind, - observers::{ObserversTuple, TimeObserver}, - state::State, - Error, -}; +use crate::{corpus::Testcase, executors::ExitKind, observers::TimeObserver, Error}; + +#[cfg(feature = "std")] +pub mod capture_feedback; + #[cfg(feature = "std")] pub mod concolic; #[cfg(feature = "std")] @@ -54,49 +48,56 @@ pub mod new_hash_feedback; pub mod stdio; pub mod transferred; -/// Feedbacks evaluate the observers. -/// Basically, they reduce the information provided by an observer to a value, -/// indicating the "interestingness" of the last run. -pub trait Feedback: Named -where - S: State, -{ +#[cfg(feature = "std")] +pub use capture_feedback::CaptureTimeoutFeedback; + +#[cfg(feature = "introspection")] +use crate::state::HasClientPerfMonitor; + +/// Feedback which initializes a state. +/// +/// This trait is separate from the general [`Feedback`] definition as it would not be sufficiently +/// specified otherwise. +pub trait StateInitializer { /// Initializes the feedback state. /// This method is called after that the `State` is created. fn init_state(&mut self, _state: &mut S) -> Result<(), Error> { Ok(()) } +} +/// Feedbacks evaluate the observers. +/// Basically, they reduce the information provided by an observer to a value, +/// indicating the "interestingness" of the last run. 
+pub trait Feedback: StateInitializer + Named { /// `is_interesting ` return if an input is worth the addition to the corpus #[allow(clippy::wrong_self_convention)] - fn is_interesting( + fn is_interesting( &mut self, - state: &mut S, - manager: &mut EM, - input: &S::Input, - observers: &OT, - exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple; + _state: &mut S, + _manager: &mut EM, + _input: &I, + _observers: &OT, + _exit_kind: &ExitKind, + ) -> Result { + Ok(false) + } /// Returns if the result of a run is interesting and the value input should be stored in a corpus. /// It also keeps track of introspection stats. #[cfg(feature = "introspection")] #[allow(clippy::too_many_arguments)] #[allow(clippy::wrong_self_convention)] - fn is_interesting_introspection( + fn is_interesting_introspection( &mut self, state: &mut S, manager: &mut EM, - input: &S::Input, + input: &I, observers: &OT, exit_kind: &ExitKind, ) -> Result where - EM: EventFirer, - OT: ObserversTuple, + S: HasClientPerfMonitor, { // Start a timer for this feedback let start_time = libafl_bolts::cpu::read_time_counter(); @@ -131,25 +132,23 @@ where } /// Append to the testcase the generated metadata in case of a new corpus item + /// + /// Precondition: `testcase` must contain an input. #[inline] #[allow(unused_variables)] - fn append_metadata( + fn append_metadata( &mut self, - state: &mut S, - manager: &mut EM, - observers: &OT, - testcase: &mut Testcase, - ) -> Result<(), Error> - where - OT: ObserversTuple, - EM: EventFirer, - { + _state: &mut S, + _manager: &mut EM, + _observers: &OT, + _testcase: &mut Testcase, + ) -> Result<(), Error> { Ok(()) } /// Discard the stored metadata in case that the testcase is not added to the corpus #[inline] - fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn discard_metadata(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { Ok(()) } } @@ -165,39 +164,26 @@ pub trait HasObserverHandle { /// A combined feedback consisting of multiple [`Feedback`]s #[derive(Debug)] -pub struct CombinedFeedback -where - A: Feedback, - B: Feedback, - FL: FeedbackLogic, - S: State, -{ +pub struct CombinedFeedback { /// First [`Feedback`] pub first: A, /// Second [`Feedback`] pub second: B, name: Cow<'static, str>, - phantom: PhantomData<(S, FL)>, + phantom: PhantomData, } -impl Named for CombinedFeedback -where - A: Feedback, - B: Feedback, - FL: FeedbackLogic, - S: State, -{ +impl Named for CombinedFeedback { fn name(&self) -> &Cow<'static, str> { &self.name } } -impl CombinedFeedback +impl CombinedFeedback where - A: Feedback, - B: Feedback, - FL: FeedbackLogic, - S: State, + A: Named, + B: Named, + FL: FeedbackLogic, { /// Create a new combined feedback pub fn new(first: A, second: B) -> Self { @@ -216,42 +202,42 @@ where } } -impl Feedback for CombinedFeedback +impl StateInitializer for CombinedFeedback where - A: Feedback, - B: Feedback, - FL: FeedbackLogic, - S: State, + A: StateInitializer, + B: StateInitializer, { fn init_state(&mut self, state: &mut S) -> Result<(), Error> { self.first.init_state(state)?; self.second.init_state(state)?; Ok(()) } - #[cfg(feature = "track_hit_feedbacks")] - fn last_result(&self) -> Result { - FL::last_result(&self.first, &self.second) - } - #[cfg(feature = "track_hit_feedbacks")] - fn append_hit_feedbacks(&self, list: &mut Vec>) -> Result<(), Error> { - FL::append_hit_feedbacks(&self.first, &self.second, list) - } +} + +impl Feedback for CombinedFeedback +where + A: Feedback, + B: 
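The monolithic trait is now split into `StateInitializer` (state setup) and a `Feedback` trait whose methods all have defaults, so a custom feedback only overrides what it needs. A sketch under the assumption that the generic parameter order is `Feedback<EM, I, OT, S>` (the angle brackets are dropped by this rendering); the `HasLen` bound comes from `libafl_bolts`:

```rust
use std::borrow::Cow;

use libafl::{
    executors::ExitKind,
    feedbacks::{Feedback, StateInitializer},
    Error,
};
use libafl_bolts::{HasLen, Named};

/// Illustrative feedback (not part of LibAFL): interesting iff the run crashed and the
/// input is at least `min_len` bytes long.
#[derive(Debug)]
pub struct LongCrashFeedback {
    min_len: usize,
}

impl Named for LongCrashFeedback {
    fn name(&self) -> &Cow<'static, str> {
        static NAME: Cow<'static, str> = Cow::Borrowed("LongCrashFeedback");
        &NAME
    }
}

// Nothing to set up in the state, so the provided default `init_state` is enough.
impl<S> StateInitializer<S> for LongCrashFeedback {}

// ASSUMPTION: parameter order (EM, I, OT, S), matching the refactored trait.
impl<EM, I, OT, S> Feedback<EM, I, OT, S> for LongCrashFeedback
where
    I: HasLen,
{
    fn is_interesting(
        &mut self,
        _state: &mut S,
        _manager: &mut EM,
        input: &I,
        _observers: &OT,
        exit_kind: &ExitKind,
    ) -> Result<bool, Error> {
        Ok(matches!(exit_kind, ExitKind::Crash) && input.len() >= self.min_len)
    }

    // If the `track_hit_feedbacks` feature is enabled, `last_result` must be provided
    // as well; it is omitted here for brevity.
}
```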
Feedback, + FL: FeedbackLogic, +{ #[allow(clippy::wrong_self_convention)] - fn is_interesting( + fn is_interesting( &mut self, state: &mut S, manager: &mut EM, - input: &S::Input, + input: &I, observers: &OT, exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + ) -> Result { FL::is_pair_interesting( - &mut self.first, - &mut self.second, + |state, manager, input, observers, exit_kind| { + self.first + .is_interesting(state, manager, input, observers, exit_kind) + }, + |state, manager, input, observers, exit_kind| { + self.second + .is_interesting(state, manager, input, observers, exit_kind) + }, state, manager, input, @@ -262,21 +248,26 @@ where #[cfg(feature = "introspection")] #[allow(clippy::wrong_self_convention)] - fn is_interesting_introspection( + fn is_interesting_introspection( &mut self, state: &mut S, manager: &mut EM, - input: &S::Input, + input: &I, observers: &OT, exit_kind: &ExitKind, ) -> Result where - EM: EventFirer, - OT: ObserversTuple, + S: HasClientPerfMonitor, { - FL::is_pair_interesting_introspection( - &mut self.first, - &mut self.second, + FL::is_pair_interesting( + |state, manager, input, observers, exit_kind| { + self.first + .is_interesting_introspection(state, manager, input, observers, exit_kind) + }, + |state, manager, input, observers, exit_kind| { + self.second + .is_interesting_introspection(state, manager, input, observers, exit_kind) + }, state, manager, input, @@ -285,18 +276,30 @@ where ) } + #[cfg(feature = "track_hit_feedbacks")] + fn last_result(&self) -> Result { + FL::last_result(self.first.last_result(), self.second.last_result()) + } + + #[cfg(feature = "track_hit_feedbacks")] + fn append_hit_feedbacks(&self, list: &mut Vec>) -> Result<(), Error> { + FL::append_hit_feedbacks( + self.first.last_result(), + |list| self.first.append_hit_feedbacks(list), + self.second.last_result(), + |list| self.second.append_hit_feedbacks(list), + list, + ) + } + #[inline] - fn append_metadata( + fn append_metadata( &mut self, state: &mut S, manager: &mut EM, observers: &OT, - testcase: &mut Testcase, - ) -> Result<(), Error> - where - OT: ObserversTuple, - EM: EventFirer, - { + testcase: &mut Testcase, + ) -> Result<(), Error> { self.first .append_metadata(state, manager, observers, testcase)?; self.second @@ -304,21 +307,19 @@ where } #[inline] - fn discard_metadata(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { + fn discard_metadata(&mut self, state: &mut S, input: &I) -> Result<(), Error> { self.first.discard_metadata(state, input)?; self.second.discard_metadata(state, input) } } -impl FeedbackFactory, T> - for CombinedFeedback +impl FeedbackFactory, T> for CombinedFeedback where - A: Feedback + FeedbackFactory, - B: Feedback + FeedbackFactory, - FL: FeedbackLogic, - S: State, + A: FeedbackFactory + Named, + B: FeedbackFactory + Named, + FL: FeedbackLogic, { - fn create_feedback(&self, ctx: &T) -> CombinedFeedback { + fn create_feedback(&self, ctx: &T) -> CombinedFeedback { CombinedFeedback::new( self.first.create_feedback(ctx), self.second.create_feedback(ctx), @@ -327,58 +328,46 @@ where } /// Logical combination of two feedbacks -pub trait FeedbackLogic: 'static -where - A: Feedback, - B: Feedback, - S: State, -{ +pub trait FeedbackLogic { /// The name of this combination fn name() -> &'static str; - /// If the feedback pair is interesting - fn is_pair_interesting( - first: &mut A, - second: &mut B, + /// If the feedback pair is interesting. 
+ /// + /// `first` and `second` are closures which invoke the corresponding + /// [`Feedback::is_interesting`] methods of the associated feedbacks. Implementors may choose to + /// use the closure or not, depending on eagerness logic + fn is_pair_interesting( + first: F1, + second: F2, state: &mut S, manager: &mut EM, - input: &S::Input, + input: &I, observers: &OT, exit_kind: &ExitKind, ) -> Result where - EM: EventFirer, - OT: ObserversTuple; + F1: FnOnce(&mut S, &mut EM, &I, &OT, &ExitKind) -> Result, + F2: FnOnce(&mut S, &mut EM, &I, &OT, &ExitKind) -> Result; /// Get the result of the last `Self::is_interesting` run #[cfg(feature = "track_hit_feedbacks")] - fn last_result(first: &A, second: &B) -> Result; + fn last_result(first: Result, second: Result) -> Result; - /// Append this [`Feedback`]'s name if [`Feedback::last_result`] is true - /// If you have any nested Feedbacks, you must call this function on them if relevant. - /// See the implementations of [`CombinedFeedback`] + /// Append each [`Feedback`]'s name according to the logic implemented by this + /// [`FeedbackLogic`]. `if_first` and `if_second` are closures which invoke the corresponding + /// [`Feedback::append_hit_feedbacks`] logics of the relevant closures. #[cfg(feature = "track_hit_feedbacks")] - fn append_hit_feedbacks( - first: &A, - second: &B, + fn append_hit_feedbacks( + first_result: Result, + if_first: F1, + second_result: Result, + if_second: F2, list: &mut Vec>, - ) -> Result<(), Error>; - - /// If this pair is interesting (with introspection features enabled) - #[cfg(feature = "introspection")] - #[allow(clippy::too_many_arguments)] - fn is_pair_interesting_introspection( - first: &mut A, - second: &mut B, - state: &mut S, - manager: &mut EM, - input: &S::Input, - observers: &OT, - exit_kind: &ExitKind, - ) -> Result + ) -> Result<(), Error> where - EM: EventFirer, - OT: ObserversTuple; + F1: FnOnce(&mut Vec>) -> Result<(), Error>, + F2: FnOnce(&mut Vec>) -> Result<(), Error>; } /// Factory for feedbacks which should be sensitive to an existing context, e.g. observer(s) from a @@ -397,440 +386,321 @@ where } } /// Eager `OR` combination of two feedbacks +/// +/// When the `track_hit_feedbacks` feature is used, [`LogicEagerOr`]'s hit feedback preferences will +/// behave like [`LogicFastOr`]'s because the second feedback will not have contributed to the +/// result. When using [`crate::feedback_or`], ensure that you set the first parameter to the +/// prioritized feedback. 
#[derive(Debug, Clone)] -pub struct LogicEagerOr {} - +pub struct LogicEagerOr; /// Fast `OR` combination of two feedbacks #[derive(Debug, Clone)] -pub struct LogicFastOr {} +pub struct LogicFastOr; /// Eager `AND` combination of two feedbacks #[derive(Debug, Clone)] -pub struct LogicEagerAnd {} +pub struct LogicEagerAnd; /// Fast `AND` combination of two feedbacks #[derive(Debug, Clone)] -pub struct LogicFastAnd {} +pub struct LogicFastAnd; -impl FeedbackLogic for LogicEagerOr -where - A: Feedback, - B: Feedback, - S: State, -{ +impl FeedbackLogic for LogicEagerOr { fn name() -> &'static str { "Eager OR" } - fn is_pair_interesting( - first: &mut A, - second: &mut B, + fn is_pair_interesting( + first: F1, + second: F2, state: &mut S, manager: &mut EM, - input: &S::Input, + input: &I, observers: &OT, exit_kind: &ExitKind, ) -> Result where - EM: EventFirer, - OT: ObserversTuple, + F1: FnOnce(&mut S, &mut EM, &I, &OT, &ExitKind) -> Result, + F2: FnOnce(&mut S, &mut EM, &I, &OT, &ExitKind) -> Result, { - let a = first.is_interesting(state, manager, input, observers, exit_kind)?; - let b = second.is_interesting(state, manager, input, observers, exit_kind)?; - Ok(a || b) + Ok(first(state, manager, input, observers, exit_kind)? + | second(state, manager, input, observers, exit_kind)?) } #[cfg(feature = "track_hit_feedbacks")] - fn last_result(first: &A, second: &B) -> Result { - Ok(first.last_result()? || second.last_result()?) + fn last_result(first: Result, second: Result) -> Result { + first.and_then(|first| second.map(|second| first | second)) } /// Note: Eager OR's hit feedbacks will behave like Fast OR /// because the second feedback will not have contributed to the result. /// Set the second feedback as the first (A, B) vs (B, A) /// to "prioritize" the result in case of Eager OR. #[cfg(feature = "track_hit_feedbacks")] - fn append_hit_feedbacks( - first: &A, - second: &B, + fn append_hit_feedbacks( + first_result: Result, + if_first: F1, + second_result: Result, + if_second: F2, list: &mut Vec>, - ) -> Result<(), Error> { - if first.last_result()? { - first.append_hit_feedbacks(list)?; - } else if second.last_result()? 
{ - second.append_hit_feedbacks(list)?; - } - Ok(()) - } - - #[cfg(feature = "introspection")] - fn is_pair_interesting_introspection( - first: &mut A, - second: &mut B, - state: &mut S, - manager: &mut EM, - input: &S::Input, - observers: &OT, - exit_kind: &ExitKind, - ) -> Result + ) -> Result<(), Error> where - EM: EventFirer, - OT: ObserversTuple, + F1: FnOnce(&mut Vec>) -> Result<(), Error>, + F2: FnOnce(&mut Vec>) -> Result<(), Error>, { - // Execute this feedback - let a = first.is_interesting_introspection(state, manager, input, observers, exit_kind)?; - - let b = second.is_interesting_introspection(state, manager, input, observers, exit_kind)?; - Ok(a || b) + LogicFastOr::append_hit_feedbacks(first_result, if_first, second_result, if_second, list) } } -impl FeedbackLogic for LogicFastOr -where - A: Feedback, - B: Feedback, - S: State, -{ +impl FeedbackLogic for LogicFastOr { fn name() -> &'static str { "Fast OR" } - fn is_pair_interesting( - first: &mut A, - second: &mut B, + fn is_pair_interesting( + first: F1, + second: F2, state: &mut S, manager: &mut EM, - input: &S::Input, + input: &I, observers: &OT, exit_kind: &ExitKind, ) -> Result where - EM: EventFirer, - OT: ObserversTuple, + F1: FnOnce(&mut S, &mut EM, &I, &OT, &ExitKind) -> Result, + F2: FnOnce(&mut S, &mut EM, &I, &OT, &ExitKind) -> Result, { - let a = first.is_interesting(state, manager, input, observers, exit_kind)?; + let a = first(state, manager, input, observers, exit_kind)?; if a { return Ok(true); } - second.is_interesting(state, manager, input, observers, exit_kind) + second(state, manager, input, observers, exit_kind) } #[cfg(feature = "track_hit_feedbacks")] - fn last_result(first: &A, second: &B) -> Result { - if first.last_result()? { - return Ok(true); - } - - // The second must have run if the first wasn't interesting - second.last_result() + fn last_result(first: Result, second: Result) -> Result { + first.and_then(|first| Ok(first || second?)) } + /// Note: Eager OR's hit feedbacks will behave like Fast OR + /// because the second feedback will not have contributed to the result. + /// Set the second feedback as the first (A, B) vs (B, A) + /// to "prioritize" the result in case of Eager OR. #[cfg(feature = "track_hit_feedbacks")] - fn append_hit_feedbacks( - first: &A, - second: &B, + fn append_hit_feedbacks( + first_result: Result, + if_first: F1, + second_result: Result, + if_second: F2, list: &mut Vec>, - ) -> Result<(), Error> { - if first.last_result()? { - first.append_hit_feedbacks(list)?; - } else if second.last_result()? { - second.append_hit_feedbacks(list)?; - } - Ok(()) - } - - #[cfg(feature = "introspection")] - fn is_pair_interesting_introspection( - first: &mut A, - second: &mut B, - state: &mut S, - manager: &mut EM, - input: &S::Input, - observers: &OT, - exit_kind: &ExitKind, - ) -> Result + ) -> Result<(), Error> where - EM: EventFirer, - OT: ObserversTuple, + F1: FnOnce(&mut Vec>) -> Result<(), Error>, + F2: FnOnce(&mut Vec>) -> Result<(), Error>, { - // Execute this feedback - let a = first.is_interesting_introspection(state, manager, input, observers, exit_kind)?; - - if a { - return Ok(true); + if first_result? { + if_first(list) + } else if second_result? 
{ + if_second(list) + } else { + Ok(()) } - - second.is_interesting_introspection(state, manager, input, observers, exit_kind) } } -impl FeedbackLogic for LogicEagerAnd -where - A: Feedback, - B: Feedback, - S: State, -{ +impl FeedbackLogic for LogicEagerAnd { fn name() -> &'static str { "Eager AND" } - fn is_pair_interesting( - first: &mut A, - second: &mut B, + fn is_pair_interesting( + first: F1, + second: F2, state: &mut S, manager: &mut EM, - input: &S::Input, + input: &I, observers: &OT, exit_kind: &ExitKind, ) -> Result where - EM: EventFirer, - OT: ObserversTuple, + F1: FnOnce(&mut S, &mut EM, &I, &OT, &ExitKind) -> Result, + F2: FnOnce(&mut S, &mut EM, &I, &OT, &ExitKind) -> Result, { - let a = first.is_interesting(state, manager, input, observers, exit_kind)?; - let b = second.is_interesting(state, manager, input, observers, exit_kind)?; - Ok(a && b) + Ok(first(state, manager, input, observers, exit_kind)? + & second(state, manager, input, observers, exit_kind)?) } #[cfg(feature = "track_hit_feedbacks")] - fn last_result(first: &A, second: &B) -> Result { - Ok(first.last_result()? && second.last_result()?) + fn last_result(first: Result, second: Result) -> Result { + Ok(first? & second?) } + #[cfg(feature = "track_hit_feedbacks")] - fn append_hit_feedbacks( - first: &A, - second: &B, + fn append_hit_feedbacks( + first_result: Result, + if_first: F1, + second_result: Result, + if_second: F2, list: &mut Vec>, - ) -> Result<(), Error> { - if first.last_result()? && second.last_result()? { - first.append_hit_feedbacks(list)?; - second.append_hit_feedbacks(list)?; + ) -> Result<(), Error> + where + F1: FnOnce(&mut Vec>) -> Result<(), Error>, + F2: FnOnce(&mut Vec>) -> Result<(), Error>, + { + if first_result? & second_result? { + if_first(list)?; + if_second(list)?; } Ok(()) } - - #[cfg(feature = "introspection")] - fn is_pair_interesting_introspection( - first: &mut A, - second: &mut B, - state: &mut S, - manager: &mut EM, - input: &S::Input, - observers: &OT, - exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - // Execute this feedback - let a = first.is_interesting_introspection(state, manager, input, observers, exit_kind)?; - - let b = second.is_interesting_introspection(state, manager, input, observers, exit_kind)?; - Ok(a && b) - } } -impl FeedbackLogic for LogicFastAnd -where - A: Feedback, - B: Feedback, - S: State, -{ +impl FeedbackLogic for LogicFastAnd { fn name() -> &'static str { "Fast AND" } - fn is_pair_interesting( - first: &mut A, - second: &mut B, + fn is_pair_interesting( + first: F1, + second: F2, state: &mut S, manager: &mut EM, - input: &S::Input, + input: &I, observers: &OT, exit_kind: &ExitKind, ) -> Result where - EM: EventFirer, - OT: ObserversTuple, + F1: FnOnce(&mut S, &mut EM, &I, &OT, &ExitKind) -> Result, + F2: FnOnce(&mut S, &mut EM, &I, &OT, &ExitKind) -> Result, { - let a = first.is_interesting(state, manager, input, observers, exit_kind)?; - if !a { - return Ok(false); - } - - second.is_interesting(state, manager, input, observers, exit_kind) + Ok(first(state, manager, input, observers, exit_kind)? + && second(state, manager, input, observers, exit_kind)?) } #[cfg(feature = "track_hit_feedbacks")] - fn last_result(first: &A, second: &B) -> Result { - if !first.last_result()? { - return Ok(false); - } - - // The second must have run if the first wasn't interesting - second.last_result() + fn last_result(first: Result, second: Result) -> Result { + Ok(first? && second?) 
} #[cfg(feature = "track_hit_feedbacks")] - fn append_hit_feedbacks( - first: &A, - second: &B, + fn append_hit_feedbacks( + first_result: Result, + if_first: F1, + second_result: Result, + if_second: F2, list: &mut Vec>, - ) -> Result<(), Error> { - if first.last_result()? { - first.append_hit_feedbacks(list)?; - } else if second.last_result()? { - second.append_hit_feedbacks(list)?; + ) -> Result<(), Error> + where + F1: FnOnce(&mut Vec>) -> Result<(), Error>, + F2: FnOnce(&mut Vec>) -> Result<(), Error>, + { + if first_result? && second_result? { + if_first(list)?; + if_second(list)?; } Ok(()) } - - #[cfg(feature = "introspection")] - fn is_pair_interesting_introspection( - first: &mut A, - second: &mut B, - state: &mut S, - manager: &mut EM, - input: &S::Input, - observers: &OT, - exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - // Execute this feedback - let a = first.is_interesting_introspection(state, manager, input, observers, exit_kind)?; - - if !a { - return Ok(false); - } - - second.is_interesting_introspection(state, manager, input, observers, exit_kind) - } } /// Combine two feedbacks with an eager AND operation, /// will call all feedbacks functions even if not necessary to conclude the result -pub type EagerAndFeedback = CombinedFeedback; +pub type EagerAndFeedback = CombinedFeedback; /// Combine two feedbacks with an fast AND operation, /// might skip calling feedbacks functions if not necessary to conclude the result -pub type FastAndFeedback = CombinedFeedback; +pub type FastAndFeedback = CombinedFeedback; /// Combine two feedbacks with an eager OR operation, /// will call all feedbacks functions even if not necessary to conclude the result -pub type EagerOrFeedback = CombinedFeedback; +pub type EagerOrFeedback = CombinedFeedback; -/// Combine two feedbacks with an fast OR operation, -/// might skip calling feedbacks functions if not necessary to conclude the result. +/// Combine two feedbacks with an fast OR operation - fast. +/// +/// This might skip calling feedbacks functions if not necessary to conclude the result. /// This means any feedback that is not first might be skipped, use caution when using with /// `TimeFeedback` -pub type FastOrFeedback = CombinedFeedback; +pub type FastOrFeedback = CombinedFeedback; /// Compose feedbacks with an `NOT` operation -#[derive(Clone)] -pub struct NotFeedback -where - A: Feedback, - S: State, -{ +#[derive(Clone, Debug)] +pub struct NotFeedback { /// The feedback to invert - pub first: A, + pub inner: A, /// The name name: Cow<'static, str>, - phantom: PhantomData, } -impl Debug for NotFeedback +impl StateInitializer for NotFeedback where - A: Feedback + Debug, - S: State, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("NotFeedback") - .field("name", &self.name) - .field("first", &self.first) - .finish() - } -} - -impl Feedback for NotFeedback -where - A: Feedback, - S: State, + A: StateInitializer, { fn init_state(&mut self, state: &mut S) -> Result<(), Error> { - self.first.init_state(state) + self.inner.init_state(state) } +} +impl Feedback for NotFeedback +where + A: Feedback, +{ #[allow(clippy::wrong_self_convention)] - fn is_interesting( + fn is_interesting( &mut self, state: &mut S, manager: &mut EM, - input: &S::Input, + input: &I, observers: &OT, exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + ) -> Result { Ok(!self - .first + .inner .is_interesting(state, manager, input, observers, exit_kind)?) 
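The eager/fast distinction documented above matters most for feedbacks with side effects (the `FastOrFeedback` doc explicitly warns about `TimeFeedback`). A short sketch of the two combinators using only the stock zero-argument feedbacks, via the existing `feedback_or!`/`feedback_or_fast!` macros:

```rust
use libafl::{
    feedback_or, feedback_or_fast,
    feedbacks::{ConstFeedback, CrashFeedback, TimeoutFeedback},
};

fn build_objectives() {
    // Fast OR short-circuits: if the crash check already returned `true`, the timeout
    // check never runs (and, with `track_hit_feedbacks`, never contributes a hit).
    // Put the feedback whose result or side effects matter most in the first slot.
    let fast = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new());

    // Eager OR evaluates both sides every time, which is what you want when the second
    // feedback must observe every execution (e.g. `TimeFeedback` recording runtimes).
    let eager = feedback_or!(CrashFeedback::new(), ConstFeedback::new(false));

    let _ = (fast, eager);
}
```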
} - #[inline] - fn append_metadata( - &mut self, - state: &mut S, - manager: &mut EM, - observers: &OT, - testcase: &mut Testcase, - ) -> Result<(), Error> - where - OT: ObserversTuple, - EM: EventFirer, - { - self.first - .append_metadata(state, manager, observers, testcase) - } - - #[inline] - fn discard_metadata(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { - self.first.discard_metadata(state, input) - } - #[cfg(feature = "track_hit_feedbacks")] fn last_result(&self) -> Result { - Ok(!self.first.last_result()?) + Ok(!self.inner.last_result()?) + } + + #[inline] + fn append_metadata( + &mut self, + state: &mut S, + manager: &mut EM, + observers: &OT, + testcase: &mut Testcase, + ) -> Result<(), Error> { + self.inner + .append_metadata(state, manager, observers, testcase) + } + + #[inline] + fn discard_metadata(&mut self, state: &mut S, input: &I) -> Result<(), Error> { + self.inner.discard_metadata(state, input) } } -impl Named for NotFeedback -where - A: Feedback, - S: State, -{ +impl Named for NotFeedback { #[inline] fn name(&self) -> &Cow<'static, str> { &self.name } } -impl NotFeedback +impl FeedbackFactory, T> for NotFeedback where - A: Feedback, - S: State, + A: Named + FeedbackFactory, +{ + fn create_feedback(&self, ctx: &T) -> NotFeedback { + NotFeedback::new(self.inner.create_feedback(ctx)) + } +} + +impl NotFeedback +where + A: Named, { /// Creates a new [`NotFeedback`]. - pub fn new(first: A) -> Self { - let name = Cow::from(format!("Not({})", first.name())); - Self { - first, - name, - phantom: PhantomData, - } + pub fn new(inner: A) -> Self { + let name = Cow::from(format!("Not({})", inner.name())); + Self { inner, name } } } @@ -894,243 +764,155 @@ macro_rules! feedback_not { }; } +impl StateInitializer for () {} + /// Hack to use () as empty Feedback -impl Feedback for () -where - S: State, -{ - #[allow(clippy::wrong_self_convention)] - fn is_interesting( - &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &S::Input, - _observers: &OT, - _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - Ok(false) - } +impl Feedback for () { #[cfg(feature = "track_hit_feedbacks")] fn last_result(&self) -> Result { Ok(false) } } +/// Logic for measuring whether a given [`ExitKind`] is interesting as a [`Feedback`]. Use with +/// [`ExitKindFeedback`]. 
+pub trait ExitKindLogic { + /// The name of this kind of logic + const NAME: Cow<'static, str>; + + /// Check whether the provided [`ExitKind`] is actually interesting + fn check_exit_kind(kind: &ExitKind) -> Result; +} +/// Name used by `CrashFeedback` +pub const CRASH_FEEDBACK_NAME: &str = "CrashFeedback"; +/// Logic which finds all [`ExitKind::Crash`] exits interesting +#[derive(Debug, Copy, Clone)] +pub struct CrashLogic; + +impl ExitKindLogic for CrashLogic { + const NAME: Cow<'static, str> = Cow::Borrowed(CRASH_FEEDBACK_NAME); + + fn check_exit_kind(kind: &ExitKind) -> Result { + Ok(matches!(kind, ExitKind::Crash)) + } +} +/// Name used by `TimeoutFeedback` +pub const TIMEOUT_FEEDBACK_NAME: &str = "TimeoutFeedback"; + +/// Logic which finds all [`ExitKind::Timeout`] exits interesting +#[derive(Debug, Copy, Clone)] +pub struct TimeoutLogic; + +impl ExitKindLogic for TimeoutLogic { + const NAME: Cow<'static, str> = Cow::Borrowed(TIMEOUT_FEEDBACK_NAME); + + fn check_exit_kind(kind: &ExitKind) -> Result { + Ok(matches!(kind, ExitKind::Timeout)) + } +} + +/// Logic which finds all [`ExitKind::Diff`] exits interesting +#[derive(Debug, Copy, Clone)] +pub struct GenericDiffLogic; + +impl ExitKindLogic for GenericDiffLogic { + const NAME: Cow<'static, str> = Cow::Borrowed("DiffExitKindFeedback"); + + fn check_exit_kind(kind: &ExitKind) -> Result { + Ok(matches!(kind, ExitKind::Diff { .. })) + } +} + +/// A generic exit type checking feedback. Use [`CrashFeedback`], [`TimeoutFeedback`], or +/// [`DiffExitKindFeedback`] directly instead. +#[derive(Serialize, Deserialize, Clone, Debug)] +pub struct ExitKindFeedback { + #[cfg(feature = "track_hit_feedbacks")] + /// The previous run's result of [`Self::is_interesting`] + last_result: Option, + name: Cow<'static, str>, + phantom: PhantomData L>, +} + +impl StateInitializer for ExitKindFeedback where L: ExitKindLogic {} + +impl Feedback for ExitKindFeedback +where + L: ExitKindLogic, +{ + #[allow(clippy::wrong_self_convention)] + fn is_interesting( + &mut self, + _state: &mut S, + _manager: &mut EM, + _input: &I, + _observers: &OT, + exit_kind: &ExitKind, + ) -> Result { + let res = L::check_exit_kind(exit_kind)?; + #[cfg(feature = "track_hit_feedbacks")] + { + self.last_result = Some(res); + } + Ok(res) + } + + #[cfg(feature = "track_hit_feedbacks")] + fn last_result(&self) -> Result { + self.last_result.ok_or(premature_last_result_err()) + } +} + +impl Named for ExitKindFeedback { + #[inline] + fn name(&self) -> &Cow<'static, str> { + &self.name + } +} + +impl ExitKindFeedback +where + L: ExitKindLogic, +{ + /// Creates a new [`ExitKindFeedback`] + #[must_use] + pub fn new() -> Self { + Self { + #[cfg(feature = "track_hit_feedbacks")] + last_result: None, + name: L::NAME, + phantom: PhantomData, + } + } +} + +impl Default for ExitKindFeedback +where + L: ExitKindLogic, +{ + fn default() -> Self { + Self::new() + } +} + +impl FeedbackFactory, T> for ExitKindFeedback +where + L: ExitKindLogic, +{ + fn create_feedback(&self, _ctx: &T) -> ExitKindFeedback { + Self::new() + } +} + /// A [`CrashFeedback`] reports as interesting if the target crashed. 
-#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct CrashFeedback { - #[cfg(feature = "track_hit_feedbacks")] - // The previous run's result of `Self::is_interesting` - last_result: Option, -} - -impl Feedback for CrashFeedback -where - S: State, -{ - #[allow(clippy::wrong_self_convention)] - fn is_interesting( - &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &S::Input, - _observers: &OT, - exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - let res = matches!(exit_kind, ExitKind::Crash); - #[cfg(feature = "track_hit_feedbacks")] - { - self.last_result = Some(res); - } - Ok(res) - } - - #[cfg(feature = "track_hit_feedbacks")] - fn last_result(&self) -> Result { - self.last_result.ok_or(premature_last_result_err()) - } -} - -impl Named for CrashFeedback { - #[inline] - fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("CrashFeedback"); - &NAME - } -} - -impl CrashFeedback { - /// Creates a new [`CrashFeedback`] - #[must_use] - pub fn new() -> Self { - Self { - #[cfg(feature = "track_hit_feedbacks")] - last_result: None, - } - } -} - -impl Default for CrashFeedback { - fn default() -> Self { - Self::new() - } -} - -impl FeedbackFactory for CrashFeedback { - fn create_feedback(&self, _ctx: &T) -> CrashFeedback { - CrashFeedback::new() - } -} - +pub type CrashFeedback = ExitKindFeedback; /// A [`TimeoutFeedback`] reduces the timeout value of a run. -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct TimeoutFeedback { - #[cfg(feature = "track_hit_feedbacks")] - // The previous run's result of `Self::is_interesting` - last_result: Option, -} - -impl Feedback for TimeoutFeedback -where - S: State, -{ - #[allow(clippy::wrong_self_convention)] - fn is_interesting( - &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &S::Input, - _observers: &OT, - exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - let res = matches!(exit_kind, ExitKind::Timeout); - #[cfg(feature = "track_hit_feedbacks")] - { - self.last_result = Some(res); - } - Ok(res) - } - - #[cfg(feature = "track_hit_feedbacks")] - fn last_result(&self) -> Result { - self.last_result.ok_or(premature_last_result_err()) - } -} - -impl Named for TimeoutFeedback { - #[inline] - fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("TimeoutFeedback"); - &NAME - } -} - -impl TimeoutFeedback { - /// Returns a new [`TimeoutFeedback`]. - #[must_use] - pub fn new() -> Self { - Self { - #[cfg(feature = "track_hit_feedbacks")] - last_result: None, - } - } -} - -impl Default for TimeoutFeedback { - fn default() -> Self { - Self::new() - } -} - -/// A feedback factory for timeout feedbacks -impl FeedbackFactory for TimeoutFeedback { - fn create_feedback(&self, _ctx: &T) -> TimeoutFeedback { - TimeoutFeedback::new() - } -} - -/// A [`DiffExitKindFeedback`] checks if there is a difference in the [`crate::executors::ExitKind`]s in a [`crate::executors::DiffExecutor`]. 
-#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct DiffExitKindFeedback { - #[cfg(feature = "track_hit_feedbacks")] - // The previous run's result of `Self::is_interesting` - last_result: Option, -} - -impl Feedback for DiffExitKindFeedback -where - S: State, -{ - #[allow(clippy::wrong_self_convention)] - fn is_interesting( - &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &S::Input, - _observers: &OT, - exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - let res = matches!(exit_kind, ExitKind::Diff { .. }); - #[cfg(feature = "track_hit_feedbacks")] - { - self.last_result = Some(res); - } - Ok(res) - } - #[cfg(feature = "track_hit_feedbacks")] - fn last_result(&self) -> Result { - self.last_result.ok_or(premature_last_result_err()) - } -} - -impl Named for DiffExitKindFeedback { - #[inline] - fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("DiffExitKindFeedback"); - &NAME - } -} - -impl DiffExitKindFeedback { - /// Returns a new [`DiffExitKindFeedback`]. - #[must_use] - pub fn new() -> Self { - Self { - #[cfg(feature = "track_hit_feedbacks")] - last_result: None, - } - } -} - -impl Default for DiffExitKindFeedback { - fn default() -> Self { - Self::new() - } -} - -/// A feedback factory for diff exit kind feedbacks -impl FeedbackFactory for DiffExitKindFeedback { - fn create_feedback(&self, _ctx: &T) -> DiffExitKindFeedback { - DiffExitKindFeedback::new() - } -} +pub type TimeoutFeedback = ExitKindFeedback; +/// A [`DiffExitKindFeedback`] checks if there is a difference in the [`ExitKind`]s in a [`crate::executors::DiffExecutor`]. +pub type DiffExitKindFeedback = ExitKindFeedback; +/// A [`Feedback`] to track execution time. +/// /// Nop feedback that annotates execution time in the new testcase, if any /// for this Feedback, the testcase is never interesting (use with an OR). /// It decides, if the given [`TimeObserver`] value of a run is interesting. 
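// A minimal sketch of how the new generic feedback composes: a zero-sized type
// implementing `ExitKindLogic` becomes a full feedback through a type alias, just
// like the `CrashFeedback`/`TimeoutFeedback` aliases above. `OomLogic`, the name
// string, and the `ExitKind::Oom` match are illustrative assumptions, not part of
// this change.
#[derive(Debug, Copy, Clone)]
pub struct OomLogic;

impl ExitKindLogic for OomLogic {
    const NAME: Cow<'static, str> = Cow::Borrowed("OomFeedback");

    fn check_exit_kind(kind: &ExitKind) -> Result<bool, Error> {
        Ok(matches!(kind, ExitKind::Oom))
    }
}

/// Hypothetical feedback that fires on out-of-memory exits.
pub type OomFeedback = ExitKindFeedback<OomLogic>;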
@@ -1138,56 +920,30 @@ impl FeedbackFactory for DiffExitKindFeedback { pub struct TimeFeedback { observer_handle: Handle, } +impl StateInitializer for TimeFeedback {} -impl Feedback for TimeFeedback +impl Feedback for TimeFeedback where - S: State, + OT: MatchName, { - #[allow(clippy::wrong_self_convention)] - fn is_interesting( - &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &S::Input, - _observers: &OT, - _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - // TODO Replace with match_name_type when stable + #[cfg(feature = "track_hit_feedbacks")] + fn last_result(&self) -> Result { Ok(false) } /// Append to the testcase the generated metadata in case of a new corpus item #[inline] - fn append_metadata( + fn append_metadata( &mut self, _state: &mut S, _manager: &mut EM, observers: &OT, - testcase: &mut Testcase, - ) -> Result<(), Error> - where - OT: ObserversTuple, - EM: EventFirer, - { + testcase: &mut Testcase, + ) -> Result<(), Error> { let observer = observers.get(&self.observer_handle).unwrap(); *testcase.exec_time_mut() = *observer.last_runtime(); Ok(()) } - - /// Discard the stored metadata in case that the testcase is not added to the corpus - #[inline] - fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { - Ok(()) - } - - #[cfg(feature = "track_hit_feedbacks")] - fn last_result(&self) -> Result { - Ok(false) - } } impl Named for TimeFeedback { @@ -1213,28 +969,23 @@ impl TimeFeedback { pub enum ConstFeedback { /// Always returns `true` True, - /// Alsways returns `false` + /// Always returns `false` False, } -impl Feedback for ConstFeedback -where - S: State, -{ +impl StateInitializer for ConstFeedback {} + +impl Feedback for ConstFeedback { #[inline] #[allow(clippy::wrong_self_convention)] - fn is_interesting( + fn is_interesting( &mut self, _state: &mut S, _manager: &mut EM, - _input: &S::Input, + _input: &I, _observers: &OT, _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + ) -> Result { Ok((*self).into()) } @@ -1279,6 +1030,12 @@ impl From for bool { } } +impl FeedbackFactory for ConstFeedback { + fn create_feedback(&self, _ctx: &T) -> ConstFeedback { + *self + } +} + #[cfg(feature = "track_hit_feedbacks")] /// Error if [`Feedback::last_result`] is called before the `Feedback` is actually run. pub(crate) fn premature_last_result_err() -> Error { diff --git a/libafl/src/feedbacks/nautilus.rs b/libafl/src/feedbacks/nautilus.rs index 4b0d9e1caa..5cc4986a5a 100644 --- a/libafl/src/feedbacks/nautilus.rs +++ b/libafl/src/feedbacks/nautilus.rs @@ -1,6 +1,6 @@ //! 
Nautilus grammar mutator, see use alloc::{borrow::Cow, string::String}; -use core::{fmt::Debug, marker::PhantomData}; +use core::fmt::Debug; use std::fs::create_dir_all; use libafl_bolts::Named; @@ -9,13 +9,11 @@ use serde::{Deserialize, Serialize}; use crate::{ common::nautilus::grammartec::{chunkstore::ChunkStore, context::Context}, corpus::{Corpus, Testcase}, - events::EventFirer, executors::ExitKind, - feedbacks::Feedback, + feedbacks::{Feedback, StateInitializer}, generators::NautilusContext, inputs::NautilusInput, - observers::ObserversTuple, - state::{HasCorpus, State}, + state::HasCorpus, Error, HasMetadata, }; @@ -51,64 +49,26 @@ impl NautilusChunksMetadata { } /// A nautilus feedback for grammar fuzzing -pub struct NautilusFeedback<'a, S> { +#[derive(Debug)] +pub struct NautilusFeedback<'a> { ctx: &'a Context, - phantom: PhantomData, } -impl Debug for NautilusFeedback<'_, S> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "NautilusFeedback {{}}") - } -} - -impl<'a, S> NautilusFeedback<'a, S> { +impl<'a> NautilusFeedback<'a> { /// Create a new [`NautilusFeedback`] #[must_use] pub fn new(context: &'a NautilusContext) -> Self { - Self { - ctx: &context.ctx, - phantom: PhantomData, - } - } -} - -impl<'a, S> Named for NautilusFeedback<'a, S> { - fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("NautilusFeedback"); - &NAME - } -} - -impl<'a, S> Feedback for NautilusFeedback<'a, S> -where - S: HasMetadata + HasCorpus + State, -{ - #[allow(clippy::wrong_self_convention)] - fn is_interesting( - &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &NautilusInput, - _observers: &OT, - _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - Ok(false) + Self { ctx: &context.ctx } } - fn append_metadata( + fn append_nautilus_metadata_to_state( &mut self, state: &mut S, - _manager: &mut EM, - _observers: &OT, - testcase: &mut Testcase, + testcase: &mut Testcase, ) -> Result<(), Error> where - OT: ObserversTuple, + S: HasCorpus + HasMetadata, + S::Corpus: Corpus, { state.corpus().load_input_into(testcase)?; let input = testcase.input().as_ref().unwrap().clone(); @@ -117,8 +77,46 @@ where .get_mut::() .expect("NautilusChunksMetadata not in the state"); meta.cks.add_tree(input.tree, self.ctx); + Ok(()) } +} + +impl Named for NautilusFeedback<'_> { + fn name(&self) -> &Cow<'static, str> { + static NAME: Cow<'static, str> = Cow::Borrowed("NautilusFeedback"); + &NAME + } +} + +impl StateInitializer for NautilusFeedback<'_> {} + +impl Feedback for NautilusFeedback<'_> +where + S: HasMetadata + HasCorpus, + S::Corpus: Corpus, +{ + #[allow(clippy::wrong_self_convention)] + fn is_interesting( + &mut self, + _state: &mut S, + _manager: &mut EM, + _input: &NautilusInput, + _observers: &OT, + _exit_kind: &ExitKind, + ) -> Result { + Ok(false) + } + + fn append_metadata( + &mut self, + state: &mut S, + _manager: &mut EM, + _observers: &OT, + testcase: &mut Testcase, + ) -> Result<(), Error> { + self.append_nautilus_metadata_to_state(state, testcase) + } fn discard_metadata(&mut self, _state: &mut S, _input: &NautilusInput) -> Result<(), Error> { Ok(()) diff --git a/libafl/src/feedbacks/new_hash_feedback.rs b/libafl/src/feedbacks/new_hash_feedback.rs index 11427fbc45..b33d14790e 100644 --- a/libafl/src/feedbacks/new_hash_feedback.rs +++ b/libafl/src/feedbacks/new_hash_feedback.rs @@ -1,11 +1,11 @@ //! 
The ``NewHashFeedback`` uses the backtrace hash and a hashset to only keep novel cases use alloc::{borrow::Cow, string::ToString}; -use std::{fmt::Debug, marker::PhantomData}; +use std::fmt::Debug; use hashbrown::HashSet; use libafl_bolts::{ - tuples::{Handle, Handled, MatchNameRef}, + tuples::{Handle, Handled, MatchName, MatchNameRef}, Named, }; use serde::{Deserialize, Serialize}; @@ -13,12 +13,9 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "track_hit_feedbacks")] use crate::feedbacks::premature_last_result_err; use crate::{ - events::EventFirer, executors::ExitKind, - feedbacks::{Feedback, HasObserverHandle}, - inputs::UsesInput, - observers::{ObserverWithHashField, ObserversTuple}, - state::State, + feedbacks::{Feedback, HasObserverHandle, StateInitializer}, + observers::ObserverWithHashField, Error, HasNamedMetadata, }; @@ -38,7 +35,7 @@ pub trait HashSetState { #[allow(clippy::unsafe_derive_deserialize)] pub struct NewHashFeedbackMetadata { /// Contains information about untouched entries - pub hash_set: HashSet, + hash_set: HashSet, } #[rustfmt::skip] @@ -64,6 +61,17 @@ impl NewHashFeedbackMetadata { self.hash_set.clear(); Ok(()) } + + /// Gets the associated [`HashSet`] being used to track hashes + #[must_use] + pub fn hash_set(&self) -> &HashSet { + &self.hash_set + } + + /// Gets the associated [`HashSet`] being used to track hashes, mutably + pub fn hash_set_mut(&mut self) -> &mut HashSet { + &mut self.hash_set + } } impl HashSetState for NewHashFeedbackMetadata { @@ -82,7 +90,7 @@ impl HashSetState for NewHashFeedbackMetadata { /// A [`NewHashFeedback`] maintains a hashset of already seen stacktraces and considers interesting unseen ones #[derive(Serialize, Deserialize, Clone, Debug)] -pub struct NewHashFeedback { +pub struct NewHashFeedback { name: Cow<'static, str>, o_ref: Handle, /// Initial capacity of hash set @@ -90,34 +98,20 @@ pub struct NewHashFeedback { #[cfg(feature = "track_hit_feedbacks")] // The previous run's result of `Self::is_interesting` last_result: Option, - phantom: PhantomData, } -impl Feedback for NewHashFeedback +impl NewHashFeedback where O: ObserverWithHashField + Named, - S: State + HasNamedMetadata, { - fn init_state(&mut self, state: &mut S) -> Result<(), Error> { - state.add_named_metadata( - &self.name, - NewHashFeedbackMetadata::with_capacity(self.capacity), - ); - Ok(()) - } - #[allow(clippy::wrong_self_convention)] - fn is_interesting( + fn has_interesting_backtrace_hash_observation( &mut self, state: &mut S, - _manager: &mut EM, - _input: &::Input, observers: &OT, - _exit_kind: &ExitKind, ) -> Result where - EM: EventFirer, - OT: ObserversTuple, + OT: MatchName, { let observer = observers .get(&self.o_ref) @@ -141,20 +135,53 @@ where } Ok(res) } +} + +impl StateInitializer for NewHashFeedback +where + S: HasNamedMetadata, +{ + fn init_state(&mut self, state: &mut S) -> Result<(), Error> { + state.add_named_metadata( + &self.name, + NewHashFeedbackMetadata::with_capacity(self.capacity), + ); + Ok(()) + } +} + +impl Feedback for NewHashFeedback +where + O: ObserverWithHashField + Named, + OT: MatchName, + S: HasNamedMetadata, +{ + #[allow(clippy::wrong_self_convention)] + fn is_interesting( + &mut self, + state: &mut S, + _manager: &mut EM, + _input: &I, + observers: &OT, + _exit_kind: &ExitKind, + ) -> Result { + self.has_interesting_backtrace_hash_observation(state, observers) + } + #[cfg(feature = "track_hit_feedbacks")] fn last_result(&self) -> Result { self.last_result.ok_or(premature_last_result_err()) } } -impl Named for 
NewHashFeedback { +impl Named for NewHashFeedback { #[inline] fn name(&self) -> &Cow<'static, str> { &self.name } } -impl HasObserverHandle for NewHashFeedback { +impl HasObserverHandle for NewHashFeedback { type Observer = O; #[inline] @@ -169,9 +196,9 @@ impl HasObserverHandle for NewHashFeedback { /// runs of the target, producing many different feedbacks. const DEFAULT_CAPACITY: usize = 4096; -impl NewHashFeedback +impl NewHashFeedback where - O: ObserverWithHashField + Named, + O: Named, { /// Returns a new [`NewHashFeedback`]. #[must_use] @@ -189,7 +216,6 @@ where capacity, #[cfg(feature = "track_hit_feedbacks")] last_result: None, - phantom: PhantomData, } } } diff --git a/libafl/src/feedbacks/stdio.rs b/libafl/src/feedbacks/stdio.rs index 2007305c63..b4a140e228 100644 --- a/libafl/src/feedbacks/stdio.rs +++ b/libafl/src/feedbacks/stdio.rs @@ -4,18 +4,15 @@ use alloc::{borrow::Cow, string::String}; use libafl_bolts::{ impl_serdeany, - tuples::{Handle, Handled, MatchNameRef}, + tuples::{Handle, Handled, MatchName, MatchNameRef}, Named, }; use serde::{Deserialize, Serialize}; use crate::{ corpus::Testcase, - events::EventFirer, - executors::ExitKind, - feedbacks::Feedback, - observers::{ObserversTuple, StdErrObserver, StdOutObserver}, - state::State, + feedbacks::{Feedback, StateInitializer}, + observers::{StdErrObserver, StdOutObserver}, Error, HasMetadata, }; @@ -35,39 +32,16 @@ pub struct StdOutToMetadataFeedback { o_ref: Handle, } -impl Feedback for StdOutToMetadataFeedback -where - S: State, -{ - #[allow(clippy::wrong_self_convention)] - #[inline] - fn is_interesting( - &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &S::Input, - _observers: &OT, - _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { - Ok(false) - } - +impl StdOutToMetadataFeedback { /// Append to the testcase the generated metadata in case of a new corpus item. #[inline] - fn append_metadata( + fn append_stdout_observation_to_testcase( &mut self, - _state: &mut S, - _manager: &mut EM, observers: &OT, - testcase: &mut Testcase, + testcase: &mut Testcase, ) -> Result<(), Error> where - OT: ObserversTuple, - EM: EventFirer, + OT: MatchName, { let observer = observers .get(&self.o_ref) @@ -84,17 +58,30 @@ where Ok(()) } +} - /// Discard the stored metadata in case that the testcase is not added to the corpus. - #[inline] - fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { - Ok(()) - } +impl StateInitializer for StdOutToMetadataFeedback {} +impl Feedback for StdOutToMetadataFeedback +where + OT: MatchName, +{ #[cfg(feature = "track_hit_feedbacks")] fn last_result(&self) -> Result { Ok(false) } + + /// Append to the testcase the generated metadata in case of a new corpus item. 
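// Usage sketch for the refactored `NewHashFeedback`: it is keyed on any observer
// implementing `ObserverWithHashField + Named` (typically a backtrace observer) and
// stores seen hashes in named metadata under the feedback's own name, as set up in
// `init_state` above. The observer constructor and `HarnessType` path are assumptions.
let bt_observer = BacktraceObserver::owned(
    "BacktraceObserver",
    libafl::observers::HarnessType::InProcess,
);
let new_hash_feedback = NewHashFeedback::new(&bt_observer);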
+ #[inline] + fn append_metadata( + &mut self, + _state: &mut S, + _manager: &mut EM, + observers: &OT, + testcase: &mut Testcase, + ) -> Result<(), Error> { + self.append_stdout_observation_to_testcase(observers, testcase) + } } impl Named for StdOutToMetadataFeedback { @@ -130,40 +117,26 @@ pub struct StdErrToMetadataFeedback { o_ref: Handle, } -impl Feedback for StdErrToMetadataFeedback +impl StateInitializer for StdErrToMetadataFeedback {} + +impl Feedback for StdErrToMetadataFeedback where - S: State, + OT: MatchName, { - #[allow(clippy::wrong_self_convention)] - #[inline] - fn is_interesting( - &mut self, - _state: &mut S, - _manager: &mut EM, - _input: &S::Input, - _observers: &OT, - _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + #[cfg(feature = "track_hit_feedbacks")] + fn last_result(&self) -> Result { Ok(false) } /// Append to the testcase the generated metadata in case of a new corpus item. #[inline] - fn append_metadata( + fn append_metadata( &mut self, _state: &mut S, _manager: &mut EM, observers: &OT, - testcase: &mut Testcase, - ) -> Result<(), Error> - where - OT: ObserversTuple, - EM: EventFirer, - { + testcase: &mut Testcase, + ) -> Result<(), Error> { let observer = observers .get(&self.o_ref) .ok_or(Error::illegal_state("StdErrObserver is missing"))?; @@ -179,16 +152,6 @@ where Ok(()) } - - /// Discard the stored metadata in case that the testcase is not added to the corpus. - #[inline] - fn discard_metadata(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { - Ok(()) - } - #[cfg(feature = "track_hit_feedbacks")] - fn last_result(&self) -> Result { - Ok(false) - } } impl Named for StdErrToMetadataFeedback { diff --git a/libafl/src/feedbacks/transferred.rs b/libafl/src/feedbacks/transferred.rs index 16530751ad..84f848922f 100644 --- a/libafl/src/feedbacks/transferred.rs +++ b/libafl/src/feedbacks/transferred.rs @@ -9,15 +9,18 @@ use serde::{Deserialize, Serialize}; #[cfg(feature = "track_hit_feedbacks")] use crate::feedbacks::premature_last_result_err; use crate::{ - events::EventFirer, executors::ExitKind, feedbacks::Feedback, observers::ObserversTuple, - state::State, HasMetadata, + executors::ExitKind, + feedbacks::{Feedback, StateInitializer}, + HasMetadata, }; + /// Constant name of the [`TransferringMetadata`]. pub const TRANSFERRED_FEEDBACK_NAME: Cow<'static, str> = Cow::Borrowed("transferred_feedback_internal"); -/// Metadata which denotes whether we are currently transferring an input. Implementors of -/// multi-node communication systems (like [`crate::events::LlmpEventManager`]) should wrap any +/// Metadata which denotes whether we are currently transferring an input. +/// +/// Implementors of multi-node communication systems (like [`crate::events::LlmpEventManager`]) should wrap any /// [`crate::EvaluatorObservers::evaluate_input_with_observers`] or /// [`crate::ExecutionProcessor::process_execution`] calls with setting this metadata to true/false /// before and after. 
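// Sketch of the wrapping contract described above, with assumed call sites: an event
// manager importing testcases from other nodes flips `TransferringMetadata` around the
// evaluation, so `TransferredFeedback` reports the imported input as interesting without
// re-judging it. `set_transferring` is assumed to be the metadata's setter.
state.metadata_mut::<TransferringMetadata>()?.set_transferring(true);
let res = fuzzer.evaluate_input_with_observers(state, executor, manager, input, false)?;
state.metadata_mut::<TransferringMetadata>()?.set_transferring(false);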
@@ -50,27 +53,28 @@ impl Named for TransferredFeedback { } } -impl Feedback for TransferredFeedback +impl StateInitializer for TransferredFeedback where - S: HasMetadata + State, + S: HasMetadata, { fn init_state(&mut self, state: &mut S) -> Result<(), Error> { state.add_metadata(TransferringMetadata { transferring: true }); Ok(()) } +} - fn is_interesting( +impl Feedback for TransferredFeedback +where + S: HasMetadata, +{ + fn is_interesting( &mut self, state: &mut S, _manager: &mut EM, - _input: &S::Input, + _input: &I, _observers: &OT, _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + ) -> Result { let res = state.metadata::()?.transferring; #[cfg(feature = "track_hit_feedbacks")] { @@ -78,6 +82,7 @@ where } Ok(res) } + #[cfg(feature = "track_hit_feedbacks")] fn last_result(&self) -> Result { self.last_result.ok_or(premature_last_result_err()) diff --git a/libafl/src/fuzzer/mod.rs b/libafl/src/fuzzer/mod.rs index 212c668f1f..0bd6955ef5 100644 --- a/libafl/src/fuzzer/mod.rs +++ b/libafl/src/fuzzer/mod.rs @@ -1,11 +1,13 @@ //! The `Fuzzer` is the main struct for a fuzz campaign. -use alloc::string::ToString; +use alloc::{string::ToString, vec::Vec}; use core::{fmt::Debug, marker::PhantomData, time::Duration}; use libafl_bolts::current_time; use serde::{de::DeserializeOwned, Serialize}; +#[cfg(feature = "introspection")] +use crate::monitors::PerfFeature; use crate::{ corpus::{Corpus, CorpusId, HasCurrentCorpusId, HasTestcase, Testcase}, events::{Event, EventConfig, EventFirer, EventProcessor, ProgressReporter}, @@ -15,16 +17,14 @@ use crate::{ mark_feature_time, observers::ObserversTuple, schedulers::Scheduler, - stages::{HasCurrentStage, StagesTuple}, + stages::{HasCurrentStageId, StagesTuple}, start_timer, state::{ - HasCorpus, HasCurrentTestcase, HasExecutions, HasImported, HasLastReportTime, HasSolutions, - UsesState, + HasCorpus, HasCurrentTestcase, HasExecutions, HasLastFoundTime, HasLastReportTime, + HasSolutions, State, Stoppable, UsesState, }, Error, HasMetadata, }; -#[cfg(feature = "introspection")] -use crate::{monitors::PerfFeature, state::HasClientPerfMonitor}; /// Send a monitor update all 15 (or more) seconds const STATS_TIMEOUT_DEFAULT: Duration = Duration::from_secs(15); @@ -35,7 +35,7 @@ where Self::State: HasCorpus, { /// The [`Scheduler`] for this fuzzer - type Scheduler: Scheduler; + type Scheduler: Scheduler; /// The scheduler fn scheduler(&self) -> &Self::Scheduler; @@ -47,7 +47,7 @@ where /// Holds an feedback pub trait HasFeedback: UsesState { /// The feedback type - type Feedback: Feedback; + type Feedback; /// The feedback fn feedback(&self) -> &Self::Feedback; @@ -59,7 +59,7 @@ pub trait HasFeedback: UsesState { /// Holds an objective feedback pub trait HasObjective: UsesState { /// The type of the [`Feedback`] used to find objectives for this fuzzer - type Objective: Feedback; + type Objective; /// The objective feedback fn objective(&self) -> &Self::Objective; @@ -69,9 +69,9 @@ pub trait HasObjective: UsesState { } /// Evaluates if an input is interesting using the feedback -pub trait ExecutionProcessor: UsesState { - /// Evaluate if a set of observation channels has an interesting state - fn execute_no_process( +pub trait ExecutionProcessor: UsesState { + /// Check the outcome of the execution, find if it is worth for corpus or objectives + fn check_results( &mut self, state: &mut Self::State, manager: &mut EM, @@ -80,11 +80,25 @@ pub trait ExecutionProcessor: UsesState { exit_kind: &ExitKind, ) -> Result where - EM: 
EventFirer; + EM: EventFirer, + OT: ObserversTuple<::Input, Self::State>; /// Process `ExecuteInputResult`. Add to corpus, solution or ignore #[allow(clippy::too_many_arguments)] - fn process_execution( + fn process_execution( + &mut self, + state: &mut Self::State, + manager: &mut EM, + input: &::Input, + exec_res: &ExecuteInputResult, + observers: &OT, + ) -> Result, Error> + where + EM: EventFirer, + OT: ObserversTuple<::Input, Self::State>; + + /// serialize and send event via manager + fn serialize_and_dispatch( &mut self, state: &mut Self::State, manager: &mut EM, @@ -92,13 +106,26 @@ pub trait ExecutionProcessor: UsesState { exec_res: &ExecuteInputResult, observers: &OT, exit_kind: &ExitKind, - send_events: bool, - ) -> Result, Error> + ) -> Result<(), Error> + where + EM: EventFirer, + OT: ObserversTuple<::Input, Self::State> + Serialize; + + /// send event via manager + fn dispatch_event( + &mut self, + state: &mut Self::State, + manager: &mut EM, + input: ::Input, + exec_res: &ExecuteInputResult, + obs_buf: Option>, + exit_kind: &ExitKind, + ) -> Result<(), Error> where EM: EventFirer; /// Evaluate if a set of observation channels has an interesting state - fn execute_and_process( + fn evaluate_execution( &mut self, state: &mut Self::State, manager: &mut EM, @@ -108,15 +135,16 @@ pub trait ExecutionProcessor: UsesState { send_events: bool, ) -> Result<(ExecuteInputResult, Option), Error> where - EM: EventFirer; + EM: EventFirer, + OT: ObserversTuple<::Input, Self::State> + Serialize; } /// Evaluates an input modifying the state of the fuzzer -pub trait EvaluatorObservers: UsesState + Sized { +pub trait EvaluatorObservers: UsesState + Sized { /// Runs the input and triggers observers and feedback, /// returns if is interesting an (option) the index of the new /// [`crate::corpus::Testcase`] in the [`crate::corpus::Corpus`] - fn evaluate_input_with_observers( + fn evaluate_input_with_observers( &mut self, state: &mut Self::State, executor: &mut E, @@ -125,7 +153,7 @@ pub trait EvaluatorObservers: UsesState + Sized { send_events: bool, ) -> Result<(ExecuteInputResult, Option), Error> where - E: Executor + HasObservers, + E: Executor + HasObservers, EM: EventFirer; } @@ -182,7 +210,7 @@ pub trait Evaluator: UsesState { /// The main fuzzer trait. pub trait Fuzzer: Sized + UsesState where - Self::State: HasMetadata + HasExecutions + HasLastReportTime, + Self::State: HasMetadata + HasExecutions + HasLastReportTime + Stoppable, E: UsesState, EM: ProgressReporter, ST: StagesTuple, @@ -214,8 +242,8 @@ where ) -> Result<(), Error> { let monitor_timeout = STATS_TIMEOUT_DEFAULT; loop { - // log::info!("Starting another fuzz_loop"); manager.maybe_report_progress(state, monitor_timeout)?; + self.fuzz_one(stages, executor, state, manager)?; } } @@ -247,7 +275,6 @@ where let monitor_timeout = STATS_TIMEOUT_DEFAULT; for _ in 0..iters { - // log::info!("Starting another fuzz_loop"); manager.maybe_report_progress(state, monitor_timeout)?; ret = Some(self.fuzz_one(stages, executor, state, manager)?); } @@ -356,25 +383,24 @@ pub enum ExecuteInputResult { /// Your default fuzzer instance, for everyday use. 
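// The decomposed `ExecutionProcessor` in practice, a sketch that simply mirrors the
// `evaluate_execution` body further below: result checking, corpus/objective handling,
// and event dispatch are now separate calls, so callers with their own event routing
// can skip `serialize_and_dispatch` entirely.
let exec_res = fuzzer.check_results(state, manager, &input, &*observers, &exit_kind)?;
let corpus_id = fuzzer.process_execution(state, manager, &input, &exec_res, &*observers)?;
fuzzer.serialize_and_dispatch(state, manager, input, &exec_res, &*observers, &exit_kind)?;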
#[derive(Debug)] -pub struct StdFuzzer { +pub struct StdFuzzer { scheduler: CS, feedback: F, objective: OF, - phantom: PhantomData, + phantom: PhantomData, } -impl UsesState for StdFuzzer +impl UsesState for StdFuzzer where - CS: Scheduler, - CS::State: HasCorpus, + S: State, { - type State = CS::State; + type State = S; } -impl HasScheduler for StdFuzzer +impl HasScheduler for StdFuzzer where - CS: Scheduler, - CS::State: HasCorpus, + S: State + HasCorpus, + CS: Scheduler, { type Scheduler = CS; @@ -387,12 +413,9 @@ where } } -impl HasFeedback for StdFuzzer +impl HasFeedback for StdFuzzer where - CS: Scheduler, - F: Feedback, - OF: Feedback, - CS::State: HasCorpus, + S: State, { type Feedback = F; @@ -405,12 +428,9 @@ where } } -impl HasObjective for StdFuzzer +impl HasObjective for StdFuzzer where - CS: Scheduler, - F: Feedback, - OF: Feedback, - CS::State: HasCorpus, + S: State, { type Objective = OF; @@ -423,30 +443,26 @@ where } } -impl ExecutionProcessor for StdFuzzer +impl ExecutionProcessor for StdFuzzer where - CS: Scheduler, - F: Feedback, - OF: Feedback, - OT: ObserversTuple + Serialize + DeserializeOwned, - CS::State: HasCorpus - + HasSolutions - + HasExecutions - + HasCorpus - + HasImported - + HasCurrentTestcase<::Input> - + HasCurrentCorpusId, + CS: Scheduler, + F: Feedback, + OF: Feedback, + S: HasCorpus + HasSolutions + HasExecutions + HasCorpus + HasCurrentCorpusId + State, + S::Corpus: Corpus, //delete me + S::Solutions: Corpus, //delete me { - fn execute_no_process( + fn check_results( &mut self, - state: &mut Self::State, + state: &mut S, manager: &mut EM, - input: &::Input, + input: &S::Input, observers: &OT, exit_kind: &ExitKind, ) -> Result where EM: EventFirer, + OT: ObserversTuple, { let mut res = ExecuteInputResult::None; @@ -480,7 +496,7 @@ where Ok(res) } - fn execute_and_process( + fn evaluate_execution( &mut self, state: &mut Self::State, manager: &mut EM, @@ -491,22 +507,17 @@ where ) -> Result<(ExecuteInputResult, Option), Error> where EM: EventFirer, + OT: ObserversTuple + Serialize, { - let exec_res = self.execute_no_process(state, manager, &input, observers, exit_kind)?; - let corpus_idx = self.process_execution( - state, - manager, - input, - &exec_res, - observers, - exit_kind, - send_events, - )?; - Ok((exec_res, corpus_idx)) + let exec_res = self.check_results(state, manager, &input, observers, exit_kind)?; + let corpus_id = self.process_execution(state, manager, &input, &exec_res, observers)?; + if send_events { + self.serialize_and_dispatch(state, manager, input, &exec_res, observers, exit_kind)?; + } + Ok((exec_res, corpus_id)) } - /// Evaluate if a set of observation channels has an interesting state - fn process_execution( + fn serialize_and_dispatch( &mut self, state: &mut Self::State, manager: &mut EM, @@ -514,38 +525,48 @@ where exec_res: &ExecuteInputResult, observers: &OT, exit_kind: &ExitKind, - send_events: bool, - ) -> Result, Error> + ) -> Result<(), Error> where EM: EventFirer, + OT: ObserversTuple + Serialize, { - match exec_res { - ExecuteInputResult::None => { - self.feedback_mut().discard_metadata(state, &input)?; - self.objective_mut().discard_metadata(state, &input)?; - Ok(None) - } + // Now send off the event + let observers_buf = match exec_res { ExecuteInputResult::Corpus => { - // Not a solution - self.objective_mut().discard_metadata(state, &input)?; - - // Add the input to the main corpus - let mut testcase = Testcase::with_executions(input.clone(), *state.executions()); - #[cfg(feature = "track_hit_feedbacks")] - 
self.feedback_mut() - .append_hit_feedbacks(testcase.hit_feedbacks_mut())?; - self.feedback_mut() - .append_metadata(state, manager, observers, &mut testcase)?; - let idx = state.corpus_mut().add(testcase)?; - self.scheduler_mut().on_add(state, idx)?; - - if send_events && manager.should_send() { + if manager.should_send() { // TODO set None for fast targets - let observers_buf = if manager.configuration() == EventConfig::AlwaysUnique { + if manager.configuration() == EventConfig::AlwaysUnique { None } else { manager.serialize_observers::(observers)? - }; + } + } else { + None + } + } + _ => None, + }; + + self.dispatch_event(state, manager, input, exec_res, observers_buf, exit_kind)?; + Ok(()) + } + + fn dispatch_event( + &mut self, + state: &mut Self::State, + manager: &mut EM, + input: ::Input, + exec_res: &ExecuteInputResult, + observers_buf: Option>, + exit_kind: &ExitKind, + ) -> Result<(), Error> + where + EM: EventFirer, + { + // Now send off the event + match exec_res { + ExecuteInputResult::Corpus => { + if manager.should_send() { manager.fire( state, Event::NewTestcase { @@ -555,23 +576,70 @@ where corpus_size: state.corpus().count(), client_config: manager.configuration(), time: current_time(), - executions: *state.executions(), forward_id: None, + #[cfg(all(unix, feature = "std", feature = "multi_machine"))] + node_id: None, }, )?; - } else { - // This testcase is from the other fuzzers. - *state.imported_mut() += 1; } - Ok(Some(idx)) + } + ExecuteInputResult::Solution => { + if manager.should_send() { + manager.fire( + state, + Event::Objective { + objective_size: state.solutions().count(), + time: current_time(), + }, + )?; + } + } + ExecuteInputResult::None => (), + } + Ok(()) + } + + /// Evaluate if a set of observation channels has an interesting state + fn process_execution( + &mut self, + state: &mut Self::State, + manager: &mut EM, + input: &S::Input, + exec_res: &ExecuteInputResult, + observers: &OT, + ) -> Result, Error> + where + EM: EventFirer, + OT: ObserversTuple, + { + match exec_res { + ExecuteInputResult::None => { + self.feedback_mut().discard_metadata(state, input)?; + self.objective_mut().discard_metadata(state, input)?; + Ok(None) + } + ExecuteInputResult::Corpus => { + // Not a solution + self.objective_mut().discard_metadata(state, input)?; + + // Add the input to the main corpus + let mut testcase = Testcase::from(input.clone()); + #[cfg(feature = "track_hit_feedbacks")] + self.feedback_mut() + .append_hit_feedbacks(testcase.hit_feedbacks_mut())?; + self.feedback_mut() + .append_metadata(state, manager, observers, &mut testcase)?; + let id = state.corpus_mut().add(testcase)?; + self.scheduler_mut().on_add(state, id)?; + + Ok(Some(id)) } ExecuteInputResult::Solution => { // Not interesting - self.feedback_mut().discard_metadata(state, &input)?; + self.feedback_mut().discard_metadata(state, input)?; - let executions = *state.executions(); // The input is a solution, add it to the respective corpus - let mut testcase = Testcase::with_executions(input, executions); + let mut testcase = Testcase::from(input.clone()); testcase.set_parent_id_optional(*state.corpus().current()); if let Ok(mut tc) = state.current_testcase_mut() { tc.found_objective(); @@ -583,63 +651,56 @@ where .append_metadata(state, manager, observers, &mut testcase)?; state.solutions_mut().add(testcase)?; - if send_events { - manager.fire( - state, - Event::Objective { - objective_size: state.solutions().count(), - executions, - time: current_time(), - }, - )?; - } - Ok(None) } } } } -impl 
EvaluatorObservers for StdFuzzer +impl EvaluatorObservers for StdFuzzer where - CS: Scheduler, - OT: ObserversTuple + Serialize + DeserializeOwned, - F: Feedback, - OF: Feedback, - CS::State: HasCorpus + HasSolutions + HasExecutions + HasImported, + CS: Scheduler, + OT: ObserversTuple + Serialize + DeserializeOwned, + F: Feedback, + OF: Feedback, + S: HasCorpus + HasSolutions + HasExecutions + State, + S::Corpus: Corpus, //delete me + S::Solutions: Corpus, //delete me { /// Process one input, adding to the respective corpora if needed and firing the right events #[inline] - fn evaluate_input_with_observers( + fn evaluate_input_with_observers( &mut self, - state: &mut Self::State, + state: &mut S, executor: &mut E, manager: &mut EM, - input: ::Input, + input: S::Input, send_events: bool, ) -> Result<(ExecuteInputResult, Option), Error> where - E: Executor + HasObservers, - EM: EventFirer, + E: Executor + HasObservers, + EM: EventFirer, { let exit_kind = self.execute_input(state, executor, manager, &input)?; let observers = executor.observers(); self.scheduler.on_evaluation(state, &input, &*observers)?; - self.execute_and_process(state, manager, input, &*observers, &exit_kind, send_events) + self.evaluate_execution(state, manager, input, &*observers, &exit_kind, send_events) } } -impl Evaluator for StdFuzzer +impl Evaluator for StdFuzzer where - CS: Scheduler, - E: HasObservers + Executor, - EM: EventFirer, - F: Feedback, - OF: Feedback, - OT: ObserversTuple + Serialize + DeserializeOwned, - CS::State: HasCorpus + HasSolutions + HasExecutions + HasImported, + CS: Scheduler, + E: HasObservers + Executor, + E::Observers: ObserversTuple + Serialize + DeserializeOwned, + EM: EventFirer, + F: Feedback, + OF: Feedback, + S: HasCorpus + HasSolutions + HasExecutions + HasLastFoundTime + State, + S::Corpus: Corpus, //delete me + S::Solutions: Corpus, //delete me { /// Process one input, adding to the respective corpora if needed and firing the right events #[inline] @@ -658,11 +719,11 @@ where state: &mut Self::State, input: ::Input, ) -> Result { - let mut testcase = Testcase::with_executions(input.clone(), *state.executions()); + let mut testcase = Testcase::from(input.clone()); testcase.set_disabled(true); // Add the disabled input to the main corpus - let idx = state.corpus_mut().add_disabled(testcase)?; - Ok(idx) + let id = state.corpus_mut().add_disabled(testcase)?; + Ok(id) } /// Adds an input, even if it's not considered `interesting` by any of the executors fn add_input( @@ -672,14 +733,16 @@ where manager: &mut EM, input: ::Input, ) -> Result { + *state.last_found_time_mut() = current_time(); + let exit_kind = self.execute_input(state, executor, manager, &input)?; let observers = executor.observers(); // Always consider this to be "interesting" - let mut testcase = Testcase::with_executions(input.clone(), *state.executions()); + let mut testcase = Testcase::from(input.clone()); // Maybe a solution #[cfg(not(feature = "introspection"))] - let is_solution = + let is_solution: bool = self.objective_mut() .is_interesting(state, manager, &input, &*observers, &exit_kind)?; @@ -698,18 +761,16 @@ where .append_hit_feedbacks(testcase.hit_objectives_mut())?; self.objective_mut() .append_metadata(state, manager, &*observers, &mut testcase)?; - let idx = state.solutions_mut().add(testcase)?; + let id = state.solutions_mut().add(testcase)?; - let executions = *state.executions(); manager.fire( state, Event::Objective { objective_size: state.solutions().count(), - executions, time: current_time(), }, )?; 
- return Ok(idx); + return Ok(id); } // Not a solution @@ -737,13 +798,13 @@ where // Add the input to the main corpus self.feedback_mut() .append_metadata(state, manager, &*observers, &mut testcase)?; - let idx = state.corpus_mut().add(testcase)?; - self.scheduler_mut().on_add(state, idx)?; + let id = state.corpus_mut().add(testcase)?; + self.scheduler_mut().on_add(state, id)?; let observers_buf = if manager.configuration() == EventConfig::AlwaysUnique { None } else { - manager.serialize_observers::(&*observers)? + manager.serialize_observers::(&*observers)? }; manager.fire( state, @@ -754,30 +815,29 @@ where corpus_size: state.corpus().count(), client_config: manager.configuration(), time: current_time(), - executions: *state.executions(), forward_id: None, + #[cfg(all(unix, feature = "std", feature = "multi_machine"))] + node_id: None, }, )?; - Ok(idx) + Ok(id) } } -impl Fuzzer for StdFuzzer +impl Fuzzer for StdFuzzer where - CS: Scheduler, - E: UsesState, - EM: ProgressReporter + EventProcessor, - F: Feedback, - OF: Feedback, - CS::State: HasExecutions + CS: Scheduler, + E: UsesState, + EM: ProgressReporter + EventProcessor, + S: HasExecutions + HasMetadata + HasCorpus - + HasTestcase - + HasImported + HasLastReportTime + + HasTestcase + HasCurrentCorpusId - + HasCurrentStage, - ST: StagesTuple, + + HasCurrentStageId + + State, + ST: StagesTuple, { fn fuzz_one( &mut self, @@ -791,12 +851,12 @@ where state.introspection_monitor_mut().start_timer(); // Get the next index from the scheduler - let idx = if let Some(idx) = state.current_corpus_id()? { - idx // we are resuming + let id = if let Some(id) = state.current_corpus_id()? { + id // we are resuming } else { - let idx = self.scheduler.next(state)?; - state.set_corpus_idx(idx)?; // set up for resume - idx + let id = self.scheduler.next(state)?; + state.set_corpus_id(id)?; // set up for resume + id }; // Mark the elapsed time for the scheduler @@ -822,25 +882,29 @@ where state.introspection_monitor_mut().mark_manager_time(); { - if let Ok(mut testcase) = state.testcase_mut(idx) { + if let Ok(mut testcase) = state.testcase_mut(id) { let scheduled_count = testcase.scheduled_count(); // increase scheduled count, this was fuzz_level in afl testcase.set_scheduled_count(scheduled_count + 1); } } - state.clear_corpus_idx()?; + state.clear_corpus_id()?; - Ok(idx) + if state.stop_requested() { + state.discard_stop_request(); + manager.on_shutdown()?; + return Err(Error::shutting_down()); + } + + Ok(id) } } -impl StdFuzzer +impl StdFuzzer where - CS: Scheduler, - F: Feedback<::State>, - OF: Feedback<::State>, - CS::State: UsesInput + HasExecutions + HasCorpus, + CS: Scheduler, + S: UsesInput + HasExecutions + HasCorpus + State, { /// Create a new `StdFuzzer` with standard behavior. 
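// Sketch of the testcase construction path now used throughout this file: testcases are
// built with `Testcase::from(input)` instead of `Testcase::with_executions(..)`, and
// execution counts no longer travel on `NewTestcase`/`Objective` events.
let mut testcase = Testcase::from(input.clone());
testcase.set_parent_id_optional(*state.corpus().current());
let id = state.corpus_mut().add(testcase)?;
fuzzer.scheduler_mut().on_add(state, id)?;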
pub fn new(scheduler: CS, feedback: F, objective: OF) -> Self { @@ -861,9 +925,9 @@ where input: &<::State as UsesInput>::Input, ) -> Result where - E: Executor + HasObservers::State>, + E: Executor::State> + HasObservers, + E::Observers: ObserversTuple<::Input, ::State>, EM: UsesState::State>, - OT: ObserversTuple<::State>, { start_timer!(state); executor.observers_mut().pre_exec_all(state, input)?; @@ -899,22 +963,21 @@ where ) -> Result; } -impl ExecutesInput for StdFuzzer +impl ExecutesInput for StdFuzzer where - CS: Scheduler, - F: Feedback<::State>, - OF: Feedback<::State>, - E: Executor + HasObservers, + CS: Scheduler, + E: Executor + HasObservers, + E::Observers: ObserversTuple<::Input, ::State>, EM: UsesState, - CS::State: UsesInput + HasExecutions + HasCorpus, + S: UsesInput + HasExecutions + HasCorpus + State, { /// Runs the input and triggers observers and feedback fn execute_input( &mut self, - state: &mut ::State, + state: &mut S, executor: &mut E, event_mgr: &mut EM, - input: &<::State as UsesInput>::Input, + input: &S::Input, ) -> Result { start_timer!(state); executor.observers_mut().pre_exec_all(state, input)?; @@ -934,62 +997,49 @@ where } } -#[cfg(test)] -pub mod test { - use core::marker::PhantomData; +/// A [`NopFuzzer`] that does nothing +#[derive(Clone, Debug)] +pub struct NopFuzzer { + phantom: PhantomData, +} - use libafl_bolts::Error; - - use crate::{ - corpus::CorpusId, - events::ProgressReporter, - stages::{HasCurrentStage, StagesTuple}, - state::{HasExecutions, HasLastReportTime, State, UsesState}, - Fuzzer, HasMetadata, - }; - - #[derive(Clone, Debug)] - pub struct NopFuzzer { - phantom: PhantomData, - } - - impl NopFuzzer { - #[must_use] - pub fn new() -> Self { - Self { - phantom: PhantomData, - } - } - } - - impl Default for NopFuzzer { - fn default() -> Self { - Self::new() - } - } - - impl UsesState for NopFuzzer - where - S: State, - { - type State = S; - } - - impl Fuzzer for NopFuzzer - where - E: UsesState, - EM: ProgressReporter, - ST: StagesTuple, - Self::State: HasMetadata + HasExecutions + HasLastReportTime + HasCurrentStage, - { - fn fuzz_one( - &mut self, - _stages: &mut ST, - _executor: &mut E, - _state: &mut EM::State, - _manager: &mut EM, - ) -> Result { - unimplemented!() +impl NopFuzzer { + /// Creates a new [`NopFuzzer`] + #[must_use] + pub fn new() -> Self { + Self { + phantom: PhantomData, } } } + +impl Default for NopFuzzer { + fn default() -> Self { + Self::new() + } +} + +impl UsesState for NopFuzzer +where + S: State, +{ + type State = S; +} + +impl Fuzzer for NopFuzzer +where + E: UsesState, + EM: ProgressReporter + EventProcessor, + ST: StagesTuple, + Self::State: HasMetadata + HasExecutions + HasLastReportTime + HasCurrentStageId, +{ + fn fuzz_one( + &mut self, + _stages: &mut ST, + _executor: &mut E, + _state: &mut EM::State, + _manager: &mut EM, + ) -> Result { + unimplemented!("NopFuzzer cannot fuzz"); + } +} diff --git a/libafl/src/generators/gramatron.rs b/libafl/src/generators/gramatron.rs index 9bec0e4dd6..f8677e0fcf 100644 --- a/libafl/src/generators/gramatron.rs +++ b/libafl/src/generators/gramatron.rs @@ -1,6 +1,6 @@ //! 
Gramatron generator use alloc::{string::String, vec::Vec}; -use core::marker::PhantomData; +use core::{marker::PhantomData, num::NonZero}; use libafl_bolts::rands::Rand; use serde::{Deserialize, Serialize}; @@ -34,15 +34,12 @@ pub struct Automaton { #[derive(Clone, Debug)] /// Generates random inputs from a grammar automaton -pub struct GramatronGenerator<'a, S> -where - S: HasRand, -{ +pub struct GramatronGenerator<'a, S> { automaton: &'a Automaton, phantom: PhantomData, } -impl<'a, S> Generator for GramatronGenerator<'a, S> +impl Generator for GramatronGenerator<'_, S> where S: HasRand, { @@ -76,13 +73,21 @@ where .last() .map_or(self.automaton.init_state, |last| { let triggers = &self.automaton.pda[last.state]; - let idx = state.rand_mut().below(triggers.len()); + let idx = state.rand_mut().below( + NonZero::new(triggers.len()) + .expect("Triggers are empty in append_generated_terminals!"), + ); triggers[idx].dest }); while current_state != final_state { let triggers = &self.automaton.pda[current_state]; - let idx = state.rand_mut().below(triggers.len()); + let idx = + state + .rand_mut() + .below(NonZero::new(triggers.len()).expect( + "Automation.pda triggers are empty in append_generated_terminals!", + )); let trigger = &triggers[idx]; input .terminals_mut() diff --git a/libafl/src/generators/mod.rs b/libafl/src/generators/mod.rs index eeede7528d..ce0ce4607f 100644 --- a/libafl/src/generators/mod.rs +++ b/libafl/src/generators/mod.rs @@ -1,17 +1,15 @@ //! Generators may generate bytes or, in general, data, for inputs. use alloc::vec::Vec; -use core::marker::PhantomData; +use core::{marker::PhantomData, num::NonZeroUsize}; use libafl_bolts::rands::Rand; -use crate::{ - inputs::{bytes::BytesInput, Input}, - state::HasRand, - Error, -}; +use crate::{inputs::bytes::BytesInput, nonzero, state::HasRand, Error}; pub mod gramatron; +use core::cmp::max; + pub use gramatron::*; #[cfg(feature = "nautilus")] @@ -20,10 +18,7 @@ pub mod nautilus; pub use nautilus::*; /// Generators can generate ranges of bytes. -pub trait Generator -where - I: Input, -{ +pub trait Generator { /// Generate a new input fn generate(&mut self, state: &mut S) -> Result; } @@ -35,7 +30,6 @@ where impl Generator for T where T: Iterator, - I: Input, { fn generate(&mut self, _state: &mut S) -> Result { match self.next() { @@ -49,21 +43,13 @@ where /// An [`Iterator`] built from a [`Generator`]. 
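// Sketch of the blanket impl above: with the `Input` bound dropped, any iterator over
// inputs can serve directly as a `Generator`, erroring once it is exhausted. The seed
// values here are arbitrary.
let mut seed_generator = vec![
    BytesInput::new(b"seed-one".to_vec()),
    BytesInput::new(b"seed-two".to_vec()),
]
.into_iter();
let first: BytesInput = seed_generator.generate(&mut state)?;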
#[derive(Debug)] -pub struct GeneratorIter<'a, I, S, G> -where - I: Input, - G: Generator, -{ +pub struct GeneratorIter<'a, I, S, G> { gen: G, state: &'a mut S, phantom: PhantomData, } -impl<'a, I, S, G> GeneratorIter<'a, I, S, G> -where - I: Input, - G: Generator, -{ +impl<'a, I, S, G> GeneratorIter<'a, I, S, G> { /// Create a new [`GeneratorIter`] pub fn new(gen: G, state: &'a mut S) -> Self { Self { @@ -74,9 +60,8 @@ where } } -impl<'a, I, S, G> Iterator for GeneratorIter<'a, I, S, G> +impl Iterator for GeneratorIter<'_, I, S, G> where - I: Input, G: Generator, { type Item = I; @@ -88,63 +73,60 @@ where #[derive(Clone, Debug)] /// Generates random bytes -pub struct RandBytesGenerator -where - S: HasRand, -{ - max_size: usize, - phantom: PhantomData, +pub struct RandBytesGenerator { + min_size: NonZeroUsize, + max_size: NonZeroUsize, } -impl Generator for RandBytesGenerator +impl Generator for RandBytesGenerator where S: HasRand, { fn generate(&mut self, state: &mut S) -> Result { - let mut size = state.rand_mut().below(self.max_size); - if size == 0 { - size = 1; - } + let mut size = state + .rand_mut() + .between(self.min_size.get(), self.max_size.get()); + size = max(size, 1); let random_bytes: Vec = (0..size) - .map(|_| state.rand_mut().below(256) as u8) + .map(|_| state.rand_mut().below(nonzero!(256)) as u8) .collect(); Ok(BytesInput::new(random_bytes)) } } -impl RandBytesGenerator -where - S: HasRand, -{ +impl RandBytesGenerator { /// Returns a new [`RandBytesGenerator`], generating up to `max_size` random bytes. #[must_use] - pub fn new(max_size: usize) -> Self { + pub fn new(max_size: NonZeroUsize) -> Self { Self { + min_size: nonzero!(1), max_size, - phantom: PhantomData, } } + + /// Returns a new [`RandBytesGenerator`], generating from `min_size` up to `max_size` random bytes. + #[must_use] + pub fn with_min_size(min_size: NonZeroUsize, max_size: NonZeroUsize) -> Self { + Self { min_size, max_size } + } } #[derive(Clone, Debug)] /// Generates random printable characters -pub struct RandPrintablesGenerator -where - S: HasRand, -{ - max_size: usize, - phantom: PhantomData, +pub struct RandPrintablesGenerator { + min_size: NonZeroUsize, + max_size: NonZeroUsize, } -impl Generator for RandPrintablesGenerator +impl Generator for RandPrintablesGenerator where S: HasRand, { fn generate(&mut self, state: &mut S) -> Result { - let mut size = state.rand_mut().below(self.max_size); - if size == 0 { - size = 1; - } + let mut size = state + .rand_mut() + .between(self.min_size.get(), self.max_size.get()); + size = max(size, 1); let printables = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz \t\n!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~".as_bytes(); let random_bytes: Vec = (0..size) .map(|_| *state.rand_mut().choose(printables).unwrap()) @@ -153,16 +135,19 @@ where } } -impl RandPrintablesGenerator -where - S: HasRand, -{ - /// Creates a new [`RandPrintablesGenerator`], generating up to `max_size` random printable characters. +impl RandPrintablesGenerator { + /// Returns a new [`RandBytesGenerator`], generating up to `max_size` random bytes. #[must_use] - pub fn new(max_size: usize) -> Self { + pub fn new(max_size: NonZeroUsize) -> Self { Self { + min_size: nonzero!(1), max_size, - phantom: PhantomData, } } + + /// Returns a new [`RandPrintablesGenerator`], generating from `min_size` up to `max_size` random bytes. 
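// Usage sketch for the `NonZero`-based constructors introduced above; the sizes are
// arbitrary and `nonzero!` is the libafl_bolts helper already used in this file.
let mut bytes_gen = RandBytesGenerator::with_min_size(nonzero!(4), nonzero!(32));
let mut printables_gen = RandPrintablesGenerator::new(nonzero!(32));
let input = bytes_gen.generate(&mut state)?;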
+ #[must_use] + pub fn with_min_size(min_size: NonZeroUsize, max_size: NonZeroUsize) -> Self { + Self { min_size, max_size } + } } diff --git a/libafl/src/generators/nautilus.rs b/libafl/src/generators/nautilus.rs index f905afea24..1b4b870996 100644 --- a/libafl/src/generators/nautilus.rs +++ b/libafl/src/generators/nautilus.rs @@ -11,7 +11,8 @@ use libafl_bolts::rands::Rand; pub use crate::common::nautilus::grammartec::newtypes::NTermId; use crate::{ common::nautilus::grammartec::context::Context, generators::Generator, - inputs::nautilus::NautilusInput, state::HasRand, Error, + inputs::nautilus::NautilusInput, nautilus::grammartec::python_grammar_loader, state::HasRand, + Error, }; /// The nautilus context for a generator @@ -84,13 +85,25 @@ impl NautilusContext { } /// Create a new [`NautilusContext`] from a file - #[must_use] - pub fn from_file>(tree_depth: usize, grammar_file: P) -> Self { - let file = fs::File::open(grammar_file).expect("Cannot open grammar file"); + pub fn from_file>(tree_depth: usize, grammar_file: P) -> Result { + let grammar_file = grammar_file.as_ref(); + if grammar_file.extension().unwrap_or_default() == "py" { + log::debug!("Creating NautilusContext from python grammar"); + let mut ctx = python_grammar_loader::load_python_grammar( + fs::read_to_string(grammar_file)?.as_str(), + ); + ctx.initialize(tree_depth); + return Ok(Self { ctx }); + } + log::debug!("Creating NautilusContext from json grammar"); + let file = fs::File::open(grammar_file)?; let reader = BufReader::new(file); - let rules: Vec> = - serde_json::from_reader(reader).expect("Cannot parse grammar file"); - Self::new(tree_depth, &rules) + let rules: Vec> = serde_json::from_reader(reader).map_err(|err| { + Error::illegal_argument(format!( + "Error loading context from json grammar file {grammar_file:?}: {err:?}" + )) + })?; + Ok(Self::new(tree_depth, &rules)) } } @@ -107,7 +120,7 @@ impl Debug for NautilusGenerator<'_> { } } -impl<'a, S: HasRand> Generator for NautilusGenerator<'a> { +impl Generator for NautilusGenerator<'_> { fn generate(&mut self, state: &mut S) -> Result { let nonterm = self.nonterminal("START"); let len = self.ctx.get_random_len_for_nt(&nonterm); diff --git a/libafl/src/inputs/bytes.rs b/libafl/src/inputs/bytes.rs index 4fa9e0143b..127e672953 100644 --- a/libafl/src/inputs/bytes.rs +++ b/libafl/src/inputs/bytes.rs @@ -15,7 +15,10 @@ use libafl_bolts::{fs::write_file_atomic, Error}; use libafl_bolts::{ownedref::OwnedSlice, HasLen}; use serde::{Deserialize, Serialize}; -use crate::inputs::{HasMutatorBytes, HasTargetBytes, Input}; +use crate::{ + corpus::CorpusId, + inputs::{HasMutatorBytes, HasTargetBytes, Input}, +}; /// A bytes input is the basic input #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, Hash)] @@ -47,7 +50,7 @@ impl Input for BytesInput { } /// Generate a name for this input - fn generate_name(&self, _idx: usize) -> String { + fn generate_name(&self, _id: Option) -> String { let mut hasher = RandomState::with_seeds(0, 0, 0, 0).build_hasher(); hasher.write(self.bytes()); format!("{:016x}", hasher.finish()) diff --git a/libafl/src/inputs/bytessub.rs b/libafl/src/inputs/bytessub.rs index 262e7ff7dc..0dcd104a59 100644 --- a/libafl/src/inputs/bytessub.rs +++ b/libafl/src/inputs/bytessub.rs @@ -1,41 +1,21 @@ //! 
[`BytesSubInput`] is a wrapper input that can be used to mutate parts of a byte slice -use alloc::vec::Vec; +use alloc::vec::{self, Vec}; use core::{ - cmp::{min, Ordering}, - ops::{Bound, Range, RangeBounds}, + cmp::Ordering, + ops::{Range, RangeBounds}, }; -use libafl_bolts::HasLen; +use libafl_bolts::{ + subrange::{end_index, start_index, sub_range}, + HasLen, +}; -use super::HasMutatorBytes; - -/// Gets the relevant concrete start index from [`RangeBounds`] (inclusive) -fn start_index(range: &R) -> usize -where - R: RangeBounds, -{ - match range.start_bound() { - Bound::Unbounded => 0, - Bound::Included(start) => *start, - Bound::Excluded(start) => start + 1, - } -} - -/// Gets the relevant concrete end index from [`RangeBounds`] (exclusive) -fn end_index(range: &R, max_len: usize) -> usize -where - R: RangeBounds, -{ - match range.end_bound() { - Bound::Unbounded => max_len, - Bound::Included(end) => end + 1, - Bound::Excluded(end) => *end, - } -} +use crate::inputs::{HasMutatorBytes, MappedInput}; /// The [`BytesSubInput`] makes it possible to use [`crate::mutators::Mutator`]`s` that work on /// inputs implementing the [`HasMutatorBytes`] for a sub-range of this input. +/// /// For example, we can do the following: /// ```rust /// # extern crate alloc; @@ -85,12 +65,9 @@ where /// assert_eq!(bytes_input.bytes(), [1, 2, 3, 4, 42, 42, 42, 5]); /// ``` /// -/// The input supports all methods in the [`HasMutatorBytes`] trait. +/// The input supports all methods in the [`HasMutatorBytes`] trait if the parent input also implements this trait. #[derive(Debug)] -pub struct BytesSubInput<'a, I> -where - I: HasMutatorBytes + ?Sized, -{ +pub struct BytesSubInput<'a, I: ?Sized> { /// The (complete) parent input we will work on pub(crate) parent_input: &'a mut I, /// The range inside the parent input we will work on @@ -99,7 +76,7 @@ where impl<'a, I> BytesSubInput<'a, I> where - I: HasMutatorBytes + ?Sized + HasLen, + I: HasMutatorBytes + ?Sized, { /// Creates a new [`BytesSubInput`] that's a view on an input with mutator bytes. /// The sub input can then be used to mutate parts of the original input. 
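// Condensed form of the doc example above, assuming a `BytesInput` parent: the sub
// input is a mutable window, so edits made through it land in the parent's byte range.
let mut bytes_input = BytesInput::new(vec![1, 2, 3, 4, 5]);
let mut sub_input = bytes_input.sub_input(1..4);
sub_input.bytes_mut()[0] = 42;
assert_eq!(bytes_input.bytes(), [1, 42, 3, 4, 5]);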
@@ -117,65 +94,11 @@ where }, } } - - /// The inclusive start index in the parent buffer - fn start_index(&self) -> usize { - self.range.start - } - - /// The exclusive end index in the parent buffer - fn end_index(&self) -> usize { - self.range.end - } - - /// Creates a sub range in the current own range - fn sub_range(&self, range: R2) -> (Bound, Bound) - where - R2: RangeBounds, - { - let start = match (self.range.start_bound(), range.start_bound()) { - (Bound::Unbounded, Bound::Unbounded) => Bound::Unbounded, - (Bound::Excluded(bound), Bound::Unbounded) - | (Bound::Unbounded, Bound::Excluded(bound)) => Bound::Excluded(*bound), - (Bound::Included(bound), Bound::Unbounded) - | (Bound::Unbounded, Bound::Included(bound)) => Bound::Included(*bound), - (Bound::Included(own), Bound::Included(other)) => Bound::Included(own + other), - (Bound::Included(own), Bound::Excluded(other)) - | (Bound::Excluded(own), Bound::Included(other)) => Bound::Excluded(own + other), - (Bound::Excluded(own), Bound::Excluded(other)) => Bound::Excluded(own + other + 1), - }; - - let end = match (self.range.end_bound(), range.end_bound()) { - (Bound::Unbounded, Bound::Unbounded) => Bound::Unbounded, - (Bound::Excluded(bound), Bound::Unbounded) => Bound::Excluded(*bound), - (Bound::Unbounded, Bound::Excluded(bound)) => { - Bound::Excluded(self.end_index() - *bound) - } - (Bound::Included(bound), Bound::Unbounded) => Bound::Included(*bound), - (Bound::Unbounded, Bound::Included(bound)) => { - Bound::Included(self.end_index() - *bound) - } - (Bound::Included(own), Bound::Included(other)) => { - Bound::Included(min(*own, self.start_index() + other)) - } - (Bound::Included(own), Bound::Excluded(other)) => { - Bound::Included(min(*own, self.start_index() + other - 1)) - } - (Bound::Excluded(own), Bound::Included(other)) => { - Bound::Included(min(*own - 1, self.start_index() + other)) - } - (Bound::Excluded(own), Bound::Excluded(other)) => { - Bound::Excluded(min(*own, self.start_index() + other)) - } - }; - - (start, end) - } } -impl<'a, I> HasMutatorBytes for BytesSubInput<'a, I> +impl HasMutatorBytes for BytesSubInput<'_, I> where - I: HasMutatorBytes + HasLen, + I: HasMutatorBytes, { #[inline] fn bytes(&self) -> &[u8] { @@ -188,8 +111,8 @@ where } fn resize(&mut self, new_len: usize, value: u8) { - let start_index = self.start_index(); - let end_index = self.end_index(); + let start_index = self.range.start; + let end_index = self.range.end; let old_len = end_index - start_index; match new_len.cmp(&old_len) { @@ -238,7 +161,7 @@ where } fn extend<'b, IT: IntoIterator>(&mut self, iter: IT) { - let old_len = self.end_index() - self.start_index(); + let old_len = self.len(); let new_values: Vec = iter.into_iter().copied().collect(); self.resize(old_len + new_values.len(), 0); @@ -249,39 +172,43 @@ where /// with the given `replace_with` iterator and yields the removed items. /// `replace_with` does not need to be the same length as range. 
/// Refer to the docs of [`Vec::splice`] - fn splice( - &mut self, - range: R2, - replace_with: IT, - ) -> alloc::vec::Splice<'_, IT::IntoIter> + fn splice(&mut self, range: R2, replace_with: IT) -> vec::Splice<'_, IT::IntoIter> where R2: RangeBounds, IT: IntoIterator, { - let range = self.sub_range(range); + let range = sub_range(&self.range, range); self.parent_input.splice(range, replace_with) } - fn drain(&mut self, range: R2) -> alloc::vec::Drain<'_, u8> + fn drain(&mut self, range: R2) -> vec::Drain<'_, u8> where R2: RangeBounds, { - let drain = self.parent_input.drain(self.sub_range(range)); + let sub_range = sub_range(&self.range, range); + let drain = self.parent_input.drain(sub_range); self.range.end -= drain.len(); drain } } -impl<'a, I> HasLen for BytesSubInput<'a, I> +impl HasLen for BytesSubInput<'_, I> where - I: HasMutatorBytes + HasLen, + I: HasMutatorBytes, { #[inline] fn len(&self) -> usize { - self.range.end - self.range.start + self.range.len() } } +impl MappedInput for BytesSubInput<'_, I> { + type Type<'b> + = BytesSubInput<'b, I> + where + Self: 'b; +} + #[cfg(test)] mod tests { @@ -303,6 +230,20 @@ mod tests { #[test] fn test_bytessubinput() { + let (bytes_input, _) = init_bytes_input(); + + let sub_input = bytes_input.sub_bytes(0..1); + assert_eq!(*sub_input.as_slice(), [1]); + + let sub_input = bytes_input.sub_bytes(1..=2); + assert_eq!(*sub_input.as_slice(), [2, 3]); + + let sub_input = bytes_input.sub_bytes(..); + assert_eq!(*sub_input.as_slice(), [1, 2, 3, 4, 5, 6, 7]); + } + + #[test] + fn test_mutablebytessubinput() { let (mut bytes_input, len_orig) = init_bytes_input(); let mut sub_input = bytes_input.sub_input(0..1); @@ -413,14 +354,27 @@ mod tests { #[test] fn test_ranges() { + let bytes_input = BytesInput::new(vec![1, 2, 3]); + + assert_eq!(bytes_input.sub_bytes(..1).start_index(), 0); + assert_eq!(bytes_input.sub_bytes(1..=1).start_index(), 1); + assert_eq!(bytes_input.sub_bytes(..1).end_index(), 1); + assert_eq!(bytes_input.sub_bytes(..=1).end_index(), 2); + assert_eq!(bytes_input.sub_bytes(1..=1).end_index(), 2); + assert_eq!(bytes_input.sub_bytes(1..).end_index(), 3); + assert_eq!(bytes_input.sub_bytes(..3).end_index(), 3); + } + + #[test] + fn test_ranges_mut() { let mut bytes_input = BytesInput::new(vec![1, 2, 3]); - assert_eq!(bytes_input.sub_input(..1).start_index(), 0); - assert_eq!(bytes_input.sub_input(1..=1).start_index(), 1); - assert_eq!(bytes_input.sub_input(..1).end_index(), 1); - assert_eq!(bytes_input.sub_input(..=1).end_index(), 2); - assert_eq!(bytes_input.sub_input(1..=1).end_index(), 2); - assert_eq!(bytes_input.sub_input(1..).end_index(), 3); - assert_eq!(bytes_input.sub_input(..3).end_index(), 3); + assert_eq!(bytes_input.sub_bytes_mut(..1).start_index(), 0); + assert_eq!(bytes_input.sub_bytes_mut(1..=1).start_index(), 1); + assert_eq!(bytes_input.sub_bytes_mut(..1).end_index(), 1); + assert_eq!(bytes_input.sub_bytes_mut(..=1).end_index(), 2); + assert_eq!(bytes_input.sub_bytes_mut(1..=1).end_index(), 2); + assert_eq!(bytes_input.sub_bytes_mut(1..).end_index(), 3); + assert_eq!(bytes_input.sub_bytes_mut(..3).end_index(), 3); } } diff --git a/libafl/src/inputs/encoded.rs b/libafl/src/inputs/encoded.rs index f5b9c9ba59..e3f5897e18 100644 --- a/libafl/src/inputs/encoded.rs +++ b/libafl/src/inputs/encoded.rs @@ -1,5 +1,6 @@ -//! The `EncodedInput` is the "normal" input, a map of codes, that can be sent directly to the client -//! (As opposed to other, more abstract, inputs, like an Grammar-Based AST Input) +//! 
The `EncodedInput` is the "normal" input, a map of codes, that can be sent directly to the client. +//! +//! This is different to other, more abstract inputs, like an Grammar-Based AST Input. //! See also [the paper on token-level fuzzing](https://www.usenix.org/system/files/sec21-salls.pdf) #[cfg(feature = "regex")] @@ -19,7 +20,7 @@ use libafl_bolts::{Error, HasLen}; use regex::Regex; use serde::{Deserialize, Serialize}; -use crate::inputs::Input; +use crate::{corpus::CorpusId, inputs::Input}; /// Trait to encode bytes to an [`EncodedInput`] using the given [`Tokenizer`] pub trait InputEncoder @@ -202,7 +203,7 @@ pub struct EncodedInput { impl Input for EncodedInput { /// Generate a name for this input #[must_use] - fn generate_name(&self, _idx: usize) -> String { + fn generate_name(&self, _id: Option) -> String { let mut hasher = RandomState::with_seeds(0, 0, 0, 0).build_hasher(); for code in &self.codes { hasher.write(&code.to_le_bytes()); diff --git a/libafl/src/inputs/gramatron.rs b/libafl/src/inputs/gramatron.rs index 1b1577b7ef..443ee51ad3 100644 --- a/libafl/src/inputs/gramatron.rs +++ b/libafl/src/inputs/gramatron.rs @@ -9,7 +9,7 @@ use ahash::RandomState; use libafl_bolts::{Error, HasLen}; use serde::{Deserialize, Serialize}; -use crate::inputs::Input; +use crate::{corpus::CorpusId, inputs::Input}; /// A terminal for gramatron grammar fuzzing #[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq, Eq, Hash)] @@ -44,7 +44,7 @@ pub struct GramatronInput { impl Input for GramatronInput { /// Generate a name for this input #[must_use] - fn generate_name(&self, _idx: usize) -> String { + fn generate_name(&self, _id: Option) -> String { let mut hasher = RandomState::with_seeds(0, 0, 0, 0).build_hasher(); for term in &self.terms { hasher.write(term.symbol.as_bytes()); diff --git a/libafl/src/inputs/mod.rs b/libafl/src/inputs/mod.rs index 6269a9b645..240be96917 100644 --- a/libafl/src/inputs/mod.rs +++ b/libafl/src/inputs/mod.rs @@ -34,11 +34,17 @@ use std::{fs::File, hash::Hash, io::Read, path::Path}; #[cfg(feature = "std")] use libafl_bolts::fs::write_file_atomic; -use libafl_bolts::{ownedref::OwnedSlice, Error, HasLen}; +use libafl_bolts::{ + ownedref::{OwnedMutSlice, OwnedSlice}, + subrange::{SubRangeMutSlice, SubRangeSlice}, + Error, HasLen, +}; #[cfg(feature = "nautilus")] pub use nautilus::*; use serde::{Deserialize, Serialize}; +use crate::corpus::CorpusId; + /// An input for the target #[cfg(not(feature = "std"))] pub trait Input: Clone + Serialize + serde::de::DeserializeOwned + Debug { @@ -53,10 +59,7 @@ pub trait Input: Clone + Serialize + serde::de::DeserializeOwned + Debug { } /// Generate a name for this input - fn generate_name(&self, idx: usize) -> String; - - /// An hook executed if the input is stored as `Testcase` - fn wrapped_as_testcase(&mut self) {} + fn generate_name(&self, id: Option) -> String; } /// An input for the target @@ -82,18 +85,15 @@ pub trait Input: Clone + Serialize + serde::de::DeserializeOwned + Debug { } /// Generate a name for this input, the user is responsible for making each name of testcase unique. 
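The signature change just below passes an optional `CorpusId` instead of a plain index. A minimal sketch of how a downstream `Input` implementation adapts (the `MyInput` type is hypothetical; the id handling mirrors the `NautilusInput` hunk later in this patch):

```rust
use libafl::{corpus::CorpusId, inputs::Input};
use serde::{Deserialize, Serialize};

#[derive(Clone, Debug, Serialize, Deserialize)]
struct MyInput(Vec<u8>);

impl Input for MyInput {
    // The corpus id is now optional; fall back to a stable name when it is absent.
    fn generate_name(&self, id: Option<CorpusId>) -> String {
        match id {
            Some(id) => format!("myinput_{}", id.0),
            None => "myinput_unknown".into(),
        }
    }
}
```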
- fn generate_name(&self, idx: usize) -> String; - - /// An hook executed if the input is stored as `Testcase` - fn wrapped_as_testcase(&mut self) {} + fn generate_name(&self, id: Option) -> String; } /// Convert between two input types with a state pub trait InputConverter: Debug { /// Source type - type From: Input; + type From; /// Destination type - type To: Input; + type To; /// Convert the src type to the dest fn convert(&mut self, input: Self::From) -> Result; @@ -111,7 +111,7 @@ macro_rules! none_input_converter { #[derive(Copy, Clone, Serialize, Deserialize, Debug, Hash)] pub struct NopInput {} impl Input for NopInput { - fn generate_name(&self, _idx: usize) -> String { + fn generate_name(&self, _id: Option) -> String { "nop-input".to_string() } } @@ -121,7 +121,14 @@ impl HasTargetBytes for NopInput { } } +impl HasLen for NopInput { + fn len(&self) -> usize { + 0 + } +} + // TODO change this to fn target_bytes(&self, buffer: &mut Vec) -> &[u8]; +/// Has a byte representation intended for the target. /// Can be represented with a vector of bytes. /// This representation is not necessarily deserializable. /// Instead, it can be used as bytes input for a target @@ -130,7 +137,7 @@ pub trait HasTargetBytes { fn target_bytes(&self) -> OwnedSlice; } -/// Contains mutateable and resizable bytes +/// Contains mutable and resizable bytes pub trait HasMutatorBytes: HasLen { /// The bytes fn bytes(&self) -> &[u8]; @@ -140,23 +147,39 @@ pub trait HasMutatorBytes: HasLen { /// Resize the mutator bytes to a given new size. /// Use `value` to fill new slots in case the buffer grows. - /// See [`alloc::vec::Vec::splice`]. + /// See [`Vec::splice`]. fn resize(&mut self, new_len: usize, value: u8); /// Extends the given buffer with an iterator. See [`alloc::vec::Vec::extend`] fn extend<'a, I: IntoIterator>(&mut self, iter: I); - /// Splices the given target bytes according to [`alloc::vec::Vec::splice`]'s rules + /// Splices the given target bytes according to [`Vec::splice`]'s rules fn splice(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter> where R: RangeBounds, I: IntoIterator; - /// Drains the given target bytes according to [`alloc::vec::Vec::drain`]'s rules + /// Drains the given target bytes according to [`Vec::drain`]'s rules fn drain(&mut self, range: R) -> Drain<'_, u8> where R: RangeBounds; + /// Creates a [`SubRangeSlice`] from this input, that can be used to slice a byte array. + fn sub_bytes(&self, range: R) -> SubRangeSlice + where + R: RangeBounds, + { + SubRangeSlice::new(OwnedSlice::from(self.bytes()), range) + } + + /// Creates a [`SubRangeMutSlice`] from this input, that can be used to slice a byte array. + fn sub_bytes_mut(&mut self, range: R) -> SubRangeMutSlice + where + R: RangeBounds, + { + SubRangeMutSlice::new(OwnedMutSlice::from(self.bytes_mut()), range) + } + /// Creates a [`BytesSubInput`] from this input, that can be used for local mutations. fn sub_input(&mut self, range: R) -> BytesSubInput where @@ -166,6 +189,26 @@ pub trait HasMutatorBytes: HasLen { } } +/// Mapping types to themselves, used to ensure lifetime consistency for mapped mutators. +/// +/// Specifically, this is for [`Input`] types that are owned wrappers around a reference. The lifetime of the associated type should be the same as the reference. 
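A self-contained sketch of what the generic associated type buys here: the mapped type of a wrapper can re-borrow at a shorter lifetime (the trait is restated locally so the snippet compiles on its own, and `MyWrapper` is illustrative; the in-tree `MutVecInput` and `BytesSubInput` impls below follow the same shape):

```rust
trait MappedInput {
    type Type<'a>
    where
        Self: 'a;
}

// Hypothetical owned wrapper around a mutable borrow.
struct MyWrapper<'a>(&'a mut [u8]);

impl MappedInput for MyWrapper<'_> {
    // The associated type carries its own lifetime, so a long-lived wrapper
    // type can be mapped to a shorter-lived one without naming the long lifetime.
    type Type<'b>
        = MyWrapper<'b>
    where
        Self: 'b;
}

fn main() {
    let mut data = [0u8; 4];
    let wrapped: <MyWrapper<'static> as MappedInput>::Type<'_> = MyWrapper(&mut data);
    assert_eq!(wrapped.0.len(), 4);
}
```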
+pub trait MappedInput { + /// The type for which this trait is implemented + type Type<'a> + where + Self: 'a; +} + +impl MappedInput for Option +where + T: MappedInput, +{ + type Type<'a> + = Option> + where + T: 'a; +} + /// A wrapper type that allows us to use mutators for Mutators for `&mut `[`Vec`]. #[derive(Debug)] pub struct MutVecInput<'a>(&'a mut Vec); @@ -176,13 +219,13 @@ impl<'a> From<&'a mut Vec> for MutVecInput<'a> { } } -impl<'a> HasLen for MutVecInput<'a> { +impl HasLen for MutVecInput<'_> { fn len(&self) -> usize { self.0.len() } } -impl<'a> HasMutatorBytes for MutVecInput<'a> { +impl HasMutatorBytes for MutVecInput<'_> { fn bytes(&self) -> &[u8] { self.0 } @@ -215,6 +258,13 @@ impl<'a> HasMutatorBytes for MutVecInput<'a> { } } +impl MappedInput for MutVecInput<'_> { + type Type<'b> + = MutVecInput<'b> + where + Self: 'b; +} + /// Defines the input type shared across traits of the type. /// Needed for consistency across HasCorpus/HasSolutions and friends. pub trait UsesInput { @@ -292,3 +342,42 @@ where (self.convert_cb)(input) } } + +/// A converter that converts from `input` to target bytes +pub trait TargetBytesConverter { + /// The input + type Input; + + /// Create target bytes + fn to_target_bytes<'a>(&mut self, input: &'a Self::Input) -> OwnedSlice<'a, u8>; +} + +/// Simply gets the target bytes out from a [`HasTargetBytes`] type. +#[derive(Debug)] +pub struct NopTargetBytesConverter { + phantom: PhantomData, +} + +impl NopTargetBytesConverter { + /// Create a new [`NopTargetBytesConverter`] + #[must_use] + pub fn new() -> NopTargetBytesConverter { + Self { + phantom: PhantomData, + } + } +} + +impl Default for NopTargetBytesConverter { + fn default() -> Self { + Self::new() + } +} + +impl TargetBytesConverter for NopTargetBytesConverter { + type Input = I; + + fn to_target_bytes<'a>(&mut self, input: &'a Self::Input) -> OwnedSlice<'a, u8> { + input.target_bytes() + } +} diff --git a/libafl/src/inputs/multi.rs b/libafl/src/inputs/multi.rs index 750495ae64..7ebdd0f2c5 100644 --- a/libafl/src/inputs/multi.rs +++ b/libafl/src/inputs/multi.rs @@ -12,7 +12,7 @@ use alloc::{ use arrayvec::ArrayVec; use serde::{Deserialize, Serialize}; -use crate::inputs::Input; +use crate::{corpus::CorpusId, inputs::Input}; /// An input composed of multiple parts. Use in situations where subcomponents are not necessarily /// related, or represent distinct parts of the input. @@ -153,11 +153,11 @@ impl Input for MultipartInput where I: Input, { - fn generate_name(&self, idx: usize) -> String { + fn generate_name(&self, id: Option) -> String { self.names .iter() .cloned() - .zip(self.parts.iter().map(|i| i.generate_name(idx))) + .zip(self.parts.iter().map(|i| i.generate_name(id))) .map(|(name, generated)| format!("{name}-{generated}")) .collect::>() .join(",") diff --git a/libafl/src/inputs/nautilus.rs b/libafl/src/inputs/nautilus.rs index 0b4190de41..2a87a4d2be 100644 --- a/libafl/src/inputs/nautilus.rs +++ b/libafl/src/inputs/nautilus.rs @@ -1,22 +1,21 @@ //! Input for the [`Nautilus`](https://github.com/RUB-SysSec/nautilus) grammar fuzzer methods //! - -//use ahash::AHasher; -//use core::hash::Hasher; - +//! 
use alloc::{rc::Rc, string::String, vec::Vec}; use core::cell::RefCell; use std::hash::{Hash, Hasher}; -use libafl_bolts::HasLen; +use libafl_bolts::{ownedref::OwnedSlice, HasLen}; use serde::{Deserialize, Serialize}; +use super::TargetBytesConverter; use crate::{ common::nautilus::grammartec::{ newtypes::NodeId, rule::RuleIdOrCustom, tree::{Tree, TreeLike}, }, + corpus::CorpusId, generators::nautilus::NautilusContext, inputs::{BytesInput, Input, InputConverter}, Error, @@ -32,13 +31,18 @@ pub struct NautilusInput { impl Input for NautilusInput { /// Generate a name for this input #[must_use] - fn generate_name(&self, idx: usize) -> String { + fn generate_name(&self, id: Option) -> String { /*let mut hasher = AHasher::new_with_keys(0, 0); for term in &self.terms { hasher.write(term.symbol.as_bytes()); } format!("{:016x}", hasher.finish())*/ - format!("id:{idx}") + + if let Some(id) = id { + format!("id_{}", id.0) + } else { + "id_unknown".into() + } } } @@ -124,7 +128,7 @@ impl<'a> NautilusToBytesInputConverter<'a> { } } -impl<'a> InputConverter for NautilusToBytesInputConverter<'a> { +impl InputConverter for NautilusToBytesInputConverter<'_> { type From = NautilusInput; type To = BytesInput; @@ -134,3 +138,28 @@ impl<'a> InputConverter for NautilusToBytesInputConverter<'a> { Ok(BytesInput::new(bytes)) } } + +/// A converter to convert a nautilus context to target bytes +#[derive(Debug)] +pub struct NautilusTargetBytesConverter<'a> { + /// The Nautilus Context + ctx: &'a NautilusContext, +} + +impl<'a> NautilusTargetBytesConverter<'a> { + /// Create a new [`NautilusTargetBytesConverter`] + #[must_use] + pub fn new(ctx: &'a NautilusContext) -> NautilusTargetBytesConverter<'a> { + NautilusTargetBytesConverter { ctx } + } +} + +impl TargetBytesConverter for NautilusTargetBytesConverter<'_> { + type Input = NautilusInput; + + fn to_target_bytes<'a>(&mut self, input: &'a Self::Input) -> OwnedSlice<'a, u8> { + let mut bytes = Vec::new(); + input.unparse(self.ctx, &mut bytes); + OwnedSlice::from(bytes) + } +} diff --git a/libafl/src/lib.rs b/libafl/src/lib.rs index 88047386ae..2ed3ed5371 100644 --- a/libafl/src/lib.rs +++ b/libafl/src/lib.rs @@ -4,35 +4,11 @@ Welcome to `LibAFL` #![doc = include_str!("../README.md")] /*! 
*/ #![cfg_attr(feature = "document-features", doc = document_features::document_features!())] -#![forbid(unexpected_cfgs)] -#![allow(incomplete_features)] #![no_std] // For `type_eq` #![cfg_attr(nightly, feature(specialization))] // For `std::simd` #![cfg_attr(nightly, feature(portable_simd))] -#![warn(clippy::cargo)] -#![allow(ambiguous_glob_reexports)] -#![deny(clippy::cargo_common_metadata)] -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(clippy::all)] -#![deny(clippy::pedantic)] -#![allow( - clippy::unreadable_literal, - clippy::type_repetition_in_bounds, - clippy::missing_errors_doc, - clippy::cast_possible_truncation, - clippy::used_underscore_binding, - clippy::ptr_as_ptr, - clippy::missing_panics_doc, - clippy::missing_docs_in_private_items, - clippy::module_name_repetitions, - clippy::ptr_cast_constness, - clippy::unsafe_derive_deserialize, - clippy::similar_names, - clippy::too_many_lines, - clippy::into_iter_without_iter, // broken -)] #![cfg_attr(not(test), warn( missing_debug_implementations, missing_docs, @@ -73,9 +49,6 @@ Welcome to `LibAFL` while_true ) )] -// Till they fix this buggy lint in clippy -#![allow(clippy::borrow_as_ptr)] -#![allow(clippy::borrow_deref_ref)] #[cfg(feature = "std")] #[macro_use] @@ -110,7 +83,7 @@ pub mod stages; pub mod state; pub use fuzzer::*; -pub use libafl_bolts::Error; +pub use libafl_bolts::{nonzero, Error}; /// The purpose of this module is to alleviate imports of many components by adding a glob import. #[cfg(feature = "prelude")] @@ -135,7 +108,10 @@ mod tests { #[cfg(miri)] use libafl_bolts::serdeany::RegistryBuilder; - use libafl_bolts::{rands::StdRand, tuples::tuple_list}; + use libafl_bolts::{ + rands::{RomuDuoJrRand, StdRand}, + tuples::tuple_list, + }; #[cfg(miri)] use crate::stages::ExecutionCountRestartHelperMetadata; @@ -221,7 +197,15 @@ mod tests { InMemoryCorpus, StdRand, InMemoryCorpus, - > = postcard::from_bytes(state_serialized.as_slice()).unwrap(); + > = postcard::from_bytes::< + StdState< + BytesInput, + InMemoryCorpus, + RomuDuoJrRand, + InMemoryCorpus, + >, + >(state_serialized.as_slice()) + .unwrap(); assert_eq!(state.corpus().count(), state_deserialized.corpus().count()); let corpus_serialized = postcard::to_allocvec(state.corpus()).unwrap(); diff --git a/libafl/src/monitors/disk.rs b/libafl/src/monitors/disk.rs index 754700ccaa..ed745ae07b 100644 --- a/libafl/src/monitors/disk.rs +++ b/libafl/src/monitors/disk.rs @@ -1,4 +1,4 @@ -//! Monitors that wrap a base one and log on disk +//! Monitors that wrap a base monitor and also log to disk using different formats like `JSON` and `TOML`. use alloc::{string::String, vec::Vec}; use core::time::Duration; @@ -13,9 +13,9 @@ use serde_json::json; use crate::monitors::{ClientStats, Monitor, NopMonitor}; -/// Wrap a monitor and log the current state of the monitor into a TOML file. +/// Wrap a monitor and log the current state of the monitor into a Toml file. 
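Because this hunk renames the type (and `OnDiskJSONMonitor` below gets the same treatment), a short usage sketch under the new name may help downstream users migrate; the wrapped `SimpleMonitor` and the file name are illustrative only:

```rust
use libafl::monitors::{OnDiskTomlMonitor, SimpleMonitor};

fn main() {
    // Wraps a base monitor and periodically rewrites its stats as TOML.
    let base = SimpleMonitor::new(|s| println!("{s}"));
    let _monitor = OnDiskTomlMonitor::new("./fuzzer_stats.toml", base);
}
```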
#[derive(Debug, Clone)] -pub struct OnDiskTOMLMonitor +pub struct OnDiskTomlMonitor where M: Monitor, { @@ -25,7 +25,7 @@ where update_interval: Duration, } -impl Monitor for OnDiskTOMLMonitor +impl Monitor for OnDiskTomlMonitor where M: Monitor, { @@ -59,10 +59,10 @@ where if cur_time - self.last_update >= self.update_interval { self.last_update = cur_time; - let mut file = File::create(&self.filename).expect("Failed to open the TOML file"); + let mut file = File::create(&self.filename).expect("Failed to open the Toml file"); write!( &mut file, - "# This TOML is generated using the OnDiskMonitor component of LibAFL + "# This Toml is generated using the OnDiskMonitor component of LibAFL [global] run_time = \"{}\" @@ -79,7 +79,7 @@ exec_sec = {} self.total_execs(), self.execs_per_sec() ) - .expect("Failed to write to the TOML file"); + .expect("Failed to write to the Toml file"); for (i, client) in self.client_stats_mut().iter_mut().enumerate() { let exec_sec = client.execs_per_sec(cur_time); @@ -95,7 +95,7 @@ exec_sec = {} ", i, client.corpus_size, client.objective_size, client.executions, exec_sec ) - .expect("Failed to write to the TOML file"); + .expect("Failed to write to the Toml file"); for (key, val) in &client.user_monitor { let k: String = key @@ -104,7 +104,7 @@ exec_sec = {} .filter(|c| c.is_alphanumeric() || *c == '_') .collect(); writeln!(&mut file, "{k} = \"{val}\"") - .expect("Failed to write to the TOML file"); + .expect("Failed to write to the Toml file"); } } @@ -115,11 +115,11 @@ exec_sec = {} } } -impl OnDiskTOMLMonitor +impl OnDiskTomlMonitor where M: Monitor, { - /// Create new [`OnDiskTOMLMonitor`] + /// Create new [`OnDiskTomlMonitor`] #[must_use] pub fn new
<P>
(filename: P, base: M) -> Self where @@ -128,7 +128,7 @@ where Self::with_update_interval(filename, base, Duration::from_secs(60)) } - /// Create new [`OnDiskTOMLMonitor`] with custom update interval + /// Create new [`OnDiskTomlMonitor`] with custom update interval #[must_use] pub fn with_update_interval
<P>
(filename: P, base: M, update_interval: Duration) -> Self where @@ -143,8 +143,8 @@ where } } -impl OnDiskTOMLMonitor { - /// Create new [`OnDiskTOMLMonitor`] without a base +impl OnDiskTomlMonitor { + /// Create new [`OnDiskTomlMonitor`] without a base #[must_use] pub fn nop
<P>
(filename: P) -> Self where @@ -155,8 +155,8 @@ impl OnDiskTOMLMonitor { } #[derive(Debug, Clone)] -/// Wraps a base monitor and continuously appends the current statistics to a JSON lines file. -pub struct OnDiskJSONMonitor +/// Wraps a base monitor and continuously appends the current statistics to a Json lines file. +pub struct OnDiskJsonMonitor where F: FnMut(&mut M) -> bool, M: Monitor, @@ -167,12 +167,12 @@ where log_record: F, } -impl OnDiskJSONMonitor +impl OnDiskJsonMonitor where F: FnMut(&mut M) -> bool, M: Monitor, { - /// Create a new [`OnDiskJSONMonitor`] + /// Create a new [`OnDiskJsonMonitor`] pub fn new
<P>
(filename: P, base: M, log_record: F) -> Self where P: Into, @@ -187,7 +187,7 @@ where } } -impl Monitor for OnDiskJSONMonitor +impl Monitor for OnDiskJsonMonitor where F: FnMut(&mut M) -> bool, M: Monitor, @@ -225,7 +225,7 @@ where "exec_sec": self.base.execs_per_sec(), "client_stats": self.client_stats(), }); - writeln!(&file, "{line}").expect("Unable to write JSON to file"); + writeln!(&file, "{line}").expect("Unable to write Json to file"); } self.base.display(event_msg, sender_id); } diff --git a/libafl/src/monitors/mod.rs b/libafl/src/monitors/mod.rs index 57fb6b501e..0a6cf0f6bc 100644 --- a/libafl/src/monitors/mod.rs +++ b/libafl/src/monitors/mod.rs @@ -4,11 +4,9 @@ pub mod multi; pub use multi::MultiMonitor; #[cfg(all(feature = "tui_monitor", feature = "std"))] -#[allow(missing_docs)] pub mod tui; #[cfg(all(feature = "prometheus_monitor", feature = "std"))] -#[allow(missing_docs)] pub mod prometheus; use alloc::string::ToString; @@ -20,7 +18,7 @@ use alloc::{borrow::Cow, fmt::Debug, string::String, vec::Vec}; use core::{fmt, fmt::Write, time::Duration}; #[cfg(feature = "std")] -pub use disk::{OnDiskJSONMonitor, OnDiskTOMLMonitor}; +pub use disk::{OnDiskJsonMonitor, OnDiskTomlMonitor}; use hashbrown::HashMap; use libafl_bolts::{current_time, format_duration_hms, ClientId}; use serde::{Deserialize, Serialize}; @@ -320,8 +318,11 @@ fn prettify_float(value: f64) -> String { value => (value, ""), }; match value { + value if value >= 1000000.0 => { + format!("{value:.2}{suffix}") + } value if value >= 1000.0 => { - format!("{value}{suffix}") + format!("{value:.1}{suffix}") } value if value >= 100.0 => { format!("{value:.1}{suffix}") @@ -371,7 +372,7 @@ pub struct ClientStats { } impl ClientStats { - /// We got a new information about executions for this client, insert them. + /// We got new information about executions for this client, insert them. #[cfg(feature = "afl_exec_sec")] pub fn update_executions(&mut self, executions: u64, cur_time: Duration) { let diff = cur_time @@ -399,7 +400,7 @@ impl ClientStats { self.executions = self.prev_state_executions + executions; } - /// We got a new information about corpus size for this client, insert them. + /// We got new information about corpus size for this client, insert them. pub fn update_corpus_size(&mut self, corpus_size: u64) { self.corpus_size = corpus_size; self.last_corpus_time = current_time(); diff --git a/libafl/src/monitors/multi.rs b/libafl/src/monitors/multi.rs index cf338781f3..e5a3e56c0e 100644 --- a/libafl/src/monitors/multi.rs +++ b/libafl/src/monitors/multi.rs @@ -1,4 +1,4 @@ -//! Monitor to display both cumulative and per-client monitor +//! The [`MultiMonitor`] displays both cumulative and per-client stats. use alloc::{string::String, vec::Vec}; use core::{ diff --git a/libafl/src/monitors/prometheus.rs b/libafl/src/monitors/prometheus.rs index 2ef8227b9b..1817b33cfa 100644 --- a/libafl/src/monitors/prometheus.rs +++ b/libafl/src/monitors/prometheus.rs @@ -1,26 +1,31 @@ -// ===== overview for prommon ===== -// The client (i.e., the fuzzer) sets up an HTTP endpoint (/metrics). -// The endpoint contains metrics such as execution rate. - -// A prometheus server (can use a precompiled binary or docker) then scrapes \ -// the endpoint at regular intervals (configurable via prometheus.yml file). -// ==================== -// -// == how to use it === -// This monitor should plug into any fuzzer similar to other monitors. 
-// In your fuzzer, include: -// ```rust,ignore -// use libafl::monitors::PrometheusMonitor; -// ``` -// as well as: -// ```rust,ignore -// let listener = "127.0.0.1:8080".to_string(); // point prometheus to scrape here in your prometheus.yml -// let mon = PrometheusMonitor::new(listener, |s| log::info!("{s}")); -// and then like with any other monitor, pass it into the event manager like so: -// let mut mgr = SimpleEventManager::new(mon); -// ``` -// When using docker, you may need to point prometheus.yml to the docker0 interface or host.docker.internal -// ==================== +//! The [`PrometheusMonitor`] logs fuzzer progress to a prometheus endpoint. +//! +//! ## Overview +//! +//! The client (i.e., the fuzzer) sets up an HTTP endpoint (/metrics). +//! The endpoint contains metrics such as execution rate. +//! +//! A prometheus server (can use a precompiled binary or docker) then scrapes +//! the endpoint at regular intervals (configurable via prometheus.yml file). +//! +//! ## How to use it +//! +//! Create a [`PrometheusMonitor`] and plug it into any fuzzer similar to other monitors. +//! In your fuzzer: +//! +//! ```rust +//! // First, include: +//! use libafl::monitors::PrometheusMonitor; +//! +//! // Then, create the monitor: +//! let listener = "127.0.0.1:8080".to_string(); // point prometheus to scrape here in your prometheus.yml +//! let mon = PrometheusMonitor::new(listener, |s| log::info!("{s}")); +//! +//! // and finally, like with any other monitor, pass it into the event manager like so: +//! // let mgr = SimpleEventManager::new(mon); +//! ``` +//! +//! When using docker, you may need to point `prometheus.yml` to the `docker0` interface or `host.docker.internal` use alloc::{borrow::Cow, fmt::Debug, string::String, vec::Vec}; use core::{fmt, time::Duration}; @@ -194,6 +199,9 @@ impl PrometheusMonitor where F: FnMut(&str), { + /// Create a new [`PrometheusMonitor`]. + /// The `listener` is the address to send logs to. + /// The `print_fn` is the printing function that can output the logs otherwise. pub fn new(listener: String, print_fn: F) -> Self { // Gauge's implementation of clone uses Arc let corpus_count = Family::::default(); @@ -285,9 +293,9 @@ where } } -// set up an HTTP endpoint /metrics +/// Set up an HTTP endpoint /metrics #[allow(clippy::too_many_arguments)] -pub async fn serve_metrics( +pub(crate) async fn serve_metrics( listener: String, corpus: Family, objectives: Family, @@ -297,8 +305,6 @@ pub async fn serve_metrics( clients_count: Family, custom_stat: Family>, ) -> Result<(), std::io::Error> { - tide::log::start(); - let mut registry = Registry::default(); registry.register("corpus_count", "Number of test cases in the corpus", corpus); @@ -349,12 +355,16 @@ pub async fn serve_metrics( Ok(()) } +/// Struct used to define the labels in `prometheus`. #[derive(Clone, Hash, PartialEq, Eq, EncodeLabelSet, Debug)] pub struct Labels { - client: u32, // sender_id: u32, to differentiate between clients when multiple are spawned. - stat: Cow<'static, str>, // for custom_stat filtering. + /// The `sender_id` helps to differentiate between clients when multiple are spawned. + client: u32, + /// Used for `custom_stat` filtering. + stat: Cow<'static, str>, } +/// The state for this monitor. #[derive(Clone)] struct State { registry: Arc, diff --git a/libafl/src/monitors/tui/mod.rs b/libafl/src/monitors/tui/mod.rs index 149760aa69..77cbbe0ee9 100644 --- a/libafl/src/monitors/tui/mod.rs +++ b/libafl/src/monitors/tui/mod.rs @@ -1,4 +1,6 @@ -//! Monitor based on ratatui +//! 
[`TuiMonitor`] is a fancy-looking TUI monitor similar to `AFL`. +//! +//! It's based on [ratatui](https://ratatui.rs/) use alloc::{borrow::Cow, boxed::Box, string::ToString}; use core::cmp; @@ -24,30 +26,60 @@ use hashbrown::HashMap; use libafl_bolts::{current_time, format_duration_hms, ClientId}; use ratatui::{backend::CrosstermBackend, Terminal}; use serde_json::Value; +use typed_builder::TypedBuilder; #[cfg(feature = "introspection")] use super::{ClientPerfMonitor, PerfFeature}; use crate::monitors::{Aggregator, AggregatorOps, ClientStats, Monitor, UserStats, UserStatsValue}; +#[allow(missing_docs)] pub mod ui; -use ui::TuiUI; +use ui::TuiUi; const DEFAULT_TIME_WINDOW: u64 = 60 * 10; // 10 min const DEFAULT_LOGS_NUMBER: usize = 128; +#[derive(Debug, Clone, TypedBuilder)] +#[builder(build_method(into = TuiMonitor), builder_method(vis = "pub(crate)", + doc = "Build the [`TuiMonitor`] from the set values"))] +/// Settings to create a new [`TuiMonitor`]. +/// Use `TuiMonitor::builder()` or create this config and call `.into()` to create a new [`TuiMonitor`]. +pub struct TuiMonitorConfig { + /// The title to show + #[builder(default_code = r#""LibAFL Fuzzer".to_string()"#, setter(into))] + pub title: String, + /// A version string to show for this (optional) + #[builder(default_code = r#""default".to_string()"#, setter(into))] + pub version: String, + /// Creates the monitor with an explicit `start_time`. + /// If nothings was set, this will use [`current_time`] instead. + #[builder(default_code = "current_time()")] + pub start_time: Duration, + /// Enables unicode TUI graphics, Looks better but may interfere with old terminals. + #[builder(default = true)] + pub enhanced_graphics: bool, +} + +/// A single status entry for timings #[derive(Debug, Copy, Clone)] pub struct TimedStat { + /// The time pub time: Duration, + /// The item pub item: u64, } +/// Stats for timings #[derive(Debug, Clone)] pub struct TimedStats { + /// Series of [`TimedStat`] entries pub series: VecDeque, + /// The time window to keep track of pub window: Duration, } impl TimedStats { + /// Create a new [`TimedStats`] struct #[must_use] pub fn new(window: Duration) -> Self { Self { @@ -56,6 +88,7 @@ impl TimedStats { } } + /// Add a stat datapoint pub fn add(&mut self, time: Duration, item: u64) { if self.series.is_empty() || self.series.back().unwrap().item != item { if self.series.front().is_some() @@ -67,6 +100,7 @@ impl TimedStats { } } + /// Add a stat datapoint for the `current_time` pub fn add_now(&mut self, item: u64) { if self.series.is_empty() || self.series[self.series.len() - 1].item != item { let time = current_time(); @@ -79,6 +113,7 @@ impl TimedStats { } } + /// Change the window duration pub fn update_window(&mut self, window: Duration) { self.window = window; while !self.series.is_empty() @@ -89,18 +124,25 @@ impl TimedStats { } } +/// The context to show performance metrics #[cfg(feature = "introspection")] #[derive(Debug, Default, Clone)] pub struct PerfTuiContext { + /// Time spent in the scheduler pub scheduler: f64, + /// Time spent in the event manager pub manager: f64, + /// Additional time pub unmeasured: f64, + /// Time spent in each individual stage pub stages: Vec>, + /// Time spent in each individual feedback pub feedbacks: Vec<(String, f64)>, } #[cfg(feature = "introspection")] impl PerfTuiContext { + /// Get the data for performance metrics #[allow(clippy::cast_precision_loss)] pub fn grab_data(&mut self, m: &ClientPerfMonitor) { // Calculate the elapsed time from the monitor @@ -164,15 
+206,21 @@ impl PerfTuiContext { } } +/// Data struct to process timings #[derive(Debug, Default, Clone)] pub struct ProcessTiming { + /// The start time pub client_start_time: Duration, + /// The executions speed pub exec_speed: String, + /// Timing of the last new corpus entry pub last_new_entry: Duration, + /// Timing of the last new solution pub last_saved_solution: Duration, } impl ProcessTiming { + /// Create a new [`ProcessTiming`] struct fn new() -> Self { Self { exec_speed: "0".to_string(), @@ -181,6 +229,8 @@ impl ProcessTiming { } } +/// The geometry of a single data point +#[allow(missing_docs)] #[derive(Debug, Default, Clone)] pub struct ItemGeometry { pub pending: u64, @@ -191,6 +241,7 @@ pub struct ItemGeometry { } impl ItemGeometry { + /// Create a new [`ItemGeometry`] fn new() -> Self { Self { stability: "0%".to_string(), @@ -199,6 +250,8 @@ impl ItemGeometry { } } +/// The context for a single client tracked in this [`TuiMonitor`] +#[allow(missing_docs)] #[derive(Debug, Default, Clone)] pub struct ClientTuiContext { pub corpus: u64, @@ -215,6 +268,7 @@ pub struct ClientTuiContext { } impl ClientTuiContext { + /// Grab data for a single client pub fn grab_data(&mut self, client: &ClientStats, exec_sec: String) { self.corpus = client.corpus_size; self.objectives = client.objective_size; @@ -267,6 +321,8 @@ impl ClientTuiContext { } } +/// The [`TuiContext`] for this [`TuiMonitor`] +#[allow(missing_docs)] #[derive(Debug, Clone)] pub struct TuiContext { pub graphs: Vec, @@ -326,7 +382,7 @@ impl TuiContext { } } -/// Tracking monitor during fuzzing and display with ratatui +/// Tracking monitor during fuzzing and display with [`ratatui`](https://ratatui.rs/) #[derive(Debug, Clone)] pub struct TuiMonitor { pub(crate) context: Arc>, @@ -336,6 +392,16 @@ pub struct TuiMonitor { aggregator: Aggregator, } +impl From for TuiMonitor { + #[allow(deprecated)] + fn from(builder: TuiMonitorConfig) -> Self { + Self::with_time( + TuiUi::with_version(builder.title, builder.version, builder.enhanced_graphics), + builder.start_time, + ) + } +} + impl Monitor for TuiMonitor { /// The client monitor, mutable /// This also includes disabled "padding" clients. @@ -443,15 +509,35 @@ impl Monitor for TuiMonitor { } impl TuiMonitor { - /// Creates the monitor + /// Create a builder for [`TuiMonitor`] + pub fn builder() -> TuiMonitorConfigBuilder { + TuiMonitorConfig::builder() + } + + /// Creates the monitor. + /// + /// # Deprecation Note + /// Use `TuiMonitor::builder()` instead. + #[deprecated( + since = "0.13.2", + note = "Please use TuiMonitor::builder() instead of creating TuiUi directly." + )] #[must_use] - pub fn new(tui_ui: TuiUI) -> Self { + #[allow(deprecated)] + pub fn new(tui_ui: TuiUi) -> Self { Self::with_time(tui_ui, current_time()) } /// Creates the monitor with a given `start_time`. + /// + /// # Deprecation Note + /// Use `TuiMonitor::builder()` instead. + #[deprecated( + since = "0.13.2", + note = "Please use TuiMonitor::builder() instead of creating TuiUi directly." 
+ )] #[must_use] - pub fn with_time(tui_ui: TuiUI, start_time: Duration) -> Self { + pub fn with_time(tui_ui: TuiUi, start_time: Duration) -> Self { let context = Arc::new(RwLock::new(TuiContext::new(start_time))); enable_raw_mode().unwrap(); @@ -565,7 +651,7 @@ impl TuiMonitor { fn run_tui_thread( context: Arc>, tick_rate: Duration, - tui_ui: TuiUI, + tui_ui: TuiUi, stdout_provider: impl Send + Sync + 'static + Fn() -> W, ) { thread::spawn(move || -> io::Result<()> { diff --git a/libafl/src/monitors/tui/ui.rs b/libafl/src/monitors/tui/ui.rs index 1a2dc47c93..e4c07c9116 100644 --- a/libafl/src/monitors/tui/ui.rs +++ b/libafl/src/monitors/tui/ui.rs @@ -21,7 +21,7 @@ use super::{ }; #[derive(Default, Debug)] -pub struct TuiUI { +pub struct TuiUi { title: String, version: String, enhanced_graphics: bool, @@ -34,13 +34,13 @@ pub struct TuiUI { pub should_quit: bool, } -impl TuiUI { +impl TuiUi { #[must_use] pub fn new(title: String, enhanced_graphics: bool) -> Self { Self::with_version(title, String::from("default"), enhanced_graphics) } - // create the TuiUI with a given `version`. + // create the TuiUi with a given `version`. #[must_use] pub fn with_version(title: String, version: String, enhanced_graphics: bool) -> Self { Self { @@ -49,7 +49,7 @@ impl TuiUI { enhanced_graphics, show_logs: true, clients_idx: 1, - ..TuiUI::default() + ..TuiUi::default() } } pub fn on_key(&mut self, c: char) { @@ -116,7 +116,7 @@ impl TuiUI { } else { [Constraint::Percentage(50), Constraint::Percentage(50)].as_ref() }) - .split(f.size()); + .split(f.area()); let top_body = body[0]; let mid_body = body[1]; @@ -148,8 +148,7 @@ impl TuiUI { .constraints([Constraint::Length(3), Constraint::Min(0)].as_ref()) .split(left_top_layout[0]); - let mut status_bar: String = self.title.clone(); - status_bar = status_bar + " (" + self.version.as_str() + ")"; + let status_bar: String = format!("{} ({})", self.title, self.version.as_str()); let text = vec![Line::from(Span::styled( &status_bar, @@ -278,7 +277,7 @@ impl TuiUI { let left_top_layout = Layout::default() .direction(Direction::Vertical) - .constraints([Constraint::Length(6), Constraint::Length(0)].as_ref()) + .constraints([Constraint::Length(6), Constraint::Length(5)].as_ref()) .split(left_layout[0]); let left_bottom_layout = left_top_layout[1]; self.draw_process_timing_text(f, app, left_top_layout[0], false); @@ -286,7 +285,7 @@ impl TuiUI { let right_top_layout = Layout::default() .direction(Direction::Vertical) - .constraints([Constraint::Length(7), Constraint::Length(0)].as_ref()) + .constraints([Constraint::Length(7), Constraint::Length(5)].as_ref()) .split(right_layout); let right_bottom_layout = right_top_layout[1]; self.draw_item_geometry_text(f, app, right_top_layout[0], false); @@ -425,18 +424,22 @@ impl TuiUI { area: Rect, is_overall: bool, ) { - let item_geometry: ItemGeometry = if is_overall { - app.read().unwrap().total_item_geometry.clone() + let tui_context = app.read().unwrap(); + let empty_geometry: ItemGeometry = ItemGeometry::new(); + let item_geometry: &ItemGeometry = if is_overall { + &tui_context.total_item_geometry } else if self.clients < 2 { - ItemGeometry::new() + &empty_geometry } else { - app.read() - .unwrap() - .clients - .get(&self.clients_idx) - .unwrap() - .item_geometry - .clone() + let clients = &tui_context.clients; + let client = clients.get(&self.clients_idx); + let client = client.as_ref(); + if let Some(client) = client { + &client.item_geometry + } else { + log::warn!("Client {} was `None`. 
Race condition?", &self.clients_idx); + &empty_geometry + } }; let items = vec![ @@ -458,7 +461,7 @@ impl TuiUI { ]), Row::new(vec![ Cell::from(Span::raw("stability")), - Cell::from(Span::raw(item_geometry.stability)), + Cell::from(Span::raw(&item_geometry.stability)), ]), ]; @@ -495,26 +498,25 @@ impl TuiUI { area: Rect, is_overall: bool, ) { - let tup: (Duration, ProcessTiming) = if is_overall { - let tui_context = app.read().unwrap(); - ( - tui_context.start_time, - tui_context.total_process_timing.clone(), - ) + let tui_context = app.read().unwrap(); + let empty_timing: ProcessTiming = ProcessTiming::new(); + let tup: (Duration, &ProcessTiming) = if is_overall { + (tui_context.start_time, &tui_context.total_process_timing) } else if self.clients < 2 { - (current_time(), ProcessTiming::new()) + (current_time(), &empty_timing) } else { - let client = app - .read() - .unwrap() - .clients - .get(&self.clients_idx) - .unwrap() - .clone(); - ( - client.process_timing.client_start_time, - client.process_timing, - ) + let clients = &tui_context.clients; + let client = clients.get(&self.clients_idx); + let client = client.as_ref(); + if let Some(client) = client { + ( + client.process_timing.client_start_time, + &client.process_timing, + ) + } else { + log::warn!("Client {} was `None`. Race condition?", &self.clients_idx); + (current_time(), &empty_timing) + } }; let items = vec![ Row::new(vec![ @@ -523,7 +525,7 @@ impl TuiUI { ]), Row::new(vec![ Cell::from(Span::raw("exec speed")), - Cell::from(Span::raw(tup.1.exec_speed)), + Cell::from(Span::raw(&tup.1.exec_speed)), ]), Row::new(vec![ Cell::from(Span::raw("last new entry")), @@ -646,10 +648,6 @@ impl TuiUI { ] }; - let chunks = Layout::default() - .constraints([Constraint::Percentage(100)].as_ref()) - .split(area); - let table = Table::default() .rows(items) .block( @@ -663,7 +661,7 @@ impl TuiUI { .borders(Borders::ALL), ) .widths([Constraint::Ratio(1, 2), Constraint::Ratio(1, 2)]); - f.render_widget(table, chunks[0]); + f.render_widget(table, area); } fn draw_client_generic_text( @@ -702,10 +700,6 @@ impl TuiUI { ] }; - let chunks = Layout::default() - .constraints([Constraint::Percentage(100)].as_ref()) - .split(area); - let table = Table::default() .rows(items) .block( @@ -719,7 +713,7 @@ impl TuiUI { .borders(Borders::ALL), ) .widths([Constraint::Ratio(1, 2), Constraint::Ratio(1, 2)]); - f.render_widget(table, chunks[0]); + f.render_widget(table, area); } #[cfg(feature = "introspection")] diff --git a/libafl/src/mutators/encoded_mutations.rs b/libafl/src/mutators/encoded_mutations.rs index 53fa0aebd3..deca65177d 100644 --- a/libafl/src/mutators/encoded_mutations.rs +++ b/libafl/src/mutators/encoded_mutations.rs @@ -1,7 +1,10 @@ //! Mutations for [`EncodedInput`]s //! 
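The hunks in this and the following mutator files all migrate `Rand::below` call sites to the new `NonZero<usize>` bound argument. A condensed sketch of the two idioms the patch uses, assuming the post-patch `libafl_bolts` API (constant bounds via the `nonzero!` macro, runtime bounds proven non-zero before use):

```rust
use core::num::NonZero;

use libafl::nonzero;
use libafl_bolts::rands::{Rand, StdRand};

fn main() {
    let mut rand = StdRand::with_seed(0);

    // Constant bound: `nonzero!` turns the literal into a `NonZero` value.
    let coin = rand.below(nonzero!(2));
    assert!(coin < 2);

    // Runtime bound: prove it is non-zero first, otherwise skip (as the mutators do).
    let size: usize = 16;
    if let Some(bound) = NonZero::new(size) {
        let offset = rand.below(bound);
        assert!(offset < size);
    }
}
```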
use alloc::{borrow::Cow, vec::Vec}; -use core::cmp::{max, min}; +use core::{ + cmp::{max, min}, + num::NonZero, +}; use libafl_bolts::{ rands::Rand, @@ -10,12 +13,12 @@ use libafl_bolts::{ use crate::{ corpus::Corpus, - inputs::{EncodedInput, UsesInput}, + inputs::EncodedInput, mutators::{ mutations::{buffer_copy, buffer_self_copy, ARITH_MAX}, MutationResult, Mutator, Named, }, - random_corpus_id_with_disabled, + nonzero, random_corpus_id_with_disabled, state::{HasCorpus, HasMaxSize, HasRand}, Error, }; @@ -123,8 +126,8 @@ impl Mutator for EncodedAddMutator { Ok(MutationResult::Skipped) } else { let val = state.rand_mut().choose(input.codes_mut()).unwrap(); - let num = 1 + state.rand_mut().below(ARITH_MAX) as u32; - *val = match state.rand_mut().below(2) { + let num = 1 + state.rand_mut().below(nonzero!(ARITH_MAX)) as u32; + *val = match state.rand_mut().below(nonzero!(2)) { 0 => val.wrapping_add(num), _ => val.wrapping_sub(num), }; @@ -158,9 +161,16 @@ impl Mutator for EncodedDeleteMutator { if size <= 2 { return Ok(MutationResult::Skipped); } - - let off = state.rand_mut().below(size); - let len = state.rand_mut().below(size - off); + // # Safety + // The size is larger than 1 here (checked just above) + let off = state + .rand_mut() + .below(unsafe { NonZero::new(size).unwrap_unchecked() }); + // # Safety + // The size of the offset is below size, the value is never 0. + let len = state + .rand_mut() + .below(unsafe { NonZero::new(size - off).unwrap_unchecked() }); input.codes_mut().drain(off..off + len); Ok(MutationResult::Mutated) @@ -195,11 +205,17 @@ where fn mutate(&mut self, state: &mut S, input: &mut EncodedInput) -> Result { let max_size = state.max_size(); let size = input.codes().len(); - if size == 0 { + let Some(nz) = NonZero::new(size) else { return Ok(MutationResult::Skipped); - } - let off = state.rand_mut().below(size + 1); - let mut len = 1 + state.rand_mut().below(min(16, size)); + }; + + // # Safety + // The input.codes() len should never be close to an usize, so adding 1 will always result in a non-zero value. + // Worst case, we will get a wrong int value as return, not too bad. + let off = state + .rand_mut() + .below(unsafe { NonZero::new(size + 1).unwrap_unchecked() }); + let mut len = 1 + state.rand_mut().below(nz); if size + len > max_size { if max_size > size { @@ -209,10 +225,10 @@ where } } - let from = if size == len { - 0 + let from = if let Some(bound) = NonZero::new(size - len) { + state.rand_mut().below(bound) } else { - state.rand_mut().below(size - len) + 0 }; input.codes_mut().resize(size + len, 0); @@ -252,11 +268,21 @@ impl Mutator for EncodedCopyMutator { let size = input.codes().len(); if size <= 1 { return Ok(MutationResult::Skipped); - } + }; - let from = state.rand_mut().below(size); - let to = state.rand_mut().below(size); - let len = 1 + state.rand_mut().below(size - max(from, to)); + // # Safety + // it's larger than 1 + let from = state + .rand_mut() + .below(unsafe { NonZero::new(size).unwrap_unchecked() }); + let to = state + .rand_mut() + .below(unsafe { NonZero::new(size).unwrap_unchecked() }); + // # Safety + // Both from and to are smaller than size, so size minus any of these can never be 0. 
+ let len = 1 + state + .rand_mut() + .below(unsafe { NonZero::new(size - max(from, to)).unwrap_unchecked() }); unsafe { buffer_self_copy(input.codes_mut(), from, to, len); @@ -285,23 +311,29 @@ impl EncodedCopyMutator { #[derive(Debug, Default)] pub struct EncodedCrossoverInsertMutator; -impl Mutator for EncodedCrossoverInsertMutator +impl Mutator for EncodedCrossoverInsertMutator where - S: UsesInput + HasRand + HasCorpus + HasMaxSize, + S: HasRand + HasCorpus + HasMaxSize, + S::Corpus: Corpus, { fn mutate(&mut self, state: &mut S, input: &mut EncodedInput) -> Result { let size = input.codes().len(); + let id = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); // We don't want to use the testcase we're already using for splicing - let idx = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); if let Some(cur) = state.corpus().current() { - if idx == *cur { + if id == *cur { return Ok(MutationResult::Skipped); } } + let Some(nz) = NonZero::new(size) else { + return Ok(MutationResult::Skipped); + }; + let other_size = { - let mut other_testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + // new scope to make the borrow checker happy + let mut other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); other_testcase.load_input(state.corpus())?.codes().len() }; @@ -309,10 +341,18 @@ where return Ok(MutationResult::Skipped); } + // # Safety + // it's larger than 1 let max_size = state.max_size(); - let from = state.rand_mut().below(other_size); - let to = state.rand_mut().below(size); - let mut len = 1 + state.rand_mut().below(other_size - from); + let from = state + .rand_mut() + .below(unsafe { NonZero::new(other_size).unwrap_unchecked() }); + let to = state.rand_mut().below(nz); + // # Safety + // from is smaller than other_size, other_size is larger than 2, so the subtraction is larger than 0. + let mut len = 1 + state + .rand_mut() + .below(unsafe { NonZero::new(other_size - from).unwrap_unchecked() }); if size + len > max_size { if max_size > size { @@ -322,7 +362,7 @@ where } } - let other_testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + let other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); // no need to `load_input` again - we did that above already. 
let other = other_testcase.input().as_ref().unwrap(); @@ -355,39 +395,50 @@ impl EncodedCrossoverInsertMutator { #[derive(Debug, Default)] pub struct EncodedCrossoverReplaceMutator; -impl Mutator for EncodedCrossoverReplaceMutator +impl Mutator for EncodedCrossoverReplaceMutator where - S: UsesInput + HasRand + HasCorpus, + S: HasRand + HasCorpus, + S::Corpus: Corpus, { fn mutate(&mut self, state: &mut S, input: &mut EncodedInput) -> Result { let size = input.codes().len(); - if size == 0 { - return Ok(MutationResult::Skipped); - } + let id = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); // We don't want to use the testcase we're already using for splicing - let idx = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); if let Some(cur) = state.corpus().current() { - if idx == *cur { + if id == *cur { return Ok(MutationResult::Skipped); } } let other_size = { // new scope to make the borrow checker happy - let mut other_testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + let mut other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); other_testcase.load_input(state.corpus())?.codes().len() }; if other_size < 2 { return Ok(MutationResult::Skipped); } + // # Safety + // other_size >= 2 + let from = state + .rand_mut() + .below(unsafe { NonZero::new(other_size).unwrap_unchecked() }); - let from = state.rand_mut().below(other_size); - let len = state.rand_mut().below(min(other_size - from, size)); - let to = state.rand_mut().below(size - len); + // # Safety + // size > 0, other_size > from, + let len = state + .rand_mut() + .below(unsafe { NonZero::new(min(other_size - from, size)).unwrap_unchecked() }); - let other_testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + // # Safety + // size is non-zero, len is below min(size, ...), so the subtraction will always be positive. + let to = state + .rand_mut() + .below(unsafe { NonZero::new(size - len).unwrap_unchecked() }); + + let other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); // no need to load the input again, it'll already be present at this point. let other = other_testcase.input().as_ref().unwrap(); diff --git a/libafl/src/mutators/gramatron.rs b/libafl/src/mutators/gramatron.rs index 5fcd7161a6..987a81f210 100644 --- a/libafl/src/mutators/gramatron.rs +++ b/libafl/src/mutators/gramatron.rs @@ -1,7 +1,8 @@ -//! Gramatron is the rewritten gramatron fuzzer in rust. +//! [`GramatronRandomMutator`] is a random mutator using grammar automatons to perform grammar-aware fuzzing. +//! //! See the original gramatron repo [`Gramatron`](https://github.com/HexHive/Gramatron) for more details. 
use alloc::{borrow::Cow, vec::Vec}; -use core::cmp::max; +use core::{cmp::max, num::NonZero}; use hashbrown::HashMap; use libafl_bolts::{ @@ -11,11 +12,11 @@ use libafl_bolts::{ use serde::{Deserialize, Serialize}; use crate::{ - corpus::{Corpus, HasTestcase}, + corpus::Corpus, generators::GramatronGenerator, inputs::{GramatronInput, Terminal}, mutators::{MutationResult, Mutator}, - random_corpus_id, + nonzero, random_corpus_id, state::{HasCorpus, HasRand}, Error, HasMetadata, }; @@ -31,7 +32,7 @@ where generator: &'a GramatronGenerator<'a, S>, } -impl<'a, S> Mutator for GramatronRandomMutator<'a, S> +impl Mutator for GramatronRandomMutator<'_, S> where S: HasRand + HasMetadata, { @@ -41,7 +42,12 @@ where input: &mut GramatronInput, ) -> Result { if !input.terminals().is_empty() { - let size = state.rand_mut().below(input.terminals().len() + 1); + // # Safety + // We can assume that the count of terminals + 1 will never wrap around (otherwise it will break somewhere else). + // So len + 1 is always non-zero. + let size = state + .rand_mut() + .below(unsafe { NonZero::new(input.terminals().len() + 1).unwrap_unchecked() }); input.terminals_mut().truncate(size); } if self.generator.append_generated_terminals(input, state) > 0 { @@ -52,7 +58,7 @@ where } } -impl<'a, S> Named for GramatronRandomMutator<'a, S> +impl Named for GramatronRandomMutator<'_, S> where S: HasRand + HasMetadata, { @@ -104,26 +110,27 @@ impl GramatronIdxMapMetadata { #[derive(Default, Debug)] pub struct GramatronSpliceMutator; -impl Mutator for GramatronSpliceMutator +impl Mutator for GramatronSpliceMutator where - S: HasRand + HasCorpus + HasMetadata + HasTestcase, + S: HasRand + HasCorpus + HasMetadata, + S::Corpus: Corpus, { fn mutate( &mut self, state: &mut S, input: &mut GramatronInput, ) -> Result { - if input.terminals().is_empty() { + let Some(terminals_len) = NonZero::new(input.terminals().len()) else { return Ok(MutationResult::Skipped); - } + }; - let idx = random_corpus_id!(state.corpus(), state.rand_mut()); + let id = random_corpus_id!(state.corpus(), state.rand_mut()); - let insert_at = state.rand_mut().below(input.terminals().len()); + let insert_at = state.rand_mut().below(terminals_len); let rand_num = state.rand_mut().next(); - let mut other_testcase = state.corpus().get(idx)?.borrow_mut(); + let mut other_testcase = state.corpus().get(id)?.borrow_mut(); if !other_testcase.has_metadata::() { let meta = GramatronIdxMapMetadata::new(other_testcase.load_input(state.corpus())?); @@ -215,8 +222,12 @@ where let chosen = *state.rand_mut().choose(&self.states).unwrap(); let chosen_nums = self.counters.get(&chosen).unwrap().0; + let Some(minus_one) = NonZero::new(chosen_nums - 1) else { + return Ok(MutationResult::Skipped); + }; + #[allow(clippy::cast_sign_loss, clippy::pedantic)] - let mut first = state.rand_mut().below(chosen_nums - 1) as i64; + let mut first = state.rand_mut().below(minus_one) as i64; #[allow(clippy::cast_sign_loss, clippy::pedantic)] let mut second = state .rand_mut() @@ -248,7 +259,7 @@ where input.terminals_mut().truncate(idx_1); - for _ in 0..state.rand_mut().below(RECUR_THRESHOLD) { + for _ in 0..state.rand_mut().below(nonzero!(RECUR_THRESHOLD)) { input.terminals_mut().extend_from_slice(&self.feature); } diff --git a/libafl/src/mutators/grimoire.rs b/libafl/src/mutators/grimoire.rs index 97fa1d62ac..1a8300d41f 100644 --- a/libafl/src/mutators/grimoire.rs +++ b/libafl/src/mutators/grimoire.rs @@ -2,7 +2,10 @@ //! 
See the original repo [`Grimoire`](https://github.com/RUB-SysSec/grimoire) for more details. use alloc::{borrow::Cow, vec::Vec}; -use core::cmp::{max, min}; +use core::{ + cmp::{max, min}, + num::NonZero, +}; use libafl_bolts::{ rands::{choose, fast_bound, Rand}, @@ -30,14 +33,14 @@ fn extend_with_random_generalized( where S: HasMetadata + HasRand + HasCorpus, { - let idx = random_corpus_id!(state.corpus(), state.rand_mut()); + let id = random_corpus_id!(state.corpus(), state.rand_mut()); if state.rand_mut().coinflip(CHOOSE_SUBINPUT_PROB) { if state.rand_mut().coinflip(0.5) { let rand1 = state.rand_mut().next(); let rand2 = state.rand_mut().next(); - let other_testcase = state.corpus().get(idx)?.borrow(); + let other_testcase = state.corpus().get(id)?.borrow(); if let Some(other) = other_testcase .metadata_map() .get::() @@ -88,7 +91,7 @@ where } } - let other_testcase = state.corpus().get(idx)?.borrow(); + let other_testcase = state.corpus().get(id)?.borrow(); if let Some(other) = other_testcase .metadata_map() .get::() @@ -246,15 +249,21 @@ where let tokens_len = { let meta = state.metadata_map().get::(); if let Some(tokens) = meta { - if tokens.is_empty() { + if let Some(tokens_len) = NonZero::new(tokens.tokens().len()) { + tokens_len + } else { return Ok(MutationResult::Skipped); } - tokens.tokens().len() } else { return Ok(MutationResult::Skipped); } }; + let gen = generalised_meta.generalized_mut(); + let Some(_) = NonZero::new(gen.len()) else { + return Err(Error::illegal_state("No generalized metadata found.")); + }; + let token_find = state.rand_mut().below(tokens_len); let mut token_replace = state.rand_mut().below(tokens_len); if token_find == token_replace { @@ -270,8 +279,11 @@ where let mut mutated = MutationResult::Skipped; - let gen = generalised_meta.generalized_mut(); - let rand_idx = fast_bound(rand_idx, gen.len()); + // # Safety + // gen.len() is positive. + let rand_idx = fast_bound(rand_idx, unsafe { + NonZero::new(gen.len()).unwrap_unchecked() + }); 'first: for item in &mut gen[..rand_idx] { if let GeneralizedItem::Bytes(bytes) = item { @@ -279,7 +291,7 @@ where while bytes .len() .checked_sub(token_1.len()) - .map_or(false, |len| i < len) + .is_some_and(|len| i < len) { if bytes[i..].starts_with(token_1) { bytes.splice(i..(i + token_1.len()), token_2.iter().copied()); @@ -302,7 +314,7 @@ where while bytes .len() .checked_sub(token_1.len()) - .map_or(false, |len| i < len) + .is_some_and(|len| i < len) { if bytes[i..].starts_with(token_1) { bytes.splice(i..(i + token_1.len()), token_2.iter().copied()); @@ -363,8 +375,15 @@ where { self.gap_indices.push(i); } - let min_idx = self.gap_indices[state.rand_mut().below(self.gap_indices.len())]; - let max_idx = self.gap_indices[state.rand_mut().below(self.gap_indices.len())]; + + let Some(gap_indeces_len) = NonZero::new(self.gap_indices.len()) else { + return Err(Error::illegal_state( + "Gap indices may not be empty in grimoire mutator!", + )); + }; + + let min_idx = self.gap_indices[state.rand_mut().below(gap_indeces_len)]; + let max_idx = self.gap_indices[state.rand_mut().below(gap_indeces_len)]; let (min_idx, max_idx) = (min(min_idx, max_idx), max(min_idx, max_idx)); diff --git a/libafl/src/mutators/havoc_mutations.rs b/libafl/src/mutators/havoc_mutations.rs new file mode 100644 index 0000000000..0e278e0475 --- /dev/null +++ b/libafl/src/mutators/havoc_mutations.rs @@ -0,0 +1,273 @@ +//! 
[`crate::mutators::Mutator`] collection equivalent to AFL++'s havoc mutations + +use libafl_bolts::tuples::{Map, Merge}; +use tuple_list::{tuple_list, tuple_list_type}; + +use crate::mutators::{ + mapping::{ + MappedInputFunctionMappingMutator, OptionMappingMutator, + ToMappedInputFunctionMappingMutatorMapper, ToOptionMappingMutatorMapper, + }, + mutations::{ + BitFlipMutator, ByteAddMutator, ByteDecMutator, ByteFlipMutator, ByteIncMutator, + ByteInterestingMutator, ByteNegMutator, ByteRandMutator, BytesCopyMutator, + BytesDeleteMutator, BytesExpandMutator, BytesInsertCopyMutator, BytesInsertMutator, + BytesRandInsertMutator, BytesRandSetMutator, BytesSetMutator, BytesSwapMutator, + CrossoverInsertMutator, CrossoverReplaceMutator, DwordAddMutator, DwordInterestingMutator, + MappedCrossoverInsertMutator, MappedCrossoverReplaceMutator, QwordAddMutator, + WordAddMutator, WordInterestingMutator, + }, +}; + +/// Tuple type of the mutations that compose the Havoc mutator without crossover mutations +pub type HavocMutationsNoCrossoverType = tuple_list_type!( + BitFlipMutator, + ByteFlipMutator, + ByteIncMutator, + ByteDecMutator, + ByteNegMutator, + ByteRandMutator, + ByteAddMutator, + WordAddMutator, + DwordAddMutator, + QwordAddMutator, + ByteInterestingMutator, + WordInterestingMutator, + DwordInterestingMutator, + BytesDeleteMutator, + BytesDeleteMutator, + BytesDeleteMutator, + BytesDeleteMutator, + BytesExpandMutator, + BytesInsertMutator, + BytesRandInsertMutator, + BytesSetMutator, + BytesRandSetMutator, + BytesCopyMutator, + BytesInsertCopyMutator, + BytesSwapMutator, +); + +/// Tuple type of the mutations that compose the Havoc mutator's crossover mutations +pub type HavocCrossoverType = tuple_list_type!(CrossoverInsertMutator, CrossoverReplaceMutator); + +/// Tuple type of the mutations that compose the Havoc mutator's crossover mutations for mapped input types +pub type MappedHavocCrossoverType = tuple_list_type!( + MappedCrossoverInsertMutator, + MappedCrossoverReplaceMutator, +); + +/// Tuple type of the mutations that compose the Havoc mutator +pub type HavocMutationsType = tuple_list_type!( + BitFlipMutator, + ByteFlipMutator, + ByteIncMutator, + ByteDecMutator, + ByteNegMutator, + ByteRandMutator, + ByteAddMutator, + WordAddMutator, + DwordAddMutator, + QwordAddMutator, + ByteInterestingMutator, + WordInterestingMutator, + DwordInterestingMutator, + BytesDeleteMutator, + BytesDeleteMutator, + BytesDeleteMutator, + BytesDeleteMutator, + BytesExpandMutator, + BytesInsertMutator, + BytesRandInsertMutator, + BytesSetMutator, + BytesRandSetMutator, + BytesCopyMutator, + BytesInsertCopyMutator, + BytesSwapMutator, + CrossoverInsertMutator, + CrossoverReplaceMutator, +); + +/// Tuple type of the mutations that compose the Havoc mutator for mapped input types +pub type MappedHavocMutationsType = tuple_list_type!( + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + 
MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, +); + +/// Tuple type of the mutations that compose the Havoc mutator for mapped input types, for optional byte array input parts +pub type OptionMappedHavocMutationsType = tuple_list_type!( + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator, F1, II>, + MappedInputFunctionMappingMutator< + OptionMappingMutator>, + F1, + II, + >, + MappedInputFunctionMappingMutator< + OptionMappingMutator>, + F1, + II, + >, +); + +/// Get the mutations that compose the Havoc mutator (only applied to single inputs) +#[must_use] +pub fn havoc_mutations_no_crossover() -> HavocMutationsNoCrossoverType { + tuple_list!( + BitFlipMutator::new(), + ByteFlipMutator::new(), + ByteIncMutator::new(), + ByteDecMutator::new(), + ByteNegMutator::new(), + ByteRandMutator::new(), + ByteAddMutator::new(), + WordAddMutator::new(), + DwordAddMutator::new(), + QwordAddMutator::new(), + ByteInterestingMutator::new(), + WordInterestingMutator::new(), + DwordInterestingMutator::new(), + BytesDeleteMutator::new(), + BytesDeleteMutator::new(), + BytesDeleteMutator::new(), + BytesDeleteMutator::new(), + BytesExpandMutator::new(), + BytesInsertMutator::new(), + BytesRandInsertMutator::new(), + BytesSetMutator::new(), + BytesRandSetMutator::new(), + BytesCopyMutator::new(), + BytesInsertCopyMutator::new(), + BytesSwapMutator::new(), + ) +} + +/// Get the mutations that compose the Havoc mutator's crossover strategy +#[must_use] +pub fn havoc_crossover() -> HavocCrossoverType { + tuple_list!( + CrossoverInsertMutator::new(), + CrossoverReplaceMutator::new(), + ) +} + +/// Get the mutations that compose the Havoc mutator's crossover strategy with custom corpus extraction logic +pub fn havoc_crossover_with_corpus_mapper( + input_mapper: F, +) -> MappedHavocCrossoverType +where + F: Clone + Fn(IO) -> O, +{ + tuple_list!( + MappedCrossoverInsertMutator::new(input_mapper.clone()), + MappedCrossoverReplaceMutator::new(input_mapper.clone()), + ) +} + +/// Get the mutations that compose the Havoc mutator's crossover strategy with custom corpus extraction logic +pub fn havoc_crossover_with_corpus_mapper_optional( + input_mapper: F, +) 
-> MappedHavocCrossoverType +where + F: Clone, +{ + tuple_list!( + MappedCrossoverInsertMutator::new(input_mapper.clone()), + MappedCrossoverReplaceMutator::new(input_mapper.clone()), + ) +} + +/// Get the mutations that compose the Havoc mutator +#[must_use] +pub fn havoc_mutations() -> HavocMutationsType { + havoc_mutations_no_crossover().merge(havoc_crossover()) +} + +/// Get the mutations that compose the Havoc mutator for mapped input types +/// +/// Check the example fuzzer for details on how to use this. +#[must_use] +pub fn mapped_havoc_mutations( + current_input_mapper: F1, + input_from_corpus_mapper: F2, +) -> MappedHavocMutationsType +where + F1: Clone + FnMut(IO1) -> II, + F2: Clone + Fn(IO2) -> O, +{ + havoc_mutations_no_crossover() + .merge(havoc_crossover_with_corpus_mapper(input_from_corpus_mapper)) + .map(ToMappedInputFunctionMappingMutatorMapper::new( + current_input_mapper, + )) +} + +/// Get the mutations that compose the Havoc mutator for mapped input types, for optional input parts +/// +/// Check the example fuzzer for details on how to use this. +#[must_use] +pub fn optional_mapped_havoc_mutations( + current_input_mapper: F1, + input_from_corpus_mapper: F2, +) -> OptionMappedHavocMutationsType +where + F1: Clone + FnMut(IO1) -> II, + F2: Clone + Fn(IO2) -> O, +{ + havoc_mutations_no_crossover() + .merge(havoc_crossover_with_corpus_mapper_optional( + input_from_corpus_mapper, + )) + .map(ToOptionMappingMutatorMapper) + .map(ToMappedInputFunctionMappingMutatorMapper::new( + current_input_mapper, + )) +} diff --git a/libafl/src/mutators/mapping.rs b/libafl/src/mutators/mapping.rs new file mode 100644 index 0000000000..27dc7990d5 --- /dev/null +++ b/libafl/src/mutators/mapping.rs @@ -0,0 +1,414 @@ +//! Allowing mixing and matching between [`Mutator`] and [`crate::inputs::Input`] types. +use alloc::borrow::Cow; +use core::marker::PhantomData; + +use libafl_bolts::{tuples::MappingFunctor, Named}; + +use crate::{ + inputs::MappedInput, + mutators::{MutationResult, Mutator}, + Error, +}; + +/// Mapping [`Mutator`] using a function returning a reference. +/// +/// Allows using [`Mutator`]s for a certain type on (parts of) other input types that can be mapped to this type. 
+/// +/// # Example +#[cfg_attr(feature = "std", doc = " ```")] +#[cfg_attr(not(feature = "std"), doc = " ```ignore")] +/// use std::vec::Vec; +/// +/// use libafl::{ +/// inputs::MutVecInput, +/// mutators::{ +/// ByteIncMutator, FunctionMappingMutator, MappedInputFunctionMappingMutator, +/// MutationResult, Mutator, +/// }, +/// state::NopState, +/// }; +/// +/// type CustomInput = (Vec,); +/// fn extract_to_ref(input: &mut CustomInput) -> &mut Vec { +/// &mut input.0 +/// } +/// +/// fn extract_from_ref(input: &mut Vec) -> MutVecInput<'_> { +/// input.into() +/// } +/// +/// // construct a mapper that works on &mut Vec +/// let inner: MappedInputFunctionMappingMutator<_, _, MutVecInput<'_>> = +/// MappedInputFunctionMappingMutator::new(extract_from_ref, ByteIncMutator::new()); +/// let mut outer = FunctionMappingMutator::new(extract_to_ref, inner); +/// +/// let mut input: CustomInput = (vec![1],); +/// +/// let mut state: NopState = NopState::new(); +/// let res = outer.mutate(&mut state, &mut input).unwrap(); +/// assert_eq!(res, MutationResult::Mutated); +/// assert_eq!(input, (vec![2],)); +/// ``` +#[derive(Debug)] +pub struct FunctionMappingMutator { + mapper: F, + inner: M, + name: Cow<'static, str>, +} + +impl FunctionMappingMutator { + /// Creates a new [`FunctionMappingMutator`] + pub fn new(mapper: F, inner: M) -> Self + where + M: Named, + { + let name = Cow::Owned(format!("FunctionMappingMutator<{}>", inner.name())); + Self { + mapper, + inner, + name, + } + } +} + +impl Mutator for FunctionMappingMutator +where + F: for<'a> FnMut(&'a mut IO) -> &'a mut II, + M: Mutator, +{ + fn mutate(&mut self, state: &mut S, input: &mut IO) -> Result { + self.inner.mutate(state, (self.mapper)(input)) + } +} + +impl Named for FunctionMappingMutator { + fn name(&self) -> &Cow<'static, str> { + &self.name + } +} + +/// Mapper to use to map a [`tuple_list`] of [`Mutator`]s using [`ToFunctionMappingMutatorMapper`]s. +/// +/// See the explanation of [`ToFunctionMappingMutatorMapper`] for details. 
+/// +/// # Example +#[cfg_attr(feature = "std", doc = " ```")] +#[cfg_attr(not(feature = "std"), doc = " ```ignore")] +/// use std::vec::Vec; +/// +/// use libafl::{ +/// inputs::MutVecInput, +/// mutators::{ +/// ByteIncMutator, MappedInputFunctionMappingMutator, MutationResult, Mutator, +/// ToFunctionMappingMutatorMapper, +/// }, +/// state::NopState, +/// }; +/// +/// use libafl_bolts::tuples::{tuple_list, Map}; +/// +/// type CustomInput = (Vec,); +/// fn extract_to_ref(input: &mut CustomInput) -> &mut Vec { +/// &mut input.0 +/// } +/// +/// fn extract_from_ref(input: &mut Vec) -> MutVecInput<'_> { +/// input.into() +/// } +/// +/// // construct a mapper that works on &mut Vec +/// let inner: MappedInputFunctionMappingMutator<_, _, MutVecInput<'_>> = +/// MappedInputFunctionMappingMutator::new(extract_from_ref, ByteIncMutator::new()); +/// let inner_list = tuple_list!(inner); +/// let outer_list = inner_list.map(ToFunctionMappingMutatorMapper::new(extract_to_ref)); +/// let mut outer = outer_list.0; +/// +/// let mut input: CustomInput = (vec![1],); +/// +/// let mut state: NopState = NopState::new(); +/// let res = outer.mutate(&mut state, &mut input).unwrap(); +/// assert_eq!(res, MutationResult::Mutated); +/// assert_eq!(input, (vec![2],)); +/// ``` +#[derive(Debug)] +pub struct ToFunctionMappingMutatorMapper { + mapper: F, +} + +impl ToFunctionMappingMutatorMapper { + /// Creates a new [`ToFunctionMappingMutatorMapper`] + pub fn new(mapper: F) -> Self { + Self { mapper } + } +} + +impl MappingFunctor for ToFunctionMappingMutatorMapper +where + F: Clone, + M: Named, +{ + type Output = FunctionMappingMutator; + + fn apply(&mut self, from: M) -> Self::Output { + FunctionMappingMutator::new(self.mapper.clone(), from) + } +} + +/// Mapping [`Mutator`] using a function returning a wrapped reference (see [`MappedInput`]). +/// +/// Allows using [`Mutator`]s for a certain type on (parts of) other input types that can be mapped to this type. 
+/// +/// # Example +#[cfg_attr(feature = "std", doc = " ```")] +#[cfg_attr(not(feature = "std"), doc = " ```ignore")] +/// use std::vec::Vec; +/// +/// use libafl::{ +/// inputs::MutVecInput, +/// mutators::{ +/// ByteIncMutator, MappedInputFunctionMappingMutator, MutationResult, Mutator, +/// }, +/// state::NopState, +/// }; +/// +/// type CustomInput = (Vec,); +/// fn extract(input: &mut CustomInput) -> MutVecInput<'_> { +/// (&mut input.0).into() +/// } +/// +/// let inner = ByteIncMutator::new(); +/// let mut outer: MappedInputFunctionMappingMutator<_, _, MutVecInput<'_>> = +/// MappedInputFunctionMappingMutator::new(extract, inner); +/// +/// let mut input: CustomInput = (vec![1],); +/// +/// let mut state: NopState = NopState::new(); +/// let res = outer.mutate(&mut state, &mut input).unwrap(); +/// assert_eq!(res, MutationResult::Mutated); +/// assert_eq!(input, (vec![2],)); +/// ``` +#[derive(Debug)] +pub struct MappedInputFunctionMappingMutator { + mapper: F, + inner: M, + name: Cow<'static, str>, + phantom: PhantomData, +} + +impl MappedInputFunctionMappingMutator { + /// Creates a new [`MappedInputFunctionMappingMutator`] + pub fn new(mapper: F, inner: M) -> Self + where + M: Named, + { + let name = Cow::Owned(format!( + "MappedInputFunctionMappingMutator<{}>", + inner.name() + )); + + Self { + mapper, + inner, + name, + phantom: PhantomData, + } + } +} + +impl Mutator for MappedInputFunctionMappingMutator +where + for<'a> M: Mutator, S>, + for<'a> II: MappedInput + 'a, + for<'a> F: FnMut(&'a mut IO) -> II::Type<'a>, +{ + fn mutate(&mut self, state: &mut S, input: &mut IO) -> Result { + let mapped = &mut (self.mapper)(input); + self.inner.mutate(state, mapped) + } +} + +impl Named for MappedInputFunctionMappingMutator { + fn name(&self) -> &Cow<'static, str> { + &self.name + } +} + +/// Mapper to use to map a [`tuple_list`] of [`Mutator`]s using [`MappedInputFunctionMappingMutator`]s. +/// +/// See the explanation of [`MappedInputFunctionMappingMutator`] for details. 
+/// +/// # Example +#[cfg_attr(feature = "std", doc = " ```")] +#[cfg_attr(not(feature = "std"), doc = " ```ignore")] +/// use std::vec::Vec; +/// +/// use libafl::{ +/// inputs::MutVecInput, +/// mutators::{ +/// ByteIncMutator, MappedInputFunctionMappingMutator, MutationResult, Mutator, +/// ToMappedInputFunctionMappingMutatorMapper, +/// }, +/// state::NopState, +/// }; +/// +/// use libafl_bolts::tuples::{tuple_list, Map}; +/// +/// type CustomInput = (Vec,); +/// fn extract(input: &mut CustomInput) -> MutVecInput<'_> { +/// (&mut input.0).into() +/// } +/// +/// let inner = tuple_list!(ByteIncMutator::new()); +/// let outer_list: (MappedInputFunctionMappingMutator<_, _, MutVecInput<'_>>, _) = +/// inner.map(ToMappedInputFunctionMappingMutatorMapper::new(extract)); +/// let mut outer = outer_list.0; +/// +/// let mut input: CustomInput = (vec![1],); +/// +/// let mut state: NopState = NopState::new(); +/// let res = outer.mutate(&mut state, &mut input).unwrap(); +/// assert_eq!(res, MutationResult::Mutated); +/// assert_eq!(input, (vec![2],)); +/// ``` +#[derive(Debug)] +pub struct ToMappedInputFunctionMappingMutatorMapper { + mapper: F, + phantom: PhantomData, +} + +impl ToMappedInputFunctionMappingMutatorMapper { + /// Creates a new [`ToMappedInputFunctionMappingMutatorMapper`] + pub fn new(mapper: F) -> Self + where + F: FnMut(IO) -> II, + { + Self { + mapper, + phantom: PhantomData, + } + } +} + +impl MappingFunctor for ToMappedInputFunctionMappingMutatorMapper +where + F: Clone, + M: Named, +{ + type Output = MappedInputFunctionMappingMutator; + + fn apply(&mut self, from: M) -> Self::Output { + MappedInputFunctionMappingMutator::new(self.mapper.clone(), from) + } +} + +/// Mapping [`Mutator`] for dealing with input parts wrapped in [`Option`]. +/// +/// Allows using [`Mutator`]s for a certain type on (parts of) other input types that can be mapped to an [`Option`] of said type. +/// +/// Returns [`MutationResult::Skipped`] if the mapper returns [`None`]. 
+/// +/// # Example +#[cfg_attr(feature = "std", doc = " ```")] +#[cfg_attr(not(feature = "std"), doc = " ```ignore")] +/// use libafl::{ +/// inputs::MutVecInput, +/// mutators::{ByteIncMutator, MutationResult, Mutator, OptionMappingMutator}, +/// state::NopState, +/// }; +/// +/// let inner = ByteIncMutator::new(); +/// let mut outer = OptionMappingMutator::new(inner); +/// +/// let mut input_raw = vec![1]; +/// let input: MutVecInput = (&mut input_raw).into(); +/// let mut input_wrapped = Some(input); +/// let mut state: NopState> = NopState::new(); +/// let res = outer.mutate(&mut state, &mut input_wrapped).unwrap(); +/// assert_eq!(res, MutationResult::Mutated); +/// assert_eq!(input_raw, vec![2]); +/// +/// let mut empty_input: Option = None; +/// let res2 = outer.mutate(&mut state, &mut empty_input).unwrap(); +/// assert_eq!(res2, MutationResult::Skipped); +/// ``` +#[derive(Debug)] +pub struct OptionMappingMutator { + inner: M, + name: Cow<'static, str>, +} + +impl OptionMappingMutator { + /// Creates a new [`OptionMappingMutator`] + pub fn new(inner: M) -> Self + where + M: Named, + { + let name = Cow::Owned(format!("OptionMappingMutator<{}>", inner.name())); + Self { inner, name } + } +} + +impl Mutator, S> for OptionMappingMutator +where + M: Mutator, +{ + fn mutate(&mut self, state: &mut S, input: &mut Option) -> Result { + match input { + None => Ok(MutationResult::Skipped), + Some(i) => self.inner.mutate(state, i), + } + } +} + +impl Named for OptionMappingMutator +where + M: Named, +{ + fn name(&self) -> &Cow<'static, str> { + &self.name + } +} + +/// Mapper to use to map a [`tuple_list`] of [`Mutator`]s using [`OptionMappingMutator`]s. +/// +/// See the explanation of [`OptionMappingMutator`] for details. +/// +/// # Example +#[cfg_attr(feature = "std", doc = " ```")] +#[cfg_attr(not(feature = "std"), doc = " ```ignore")] +/// use libafl::{ +/// inputs::MutVecInput, +/// mutators::{ByteIncMutator, MutationResult, Mutator, ToOptionMappingMutatorMapper}, +/// state::NopState, +/// }; +/// use libafl_bolts::tuples::{tuple_list, Map}; +/// +/// let inner = tuple_list!(ByteIncMutator::new()); +/// let outer_list = inner.map(ToOptionMappingMutatorMapper); +/// let mut outer = outer_list.0; +/// +/// let mut input_raw = vec![1]; +/// let input: MutVecInput = (&mut input_raw).into(); +/// let mut input_wrapped = Some(input); +/// let mut state: NopState> = NopState::new(); +/// let res = outer.mutate(&mut state, &mut input_wrapped).unwrap(); +/// assert_eq!(res, MutationResult::Mutated); +/// assert_eq!(input_raw, vec![2]); +/// +/// let mut empty_input: Option = None; +/// let res2 = outer.mutate(&mut state, &mut empty_input).unwrap(); +/// assert_eq!(res2, MutationResult::Skipped); +/// ``` +#[derive(Debug)] +pub struct ToOptionMappingMutatorMapper; + +impl MappingFunctor for ToOptionMappingMutatorMapper +where + M: Named, +{ + type Output = OptionMappingMutator; + + fn apply(&mut self, from: M) -> Self::Output { + OptionMappingMutator::new(from) + } +} diff --git a/libafl/src/mutators/mod.rs b/libafl/src/mutators/mod.rs index bf13fe318c..73ab635a1d 100644 --- a/libafl/src/mutators/mod.rs +++ b/libafl/src/mutators/mod.rs @@ -1,5 +1,7 @@ //! [`Mutator`]`s` mutate input during fuzzing. - +//! +//! These can be used standalone or in combination with other mutators to explore the input space more effectively. +//! 
You can read more about mutators in the [libAFL book](https://aflplus.plus/libafl-book/core_concepts/mutator.html) pub mod scheduled; use core::fmt; @@ -9,6 +11,8 @@ pub use mutations::*; pub mod token_mutations; use serde::{Deserialize, Serialize}; pub use token_mutations::*; +pub mod havoc_mutations; +pub use havoc_mutations::*; pub mod encoded_mutations; pub use encoded_mutations::*; pub mod mopt_mutator; @@ -17,6 +21,8 @@ pub mod gramatron; pub use gramatron::*; pub mod grimoire; pub use grimoire::*; +pub mod mapping; +pub use mapping::*; pub mod tuneable; pub use tuneable::*; @@ -94,13 +100,9 @@ pub trait Mutator: Named { fn mutate(&mut self, state: &mut S, input: &mut I) -> Result; /// Post-process given the outcome of the execution - /// `new_corpus_idx` will be `Some` if a new [`crate::corpus::Testcase`] was created this execution. + /// `new_corpus_id` will be `Some` if a new [`crate::corpus::Testcase`] was created this execution. #[inline] - fn post_exec( - &mut self, - _state: &mut S, - _new_corpus_idx: Option, - ) -> Result<(), Error> { + fn post_exec(&mut self, _state: &mut S, _new_corpus_id: Option) -> Result<(), Error> { Ok(()) } } @@ -118,12 +120,12 @@ pub trait MultiMutator: Named { ) -> Result, Error>; /// Post-process given the outcome of the execution - /// `new_corpus_idx` will be `Some` if a new `Testcase` was created this execution. + /// `new_corpus_id` will be `Some` if a new `Testcase` was created this execution. #[inline] fn multi_post_exec( &mut self, _state: &mut S, - _new_corpus_idx: Option, + _new_corpus_id: Option, ) -> Result<(), Error> { Ok(()) } @@ -135,11 +137,11 @@ pub trait MutatorsTuple: HasLen { fn mutate_all(&mut self, state: &mut S, input: &mut I) -> Result; /// Runs the [`Mutator::post_exec`] function on all [`Mutator`]`s` in this `Tuple`. - /// `new_corpus_idx` will be `Some` if a new `Testcase` was created this execution. + /// `new_corpus_id` will be `Some` if a new `Testcase` was created this execution. fn post_exec_all( &mut self, state: &mut S, - new_corpus_idx: Option, + new_corpus_id: Option, ) -> Result<(), Error>; /// Gets the [`Mutator`] at the given index and runs the `mutate` function on it. @@ -151,20 +153,14 @@ pub trait MutatorsTuple: HasLen { ) -> Result; /// Gets the [`Mutator`] at the given index and runs the `post_exec` function on it. - /// `new_corpus_idx` will be `Some` if a new `Testcase` was created this execution. + /// `new_corpus_id` will be `Some` if a new `Testcase` was created this execution. fn get_and_post_exec( &mut self, index: usize, state: &mut S, - corpus_idx: Option, + corpus_id: Option, ) -> Result<(), Error>; - - /// Gets all names of the wrapped [`Mutator`]`s`, reversed. - fn names_reversed(&self) -> Vec<&str>; - - /// Gets all names of the wrapped [`Mutator`]`s`. 
- fn names(&self) -> Vec<&str>; } impl MutatorsTuple for () { @@ -177,7 +173,7 @@ impl MutatorsTuple for () { fn post_exec_all( &mut self, _state: &mut S, - _new_corpus_idx: Option, + _new_corpus_id: Option, ) -> Result<(), Error> { Ok(()) } @@ -197,20 +193,10 @@ impl MutatorsTuple for () { &mut self, _index: usize, _state: &mut S, - _new_corpus_idx: Option, + _new_corpus_id: Option, ) -> Result<(), Error> { Ok(()) } - - #[inline] - fn names_reversed(&self) -> Vec<&str> { - Vec::new() - } - - #[inline] - fn names(&self) -> Vec<&str> { - Vec::new() - } } impl MutatorsTuple for (Head, Tail) @@ -230,10 +216,10 @@ where fn post_exec_all( &mut self, state: &mut S, - new_corpus_idx: Option, + new_corpus_id: Option, ) -> Result<(), Error> { - self.0.post_exec(state, new_corpus_idx)?; - self.1.post_exec_all(state, new_corpus_idx) + self.0.post_exec(state, new_corpus_id)?; + self.1.post_exec_all(state, new_corpus_id) } fn get_and_mutate( @@ -253,26 +239,14 @@ where &mut self, index: usize, state: &mut S, - new_corpus_idx: Option, + new_corpus_id: Option, ) -> Result<(), Error> { if index == 0 { - self.0.post_exec(state, new_corpus_idx) + self.0.post_exec(state, new_corpus_id) } else { - self.1.get_and_post_exec(index - 1, state, new_corpus_idx) + self.1.get_and_post_exec(index - 1, state, new_corpus_id) } } - - fn names_reversed(&self) -> Vec<&str> { - let mut ret = self.1.names_reversed(); - ret.push(self.0.name()); - ret - } - - fn names(&self) -> Vec<&str> { - let mut ret = self.names_reversed(); - ret.reverse(); - ret - } } impl IntoVec>> for (Head, Tail) @@ -305,9 +279,9 @@ where fn post_exec_all( &mut self, state: &mut S, - new_corpus_idx: Option, + new_corpus_id: Option, ) -> Result<(), Error> { - self.0.post_exec_all(state, new_corpus_idx) + self.0.post_exec_all(state, new_corpus_id) } fn get_and_mutate( @@ -323,17 +297,9 @@ where &mut self, index: usize, state: &mut S, - new_corpus_idx: Option, + new_corpus_id: Option, ) -> Result<(), Error> { - self.0.get_and_post_exec(index, state, new_corpus_idx) - } - - fn names(&self) -> Vec<&str> { - self.0.names() - } - - fn names_reversed(&self) -> Vec<&str> { - self.0.names_reversed() + self.0.get_and_post_exec(index, state, new_corpus_id) } } @@ -361,10 +327,10 @@ impl MutatorsTuple for Vec>> { fn post_exec_all( &mut self, state: &mut S, - new_corpus_idx: Option, + new_corpus_id: Option, ) -> Result<(), Error> { for mutator in self.iter_mut() { - mutator.post_exec(state, new_corpus_idx)?; + mutator.post_exec(state, new_corpus_id)?; } Ok(()) } @@ -385,20 +351,12 @@ impl MutatorsTuple for Vec>> { &mut self, index: usize, state: &mut S, - new_corpus_idx: Option, + new_corpus_id: Option, ) -> Result<(), Error> { let mutator = self .get_mut(index) .ok_or_else(|| Error::key_not_found("Mutator with id {index:?} not found."))?; - mutator.post_exec(state, new_corpus_idx) - } - - fn names_reversed(&self) -> Vec<&str> { - self.iter().rev().map(|x| x.name().as_ref()).collect() - } - - fn names(&self) -> Vec<&str> { - self.iter().map(|x| x.name().as_ref()).collect() + mutator.post_exec(state, new_corpus_id) } } diff --git a/libafl/src/mutators/mopt_mutator.rs b/libafl/src/mutators/mopt_mutator.rs index 583ceb41e4..ae90267358 100644 --- a/libafl/src/mutators/mopt_mutator.rs +++ b/libafl/src/mutators/mopt_mutator.rs @@ -1,12 +1,13 @@ -//! The `MOpt` mutator scheduler, see and +//! The `MOpt` mutation scheduler used in AFL++. +//! +//! It uses a modified Particle Swarm Optimization algorithm to determine an optimal distribution of mutators. +//! 
See and use alloc::{borrow::Cow, string::ToString, vec::Vec}; -use core::{ - fmt::{self, Debug}, - marker::PhantomData, -}; +use core::fmt::{self, Debug}; use libafl_bolts::{ rands::{Rand, StdRand}, + tuples::NamedTuple, Named, }; use serde::{Deserialize, Serialize}; @@ -20,9 +21,10 @@ use crate::{ }; /// A Struct for managing MOpt-mutator parameters. +/// /// There are 2 modes for `MOpt` scheduler, the core fuzzing mode and the pilot fuzzing mode. /// In short, in the pilot fuzzing mode, the fuzzer employs several `swarms` to compute the probability to choose the mutation operator. -/// On the other hand, in the core fuzzing mode, the fuzzer chooses the best `swarms`, which was determined during the pilot fuzzing mode, to compute the probability to choose the operation operator. +/// On the other hand, in the core fuzzing mode, the fuzzer chooses the best `swarms`, which was determined during the pilot fuzzing mode, to compute the probability to choose the mutation operator. /// With the current implementation we are always in the pacemaker fuzzing mode. #[derive(Serialize, Deserialize, Clone)] #[cfg_attr( @@ -359,35 +361,16 @@ pub enum MOptMode { /// This is the main struct of `MOpt`, an `AFL` mutator. /// See the original `MOpt` implementation in -pub struct StdMOptMutator -where - MT: MutatorsTuple, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, -{ +#[derive(Debug)] +pub struct StdMOptMutator { name: Cow<'static, str>, mode: MOptMode, finds_before: usize, mutations: MT, max_stack_pow: usize, - phantom: PhantomData<(I, S)>, } -impl Debug for StdMOptMutator -where - MT: MutatorsTuple, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "StdMOptMutator with {} mutations for Input type {}", - self.mutations.len(), - core::any::type_name::() - ) - } -} - -impl Mutator for StdMOptMutator +impl Mutator for StdMOptMutator where MT: MutatorsTuple, S: HasRand + HasMetadata + HasCorpus + HasSolutions, @@ -399,7 +382,7 @@ where } #[allow(clippy::cast_precision_loss)] - fn post_exec(&mut self, state: &mut S, _new_corpus_idx: Option) -> Result<(), Error> { + fn post_exec(&mut self, state: &mut S, _new_corpus_id: Option) -> Result<(), Error> { let before = self.finds_before; let after = state.corpus().count() + state.solutions().count(); @@ -512,32 +495,36 @@ where } } -impl StdMOptMutator -where - MT: MutatorsTuple, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, -{ +impl StdMOptMutator { /// Create a new [`StdMOptMutator`]. 
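+    ///
+    /// A rough usage sketch (illustrative only; `state` is assumed to be a fully set-up
+    /// state implementing `HasMetadata` + `HasRand`, e.g. a `StdState`):
+    ///
+    /// ```ignore
+    /// use libafl::{
+    ///     mutators::{havoc_mutations, StdMOptMutator},
+    ///     stages::StdMutationalStage,
+    /// };
+    /// use libafl_bolts::tuples::tuple_list;
+    ///
+    /// // 7 = max stack power for stacked mutations, 5 = number of MOpt swarms
+    /// let mutator = StdMOptMutator::new(&mut state, havoc_mutations(), 7, 5)?;
+    /// let mut stages = tuple_list!(StdMutationalStage::new(mutator));
+    /// ```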
- pub fn new( + pub fn new( state: &mut S, mutations: MT, max_stack_pow: usize, swarm_num: usize, - ) -> Result { + ) -> Result + where + S: HasMetadata + HasRand, + MT: NamedTuple, + { if !state.has_metadata::() { let rand_seed = state.rand_mut().next(); - state.add_metadata::(MOpt::new(mutations.len(), swarm_num, rand_seed)?); + state.add_metadata::(MOpt::new(MT::LEN, swarm_num, rand_seed)?); } + Ok(Self { name: Cow::from(format!("StdMOptMutator[{}]", mutations.names().join(","))), mode: MOptMode::Pilotfuzzing, finds_before: 0, mutations, max_stack_pow, - phantom: PhantomData, }) } - fn core_mutate(&mut self, state: &mut S, input: &mut I) -> Result { + fn core_mutate(&mut self, state: &mut S, input: &mut I) -> Result + where + S: HasMetadata + HasRand + HasSolutions + HasCorpus, + MT: MutatorsTuple, + { let mut r = MutationResult::Skipped; let mopt = state.metadata_map_mut().get_mut::().unwrap(); for i in 0..mopt.operator_num { @@ -560,7 +547,11 @@ where Ok(r) } - fn pilot_mutate(&mut self, state: &mut S, input: &mut I) -> Result { + fn pilot_mutate(&mut self, state: &mut S, input: &mut I) -> Result + where + S: HasMetadata + HasRand + HasSolutions + HasCorpus, + MT: MutatorsTuple, + { let mut r = MutationResult::Skipped; let swarm_now; { @@ -591,11 +582,9 @@ where } } -impl ComposedByMutations for StdMOptMutator -where - MT: MutatorsTuple, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, -{ +impl ComposedByMutations for StdMOptMutator { + type Mutations = MT; + /// Get the mutations #[inline] fn mutations(&self) -> &MT { @@ -609,24 +598,20 @@ where } } -impl Named for StdMOptMutator -where - MT: MutatorsTuple, - S: HasRand + HasMetadata + HasCorpus + HasSolutions, -{ +impl Named for StdMOptMutator { fn name(&self) -> &Cow<'static, str> { &self.name } } -impl ScheduledMutator for StdMOptMutator +impl ScheduledMutator for StdMOptMutator where MT: MutatorsTuple, S: HasRand + HasMetadata + HasCorpus + HasSolutions, { /// Compute the number of iterations used to apply stacked mutations fn iterations(&self, state: &mut S, _: &I) -> u64 { - 1 << (1 + state.rand_mut().below(self.max_stack_pow)) + 1 << (1 + state.rand_mut().zero_upto(self.max_stack_pow)) } /// Get the next mutation to apply diff --git a/libafl/src/mutators/multi.rs b/libafl/src/mutators/multi.rs index 866629ae48..032e35433c 100644 --- a/libafl/src/mutators/multi.rs +++ b/libafl/src/mutators/multi.rs @@ -1,6 +1,9 @@ //! Mutator definitions for [`MultipartInput`]s. See [`crate::inputs::multi`] for details. 
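+//!
+//! Standard byte-level mutators can be applied to a [`MultipartInput`] directly: the
+//! default multipart implementation picks one part at random and mutates it. A rough
+//! sketch (illustrative only; `state` is assumed to implement `HasRand`):
+//!
+//! ```ignore
+//! use libafl::{
+//!     inputs::{BytesInput, MultipartInput},
+//!     mutators::{ByteIncMutator, Mutator},
+//! };
+//!
+//! let mut input = MultipartInput::from([("header", BytesInput::new(b"foo".to_vec()))]);
+//! let mut mutator = ByteIncMutator::new();
+//! let _res = mutator.mutate(&mut state, &mut input)?;
+//! ```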
-use core::cmp::{min, Ordering}; +use core::{ + cmp::{min, Ordering}, + num::NonZero, +}; use libafl_bolts::{rands::Rand, Error}; @@ -41,17 +44,16 @@ where state: &mut S, input: &mut MultipartInput, ) -> Result { - if input.parts().is_empty() { - Ok(MutationResult::Skipped) - } else { - let selected = state.rand_mut().below(input.parts().len()); - let mutated = input.part_mut(selected).unwrap(); - self.mutate(state, mutated) - } + let Some(parts_len) = NonZero::new(input.parts().len()) else { + return Ok(MutationResult::Skipped); + }; + let selected = state.rand_mut().below(parts_len); + let mutated = input.part_mut(selected).unwrap(); + self.mutate(state, mutated) } - fn post_exec(&mut self, state: &mut S, new_corpus_idx: Option) -> Result<(), Error> { - M::post_exec(self, state, new_corpus_idx) + fn post_exec(&mut self, state: &mut S, new_corpus_id: Option) -> Result<(), Error> { + M::post_exec(self, state, new_corpus_id) } } @@ -114,10 +116,11 @@ impl_default_multipart!( I2SRandReplace, ); -impl Mutator, S> for CrossoverInsertMutator +impl Mutator, S> for CrossoverInsertMutator where - S: HasCorpus> + HasMaxSize + HasRand, + S: HasCorpus + HasMaxSize + HasRand, I: Input + HasMutatorBytes, + S::Corpus: Corpus>, { fn mutate( &mut self, @@ -129,9 +132,12 @@ where let part_choice = state.rand_mut().next() as usize; // We special-case crossover with self - let idx = random_corpus_id!(state.corpus(), state.rand_mut()); + let id = random_corpus_id!(state.corpus(), state.rand_mut()); if let Some(cur) = state.corpus().current() { - if idx == *cur { + if id == *cur { + if input.names().is_empty() { + return Ok(MutationResult::Skipped); + } let choice = name_choice % input.names().len(); let name = input.names()[choice].clone(); @@ -150,14 +156,20 @@ where .parts_by_name(&name) .filter(|&(p, _)| p != choice) .nth(part_choice % parts) - .map(|(idx, part)| (idx, part.bytes().len())); + .map(|(id, part)| (id, part.bytes().len())); if let Some((part_idx, size)) = maybe_size { - if size == 0 { // Workaround for a bug where parts are empty. The origin remains unknown. + let Some(nz) = NonZero::new(size) else { return Ok(MutationResult::Skipped); - } - let target = state.rand_mut().below(size); - let range = rand_range(state, other_size, min(other_size, size - target)); + }; + let target = state.rand_mut().below(nz); + // # Safety + // size is nonzero here (checked above), target is smaller than size + // -> the subtraction result is greater than 0. + // other_size is checked above to be larger than zero. + let range = rand_range(state, other_size, unsafe { + NonZero::new(min(other_size, size - target)).unwrap_unchecked() + }); let [part, chosen] = match part_idx.cmp(&choice) { Ordering::Less => input.parts_mut([part_idx, choice]), @@ -170,14 +182,20 @@ where } }; - return Ok(Self::crossover_insert(part, size, target, range, chosen)); + return Ok(Self::crossover_insert( + part, + size, + target, + range, + chosen.bytes(), + )); } return Ok(MutationResult::Skipped); } } - let mut other_testcase = state.corpus().get(idx)?.borrow_mut(); + let mut other_testcase = state.corpus().get(id)?.borrow_mut(); let other = other_testcase.load_input(state.corpus())?; let choice = name_choice % other.names().len(); @@ -197,14 +215,20 @@ where .unwrap(); drop(other_testcase); let size = part.bytes().len(); - - if size == 0 { // Workaround for a bug where parts are empty. The origin remains unknown. 
+ let Some(nz) = NonZero::new(size) else { return Ok(MutationResult::Skipped); - } - let target = state.rand_mut().below(size); - let range = rand_range(state, other_size, min(other_size, size - target)); + }; - let other_testcase = state.corpus().get(idx)?.borrow_mut(); + let target = state.rand_mut().below(nz); + // # Safety + // other_size is larger than 0, checked above. + // size is larger than 0. + // target is smaller than size -> the subtraction is larger than 0. + let range = rand_range(state, other_size, unsafe { + NonZero::new(min(other_size, size - target)).unwrap_unchecked() + }); + + let other_testcase = state.corpus().get(id)?.borrow_mut(); // No need to load the input again, it'll still be cached. let other = other_testcase.input().as_ref().unwrap(); @@ -213,7 +237,7 @@ where size, target, range, - &other.parts()[choice], + other.parts()[choice].bytes(), )) } else { // just add it! @@ -224,10 +248,11 @@ where } } -impl Mutator, S> for CrossoverReplaceMutator +impl Mutator, S> for CrossoverReplaceMutator where - S: HasCorpus> + HasMaxSize + HasRand, + S: HasCorpus + HasMaxSize + HasRand, I: Input + HasMutatorBytes, + S::Corpus: Corpus>, { fn mutate( &mut self, @@ -239,9 +264,12 @@ where let part_choice = state.rand_mut().next() as usize; // We special-case crossover with self - let idx = random_corpus_id!(state.corpus(), state.rand_mut()); + let id = random_corpus_id!(state.corpus(), state.rand_mut()); if let Some(cur) = state.corpus().current() { - if idx == *cur { + if id == *cur { + if input.names().is_empty() { + return Ok(MutationResult::Skipped); + } let choice = name_choice % input.names().len(); let name = input.names()[choice].clone(); @@ -260,14 +288,20 @@ where .parts_by_name(&name) .filter(|&(p, _)| p != choice) .nth(part_choice % parts) - .map(|(idx, part)| (idx, part.bytes().len())); + .map(|(id, part)| (id, part.bytes().len())); if let Some((part_idx, size)) = maybe_size { - if size == 0 { // Workaround for a bug where parts are empty. The origin remains unknown. + let Some(nz) = NonZero::new(size) else { return Ok(MutationResult::Skipped); - } - let target = state.rand_mut().below(size); - let range = rand_range(state, other_size, min(other_size, size - target)); + }; + + let target = state.rand_mut().below(nz); + // # Safety + // other_size is checked above. + // size is larger than than target and larger than 1. The subtraction result will always be positive. + let range = rand_range(state, other_size, unsafe { + NonZero::new(min(other_size, size - target)).unwrap_unchecked() + }); let [part, chosen] = match part_idx.cmp(&choice) { Ordering::Less => input.parts_mut([part_idx, choice]), @@ -280,14 +314,14 @@ where } }; - return Ok(Self::crossover_replace(part, target, range, chosen)); + return Ok(Self::crossover_replace(part, target, range, chosen.bytes())); } return Ok(MutationResult::Skipped); } } - let mut other_testcase = state.corpus().get(idx)?.borrow_mut(); + let mut other_testcase = state.corpus().get(id)?.borrow_mut(); let other = other_testcase.load_input(state.corpus())?; let choice = name_choice % other.names().len(); @@ -307,15 +341,19 @@ where .unwrap(); drop(other_testcase); let size = part.bytes().len(); - - if size == 0 { // Workaround for a bug where parts are empty. The origin remains unknown. 
+ let Some(nz) = NonZero::new(size) else { return Ok(MutationResult::Skipped); - } + }; - let target = state.rand_mut().below(size); - let range = rand_range(state, other_size, min(other_size, size - target)); + let target = state.rand_mut().below(nz); + // # Safety + // other_size is checked above. + // size is larger than than target and larger than 1. The subtraction result will always be positive. + let range = rand_range(state, other_size, unsafe { + NonZero::new(min(other_size, size - target)).unwrap_unchecked() + }); - let other_testcase = state.corpus().get(idx)?.borrow_mut(); + let other_testcase = state.corpus().get(id)?.borrow_mut(); // No need to load the input again, it'll still be cached. let other = other_testcase.input().as_ref().unwrap(); @@ -323,7 +361,7 @@ where part, target, range, - &other.parts()[choice], + other.parts()[choice].bytes(), )) } else { // just add it! diff --git a/libafl/src/mutators/mutations.rs b/libafl/src/mutators/mutations.rs index 016a927944..4c014fca9b 100644 --- a/libafl/src/mutators/mutations.rs +++ b/libafl/src/mutators/mutations.rs @@ -4,7 +4,13 @@ use alloc::{ borrow::{Cow, ToOwned}, vec::Vec, }; -use core::{cmp::min, marker::PhantomData, mem::size_of, ops::Range}; +use core::{ + cmp::min, + marker::PhantomData, + mem::size_of, + num::{NonZero, NonZeroUsize}, + ops::Range, +}; use libafl_bolts::{rands::Rand, Named}; @@ -12,7 +18,7 @@ use crate::{ corpus::Corpus, inputs::HasMutatorBytes, mutators::{MutationResult, Mutator}, - random_corpus_id_with_disabled, + nonzero, random_corpus_id_with_disabled, state::{HasCorpus, HasMaxSize, HasRand}, Error, }; @@ -64,10 +70,10 @@ pub fn buffer_set(data: &mut [T], from: usize, len: usize, val: T) { /// /// This problem corresponds to: #[inline] -pub fn rand_range(state: &mut S, upper: usize, max_len: usize) -> Range { +pub fn rand_range(state: &mut S, upper: usize, max_len: NonZeroUsize) -> Range { let len = 1 + state.rand_mut().below(max_len); // sample from [1..upper + len] - let mut offset2 = 1 + state.rand_mut().below(upper + len - 1); + let mut offset2 = 1 + state.rand_mut().zero_upto(upper + len - 1); let offset1 = offset2.saturating_sub(len); if offset2 > upper { offset2 = upper; @@ -305,7 +311,7 @@ where Ok(MutationResult::Skipped) } else { let byte = state.rand_mut().choose(input.bytes_mut()).unwrap(); - *byte ^= 1 + state.rand_mut().below(254) as u8; + *byte ^= 1 + state.rand_mut().below(nonzero!(254)) as u8; Ok(MutationResult::Mutated) } } @@ -356,8 +362,8 @@ macro_rules! add_mutator_impl { let val = <$size>::from_ne_bytes(bytes.try_into().unwrap()); // mutate - let num = 1 + state.rand_mut().below(ARITH_MAX) as $size; - let new_val = match state.rand_mut().below(4) { + let num = 1 + state.rand_mut().below(nonzero!(ARITH_MAX)) as $size; + let new_val = match state.rand_mut().below(nonzero!(4)) { 0 => val.wrapping_add(num), 1 => val.wrapping_sub(num), 2 => val.swap_bytes().wrapping_add(num).swap_bytes(), @@ -414,7 +420,11 @@ macro_rules! interesting_mutator_impl { } else { let bytes = input.bytes_mut(); let upper_bound = (bytes.len() + 1 - size_of::<$size>()); - let idx = state.rand_mut().below(upper_bound); + // # Safety + // the length is at least as large as the size here (checked above), and we add a 1 -> never zero. 
+ let idx = state + .rand_mut() + .below(unsafe { NonZero::new(upper_bound).unwrap_unchecked() }); let val = *state.rand_mut().choose(&$interesting).unwrap() as $size; let new_bytes = match state.rand_mut().choose(&[0, 1]).unwrap() { 0 => val.to_be_bytes(), @@ -462,7 +472,11 @@ where return Ok(MutationResult::Skipped); } - let range = rand_range(state, size, size - 1); + // # Safety + // size - 1 is guaranteed to be larger than 0 because we abort on size <= 2 above. + let range = rand_range(state, size, unsafe { + NonZero::new(size - 1).unwrap_unchecked() + }); input.drain(range); @@ -501,7 +515,11 @@ where return Ok(MutationResult::Skipped); } - let range = rand_range(state, size, min(16, max_size - size)); + // # Safety + // max_size - size is larger than 0 because we check that size < max_size above + let range = rand_range(state, size, unsafe { + NonZero::new(min(16, max_size - size)).unwrap_unchecked() + }); input.resize(size + range.len(), 0); unsafe { @@ -548,8 +566,13 @@ where return Ok(MutationResult::Skipped); } - let mut amount = 1 + state.rand_mut().below(16); - let offset = state.rand_mut().below(size + 1); + let mut amount = 1 + state.rand_mut().below(nonzero!(16)); + // # Safety + // It's a safe assumption that size + 1 is never 0. + // If we wrap around we have _a lot_ of elements - and the code will break later anyway. + let offset = state + .rand_mut() + .below(unsafe { NonZero::new(size + 1).unwrap_unchecked() }); if size + amount > max_size { if max_size > size { @@ -559,7 +582,11 @@ where } } - let val = input.bytes()[state.rand_mut().below(size)]; + // # Safety + // size is larger than 0, checked above. + let val = input.bytes()[state + .rand_mut() + .below(unsafe { NonZero::new(size).unwrap_unchecked() })]; input.resize(size + amount, 0); unsafe { @@ -602,8 +629,12 @@ where return Ok(MutationResult::Skipped); } - let mut amount = 1 + state.rand_mut().below(16); - let offset = state.rand_mut().below(size + 1); + let mut amount = 1 + state.rand_mut().below(nonzero!(16)); + // # Safety + // size + 1 can never be 0 + let offset = state + .rand_mut() + .below(unsafe { NonZero::new(size.wrapping_add(1)).unwrap_unchecked() }); if size + amount > max_size { if max_size > size { @@ -654,7 +685,11 @@ where if size == 0 { return Ok(MutationResult::Skipped); } - let range = rand_range(state, size, min(size, 16)); + // # Safety + // Size is larger than 0, checked above (and 16 is also lager than 0 FWIW) + let range = rand_range(state, size, unsafe { + NonZero::new(min(size, 16)).unwrap_unchecked() + }); let val = *state.rand_mut().choose(input.bytes()).unwrap(); let quantity = range.len(); @@ -693,7 +728,11 @@ where if size == 0 { return Ok(MutationResult::Skipped); } - let range = rand_range(state, size, min(size, 16)); + // # Safety + // Size is larger than 0, checked above. 16 is larger than 0, according to my math teacher. 
+ let range = rand_range(state, size, unsafe { + NonZero::new(min(size, 16)).unwrap_unchecked() + }); let val = state.rand_mut().next() as u8; let quantity = range.len(); @@ -733,8 +772,16 @@ where return Ok(MutationResult::Skipped); } - let target = state.rand_mut().below(size); - let range = rand_range(state, size, size - target); + // # Safety + // size is always larger than 0 here (checked above) + let target = state + .rand_mut() + .below(unsafe { NonZero::new(size).unwrap_unchecked() }); + // # Safety + // target is smaller than size (`below` is exclusive) -> The subtraction is always larger than 0 + let range = rand_range(state, size, unsafe { + NonZero::new(size - target).unwrap_unchecked() + }); unsafe { buffer_self_copy(input.bytes_mut(), range.start, target, range.len()); @@ -776,10 +823,20 @@ where return Ok(MutationResult::Skipped); } - let target = state.rand_mut().below(size); + // # Safety + // We checked that size is larger than 0 above. + let target = state + .rand_mut() + .below(unsafe { NonZero::new(size).unwrap_unchecked() }); // make sure that the sampled range is both in bounds and of an acceptable size let max_insert_len = min(size - target, state.max_size() - size); - let range = rand_range(state, size, min(16, max_insert_len)); + let max_insert_len = min(16, max_insert_len); + + // # Safety + // size > target and state.max_size() > size + let max_insert_len = unsafe { NonZero::new(max_insert_len).unwrap_unchecked() }; + + let range = rand_range(state, size, max_insert_len); input.resize(size + range.len(), 0); self.tmp_buf.resize(range.len(), 0); @@ -837,11 +894,19 @@ where return Ok(MutationResult::Skipped); } - let first = rand_range(state, size, size); + // # Safety + // size is larger than 0, checked above. + let first = rand_range(state, size, unsafe { + NonZero::new(size).unwrap_unchecked() + }); if state.rand_mut().next() & 1 == 0 && first.start != 0 { // The second range comes before first. - let second = rand_range(state, first.start, first.start); + // # Safety + // first.start is larger than 0, checked above. + let second = rand_range(state, first.start, unsafe { + NonZero::new(first.start).unwrap_unchecked() + }); self.tmp_buf.resize(first.len(), 0); unsafe { // If range first is larger @@ -921,8 +986,11 @@ where } Ok(MutationResult::Mutated) } else if first.end != size { - // The first range comes before the second range - let mut second = rand_range(state, size - first.end, size - first.end); + // # Safety + // first.end is not equal to size, so subtracting them can never be 0. 
+ let mut second = rand_range(state, size - first.end, unsafe { + NonZero::new(size - first.end).unwrap_unchecked() + }); second.start += first.end; second.end += first.end; @@ -1025,18 +1093,19 @@ impl BytesSwapMutator { /// Crossover insert mutation for inputs with a bytes vector #[derive(Debug, Default)] -pub struct CrossoverInsertMutator { - phantom: PhantomData, -} +pub struct CrossoverInsertMutator; -impl CrossoverInsertMutator { - pub(crate) fn crossover_insert( +impl CrossoverInsertMutator { + pub(crate) fn crossover_insert( input: &mut I, size: usize, target: usize, range: Range, - other: &I2, - ) -> MutationResult { + other: &[u8], + ) -> MutationResult + where + I: HasMutatorBytes, + { input.resize(size + range.len(), 0); unsafe { buffer_self_copy( @@ -1048,42 +1117,39 @@ impl CrossoverInsertMutator { } unsafe { - buffer_copy( - input.bytes_mut(), - other.bytes(), - range.start, - target, - range.len(), - ); + buffer_copy(input.bytes_mut(), other, range.start, target, range.len()); } MutationResult::Mutated } } -impl Mutator for CrossoverInsertMutator +impl Mutator for CrossoverInsertMutator where S: HasCorpus + HasRand + HasMaxSize, - S::Input: HasMutatorBytes, + ::Input: HasMutatorBytes, I: HasMutatorBytes, { fn mutate(&mut self, state: &mut S, input: &mut I) -> Result { let size = input.bytes().len(); + let Some(nonzero_size) = NonZero::new(size) else { + return Ok(MutationResult::Skipped); + }; + let max_size = state.max_size(); if size >= max_size { return Ok(MutationResult::Skipped); } + let id = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); // We don't want to use the testcase we're already using for splicing - let idx = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); - if let Some(cur) = state.corpus().current() { - if idx == *cur { + if id == *cur { return Ok(MutationResult::Skipped); } } let other_size = { - let mut other_testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + let mut other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); other_testcase.load_input(state.corpus())?.bytes().len() }; @@ -1091,64 +1157,68 @@ where return Ok(MutationResult::Skipped); } - let range = rand_range(state, other_size, min(other_size, max_size - size)); - let target = state.rand_mut().below(size); + // # Safety + // other_size is checked above. + // size is smaller than max_size (also checked above) -> the subtraction result is larger than 0. + let range = rand_range(state, other_size, unsafe { + NonZero::new(min(other_size, max_size - size)).unwrap_unchecked() + }); + let target = state.rand_mut().below(nonzero_size); - let other_testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + let other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); // No need to load the input again, it'll still be cached. let other = other_testcase.input().as_ref().unwrap(); - Ok(Self::crossover_insert(input, size, target, range, other)) + Ok(Self::crossover_insert( + input, + size, + target, + range, + other.bytes(), + )) } } -impl Named for CrossoverInsertMutator { +impl Named for CrossoverInsertMutator { fn name(&self) -> &Cow<'static, str> { static NAME: Cow<'static, str> = Cow::Borrowed("CrossoverInsertMutator"); &NAME } } -impl CrossoverInsertMutator { +impl CrossoverInsertMutator { /// Creates a new [`CrossoverInsertMutator`]. 
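+    ///
+    /// Crossover mutators splice bytes from another corpus entry into the current input,
+    /// so the state must provide a corpus (`HasCorpus` and `HasRand`, plus `HasMaxSize`
+    /// for the insert variant). A rough sketch of wiring them into a scheduled mutator
+    /// (illustrative only; `state` and `input` are assumed to exist):
+    ///
+    /// ```ignore
+    /// use libafl::mutators::{havoc_crossover, Mutator, StdScheduledMutator};
+    ///
+    /// // havoc_crossover() bundles CrossoverInsertMutator and CrossoverReplaceMutator
+    /// let mut mutator = StdScheduledMutator::new(havoc_crossover());
+    /// let _res = mutator.mutate(&mut state, &mut input)?;
+    /// ```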
#[must_use] pub fn new() -> Self { - Self { - phantom: PhantomData, - } + Self {} } } /// Crossover replace mutation for inputs with a bytes vector #[derive(Debug, Default)] -pub struct CrossoverReplaceMutator { - phantom: PhantomData, -} +pub struct CrossoverReplaceMutator; -impl CrossoverReplaceMutator { - pub(crate) fn crossover_replace( +impl CrossoverReplaceMutator { + pub(crate) fn crossover_replace( input: &mut I, target: usize, range: Range, - other: &I2, - ) -> MutationResult { + other: &[u8], + ) -> MutationResult + where + I: HasMutatorBytes, + { unsafe { - buffer_copy( - input.bytes_mut(), - other.bytes(), - range.start, - target, - range.len(), - ); + buffer_copy(input.bytes_mut(), other, range.start, target, range.len()); } MutationResult::Mutated } } -impl Mutator for CrossoverReplaceMutator +impl Mutator for CrossoverReplaceMutator where S: HasCorpus + HasRand, - S::Input: HasMutatorBytes, + ::Input: HasMutatorBytes, I: HasMutatorBytes, { fn mutate(&mut self, state: &mut S, input: &mut I) -> Result { @@ -1157,16 +1227,16 @@ where return Ok(MutationResult::Skipped); } + let id = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); // We don't want to use the testcase we're already using for splicing - let idx = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); if let Some(cur) = state.corpus().current() { - if idx == *cur { + if id == *cur { return Ok(MutationResult::Skipped); } } let other_size = { - let mut testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + let mut testcase = state.corpus().get_from_all(id)?.borrow_mut(); testcase.load_input(state.corpus())?.bytes().len() }; @@ -1174,34 +1244,248 @@ where return Ok(MutationResult::Skipped); } - let target = state.rand_mut().below(size); - let range = rand_range(state, other_size, min(other_size, size - target)); + // # Safety + // Size is > 0 here (checked above) + let target = state + .rand_mut() + .below(unsafe { NonZero::new(size).unwrap_unchecked() }); + // # Safety + // other_size is checked above. + // target is smaller than size (since below is exclusive) -> the subtraction result is larger than 0. + let range = rand_range(state, other_size, unsafe { + NonZero::new(min(other_size, size - target)).unwrap_unchecked() + }); - let other_testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + let other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); // No need to load the input again, it'll still be cached. let other = other_testcase.input().as_ref().unwrap(); - Ok(Self::crossover_replace(input, target, range, other)) + Ok(Self::crossover_replace(input, target, range, other.bytes())) } } -impl Named for CrossoverReplaceMutator { +impl Named for CrossoverReplaceMutator { fn name(&self) -> &Cow<'static, str> { static NAME: Cow<'static, str> = Cow::Borrowed("CrossoverReplaceMutator"); &NAME } } -impl CrossoverReplaceMutator { +impl CrossoverReplaceMutator { /// Creates a new [`CrossoverReplaceMutator`]. 
#[must_use] pub fn new() -> Self { + Self {} + } +} + +trait IntoOptionBytes { + type Type<'b>; + + fn into_option_bytes<'a>(self) -> Option<&'a [u8]> + where + Self: 'a; +} + +impl IntoOptionBytes for &[u8] { + type Type<'b> = &'b [u8]; + + fn into_option_bytes<'b>(self) -> Option<&'b [u8]> + where + Self: 'b, + { + Some(self) + } +} + +impl IntoOptionBytes for Option<&[u8]> { + type Type<'b> = Option<&'b [u8]>; + + fn into_option_bytes<'b>(self) -> Option<&'b [u8]> + where + Self: 'b, + { + self + } +} + +/// Crossover insert mutation for inputs mapped to a bytes vector +#[derive(Debug)] +pub struct MappedCrossoverInsertMutator { + input_mapper: F, + phantom: PhantomData, +} + +impl MappedCrossoverInsertMutator { + /// Creates a new [`MappedCrossoverInsertMutator`] + pub fn new(input_mapper: F) -> Self { Self { + input_mapper, phantom: PhantomData, } } } +impl Mutator for MappedCrossoverInsertMutator +where + S: HasCorpus + HasMaxSize + HasRand, + I: HasMutatorBytes, + for<'a> O: IntoOptionBytes, + for<'a> O::Type<'a>: IntoOptionBytes, + for<'a> F: Fn(&'a ::Input) -> ::Type<'a>, +{ + fn mutate(&mut self, state: &mut S, input: &mut I) -> Result { + let size = input.bytes().len(); + let max_size = state.max_size(); + // TODO: fix bug if size is 0 (?) + if size >= max_size || size == 0 { + return Ok(MutationResult::Skipped); + } + + let id = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); + // We don't want to use the testcase we're already using for splicing + if let Some(cur) = state.corpus().current() { + if id == *cur { + return Ok(MutationResult::Skipped); + } + } + + let other_size = { + let mut other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); + let other_input = other_testcase.load_input(state.corpus())?; + let input_mapped = (self.input_mapper)(other_input).into_option_bytes(); + input_mapped.map_or(0, <[u8]>::len) + }; + + if other_size < 2 { + return Ok(MutationResult::Skipped); + } + + // # Safety + // other_size is checked to be larger than 0 + // max_size is checked to be larger than size, so the subtraction will always be positive and non-0 + let range = rand_range(state, other_size, unsafe { + NonZero::new(min(other_size, max_size - size)).unwrap_unchecked() + }); + // # Safety + // size is checked above to never be 0. + let target = state + .rand_mut() + .below(unsafe { NonZero::new(size).unwrap_unchecked() }); + + let other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); + // No need to load the input again, it'll still be cached. 
+ let other_input = &mut other_testcase.input().as_ref().unwrap(); + let wrapped_mapped_other_input = (self.input_mapper)(other_input).into_option_bytes(); + if wrapped_mapped_other_input.is_none() { + return Ok(MutationResult::Skipped); + } + let mapped_other_input = wrapped_mapped_other_input.unwrap(); + + Ok(CrossoverInsertMutator::crossover_insert( + input, + size, + target, + range, + mapped_other_input, + )) + } +} + +impl Named for MappedCrossoverInsertMutator { + fn name(&self) -> &Cow<'static, str> { + static NAME: Cow<'static, str> = Cow::Borrowed("MappedCrossoverInsertMutator"); + &NAME + } +} + +/// Crossover replace mutation for inputs mapped to a bytes vector +#[derive(Debug)] +pub struct MappedCrossoverReplaceMutator { + input_mapper: F, + phantom: PhantomData, +} + +impl MappedCrossoverReplaceMutator { + /// Creates a new [`MappedCrossoverReplaceMutator`] + pub fn new(input_mapper: F) -> Self { + Self { + input_mapper, + phantom: PhantomData, + } + } +} + +impl Mutator for MappedCrossoverReplaceMutator +where + S: HasCorpus + HasMaxSize + HasRand, + I: HasMutatorBytes, + O: IntoOptionBytes, + for<'a> O::Type<'a>: IntoOptionBytes, + for<'a> F: Fn(&'a ::Input) -> ::Type<'a>, +{ + fn mutate(&mut self, state: &mut S, input: &mut I) -> Result { + let size = input.bytes().len(); + if size == 0 { + return Ok(MutationResult::Skipped); + } + + let id = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); + // We don't want to use the testcase we're already using for splicing + if let Some(cur) = state.corpus().current() { + if id == *cur { + return Ok(MutationResult::Skipped); + } + } + + let other_size = { + let mut other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); + let other_input = other_testcase.load_input(state.corpus())?; + let input_mapped = (self.input_mapper)(other_input).into_option_bytes(); + input_mapped.map_or(0, <[u8]>::len) + }; + + if other_size < 2 { + return Ok(MutationResult::Skipped); + } + + // # Safety + // We checked for size == 0 above. + let target = state + .rand_mut() + .below(unsafe { NonZero::new(size).unwrap_unchecked() }); + // # Safety + // other_size is checked above to not be 0. + // size is larger than target since below is exclusive -> subtraction is always non-0. + let range = rand_range(state, other_size, unsafe { + NonZero::new(min(other_size, size - target)).unwrap_unchecked() + }); + + let other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); + // No need to load the input again, it'll still be cached. 
+ let other_input = &mut other_testcase.input().as_ref().unwrap(); + let wrapped_mapped_other_input = (self.input_mapper)(other_input).into_option_bytes(); + if wrapped_mapped_other_input.is_none() { + return Ok(MutationResult::Skipped); + } + let mapped_other_input = wrapped_mapped_other_input.unwrap(); + + Ok(CrossoverReplaceMutator::crossover_replace( + input, + target, + range, + mapped_other_input, + )) + } +} + +impl Named for MappedCrossoverReplaceMutator { + fn name(&self) -> &Cow<'static, str> { + static NAME: Cow<'static, str> = Cow::Borrowed("MappedCrossoverReplaceMutator"); + &NAME + } +} + /// Returns the first and last diff position between the given vectors, stopping at the min len fn locate_diffs(this: &[u8], other: &[u8]) -> (i64, i64) { let mut first_diff: i64 = -1; @@ -1223,23 +1507,24 @@ fn locate_diffs(this: &[u8], other: &[u8]) -> (i64, i64) { #[derive(Debug, Default)] pub struct SpliceMutator; -impl Mutator for SpliceMutator +impl Mutator for SpliceMutator where S: HasCorpus + HasRand, - S::Input: HasMutatorBytes, + ::Input: HasMutatorBytes, + I: HasMutatorBytes, { #[allow(clippy::cast_sign_loss)] - fn mutate(&mut self, state: &mut S, input: &mut S::Input) -> Result { + fn mutate(&mut self, state: &mut S, input: &mut I) -> Result { + let id = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); // We don't want to use the testcase we're already using for splicing - let idx = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); if let Some(cur) = state.corpus().current() { - if idx == *cur { + if id == *cur { return Ok(MutationResult::Skipped); } } let (first_diff, last_diff) = { - let mut other_testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + let mut other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); let other = other_testcase.load_input(state.corpus())?; let (f, l) = locate_diffs(input.bytes(), other.bytes()); @@ -1253,7 +1538,7 @@ where let split_at = state.rand_mut().between(first_diff, last_diff); - let other_testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + let other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); // Input will already be loaded. let other = other_testcase.input().as_ref().unwrap(); diff --git a/libafl/src/mutators/nautilus.rs b/libafl/src/mutators/nautilus.rs index aa633f7f2d..02ee3a50c0 100644 --- a/libafl/src/mutators/nautilus.rs +++ b/libafl/src/mutators/nautilus.rs @@ -1,5 +1,5 @@ //! Mutators for the `Nautilus` grammmar fuzzer - +//! 
See use alloc::borrow::Cow; use core::fmt::Debug; @@ -162,7 +162,7 @@ impl Debug for NautilusSpliceMutator<'_> { impl Mutator for NautilusSpliceMutator<'_> where - S: HasCorpus + HasMetadata + HasRand, + S: HasCorpus + HasMetadata + HasRand, { fn mutate( &mut self, diff --git a/libafl/src/mutators/scheduled.rs b/libafl/src/mutators/scheduled.rs index 57cb5a76d7..dc3d76a02e 100644 --- a/libafl/src/mutators/scheduled.rs +++ b/libafl/src/mutators/scheduled.rs @@ -2,14 +2,14 @@ use alloc::{borrow::Cow, vec::Vec}; use core::{ - fmt::{self, Debug}, - marker::PhantomData, + fmt::Debug, + num::NonZero, ops::{Deref, DerefMut}, }; use libafl_bolts::{ rands::Rand, - tuples::{tuple_list, tuple_list_type, Merge, NamedTuple}, + tuples::{tuple_list, tuple_list_type, HasConstLen, NamedTuple}, Named, }; use serde::{Deserialize, Serialize}; @@ -18,17 +18,10 @@ use super::MutationId; use crate::{ corpus::{Corpus, CorpusId}, mutators::{ - mutations::{ - BitFlipMutator, ByteAddMutator, ByteDecMutator, ByteFlipMutator, ByteIncMutator, - ByteInterestingMutator, ByteNegMutator, ByteRandMutator, BytesCopyMutator, - BytesDeleteMutator, BytesExpandMutator, BytesInsertCopyMutator, BytesInsertMutator, - BytesRandInsertMutator, BytesRandSetMutator, BytesSetMutator, BytesSwapMutator, - CrossoverInsertMutator, CrossoverReplaceMutator, DwordAddMutator, - DwordInterestingMutator, QwordAddMutator, WordAddMutator, WordInterestingMutator, - }, token_mutations::{TokenInsert, TokenReplace}, MutationResult, Mutator, MutatorsTuple, }, + nonzero, state::{HasCorpus, HasRand}, Error, HasMetadata, }; @@ -68,21 +61,20 @@ impl LogMutationMetadata { } /// A [`Mutator`] that composes multiple mutations into one. -pub trait ComposedByMutations -where - MT: MutatorsTuple, -{ +pub trait ComposedByMutations { + /// The mutations of this + type Mutations; /// Get the mutations - fn mutations(&self) -> &MT; + fn mutations(&self) -> &Self::Mutations; /// Get the mutations (mutable) - fn mutations_mut(&mut self) -> &mut MT; + fn mutations_mut(&mut self) -> &mut Self::Mutations; } /// A [`Mutator`] scheduling multiple [`Mutator`]s for an input. -pub trait ScheduledMutator: ComposedByMutations + Mutator +pub trait ScheduledMutator: ComposedByMutations + Mutator where - MT: MutatorsTuple, + Self::Mutations: MutatorsTuple, { /// Compute the number of iterations used to apply stacked mutations fn iterations(&self, state: &mut S, input: &I) -> u64; @@ -107,43 +99,20 @@ where } /// A [`Mutator`] that schedules one of the embedded mutations on each call. 
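+///
+/// A typical setup wraps the havoc mutations (illustrative sketch; assumes a `state`
+/// implementing `HasRand`, plus `HasCorpus`/`HasMaxSize` for the crossover mutations,
+/// and a bytes-backed `input`):
+///
+/// ```ignore
+/// use libafl::mutators::{havoc_mutations, Mutator, StdScheduledMutator};
+///
+/// let mut mutator = StdScheduledMutator::new(havoc_mutations());
+/// let _res = mutator.mutate(&mut state, &mut input)?;
+/// ```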
-pub struct StdScheduledMutator -where - MT: MutatorsTuple, - S: HasRand, -{ +#[derive(Debug)] +pub struct StdScheduledMutator { name: Cow<'static, str>, mutations: MT, max_stack_pow: usize, - phantom: PhantomData<(I, S)>, } -impl Debug for StdScheduledMutator -where - MT: MutatorsTuple, - S: HasRand, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "StdScheduledMutator with {} mutations for Input type {}", - self.mutations.len(), - core::any::type_name::() - ) - } -} - -impl Named for StdScheduledMutator -where - MT: MutatorsTuple, - S: HasRand, -{ +impl Named for StdScheduledMutator { fn name(&self) -> &Cow<'static, str> { &self.name } } -impl Mutator for StdScheduledMutator +impl Mutator for StdScheduledMutator where MT: MutatorsTuple, S: HasRand, @@ -154,11 +123,8 @@ where } } -impl ComposedByMutations for StdScheduledMutator -where - MT: MutatorsTuple, - S: HasRand, -{ +impl ComposedByMutations for StdScheduledMutator { + type Mutations = MT; /// Get the mutations #[inline] fn mutations(&self) -> &MT { @@ -172,27 +138,31 @@ where } } -impl ScheduledMutator for StdScheduledMutator +impl ScheduledMutator for StdScheduledMutator where MT: MutatorsTuple, S: HasRand, { /// Compute the number of iterations used to apply stacked mutations fn iterations(&self, state: &mut S, _: &I) -> u64 { - 1 << (1 + state.rand_mut().below(self.max_stack_pow)) + 1 << (1 + state.rand_mut().zero_upto(self.max_stack_pow)) } /// Get the next mutation to apply fn schedule(&self, state: &mut S, _: &I) -> MutationId { - debug_assert!(self.mutations.len() != 0); - state.rand_mut().below(self.mutations.len()).into() + debug_assert_ne!(self.mutations.len(), 0); + // # Safety + // We check for empty mutations + state + .rand_mut() + .below(unsafe { NonZero::new(self.mutations.len()).unwrap_unchecked() }) + .into() } } -impl StdScheduledMutator +impl StdScheduledMutator where - MT: MutatorsTuple, - S: HasRand, + MT: NamedTuple, { /// Create a new [`StdScheduledMutator`] instance specifying mutations pub fn new(mutations: MT) -> Self { @@ -203,135 +173,26 @@ where )), mutations, max_stack_pow: 7, - phantom: PhantomData, } } /// Create a new [`StdScheduledMutator`] instance specifying mutations and the maximun number of iterations + /// + /// # Errors + /// Will return [`Error::IllegalArgument`] for `max_stack_pow` of 0. 
+ #[inline] pub fn with_max_stack_pow(mutations: MT, max_stack_pow: usize) -> Self { - StdScheduledMutator { + Self { name: Cow::from(format!( "StdScheduledMutator[{}]", mutations.names().join(", ") )), mutations, max_stack_pow, - phantom: PhantomData, } } } -/// Tuple type of the mutations that compose the Havoc mutator without crossover mutations -pub type HavocMutationsNoCrossoverType = tuple_list_type!( - BitFlipMutator, - ByteFlipMutator, - ByteIncMutator, - ByteDecMutator, - ByteNegMutator, - ByteRandMutator, - ByteAddMutator, - WordAddMutator, - DwordAddMutator, - QwordAddMutator, - ByteInterestingMutator, - WordInterestingMutator, - DwordInterestingMutator, - BytesDeleteMutator, - BytesDeleteMutator, - BytesDeleteMutator, - BytesDeleteMutator, - BytesExpandMutator, - BytesInsertMutator, - BytesRandInsertMutator, - BytesSetMutator, - BytesRandSetMutator, - BytesCopyMutator, - BytesInsertCopyMutator, - BytesSwapMutator, -); - -/// Tuple type of the mutations that compose the Havoc mutator's crossover mutations -pub type HavocCrossoverType = - tuple_list_type!(CrossoverInsertMutator, CrossoverReplaceMutator); - -/// Tuple type of the mutations that compose the Havoc mutator -pub type HavocMutationsType = tuple_list_type!( - BitFlipMutator, - ByteFlipMutator, - ByteIncMutator, - ByteDecMutator, - ByteNegMutator, - ByteRandMutator, - ByteAddMutator, - WordAddMutator, - DwordAddMutator, - QwordAddMutator, - ByteInterestingMutator, - WordInterestingMutator, - DwordInterestingMutator, - BytesDeleteMutator, - BytesDeleteMutator, - BytesDeleteMutator, - BytesDeleteMutator, - BytesExpandMutator, - BytesInsertMutator, - BytesRandInsertMutator, - BytesSetMutator, - BytesRandSetMutator, - BytesCopyMutator, - BytesInsertCopyMutator, - BytesSwapMutator, - CrossoverInsertMutator, - CrossoverReplaceMutator, -); - -/// Get the mutations that compose the Havoc mutator (only applied to single inputs) -#[must_use] -pub fn havoc_mutations_no_crossover() -> HavocMutationsNoCrossoverType { - tuple_list!( - BitFlipMutator::new(), - ByteFlipMutator::new(), - ByteIncMutator::new(), - ByteDecMutator::new(), - ByteNegMutator::new(), - ByteRandMutator::new(), - ByteAddMutator::new(), - WordAddMutator::new(), - DwordAddMutator::new(), - QwordAddMutator::new(), - ByteInterestingMutator::new(), - WordInterestingMutator::new(), - DwordInterestingMutator::new(), - BytesDeleteMutator::new(), - BytesDeleteMutator::new(), - BytesDeleteMutator::new(), - BytesDeleteMutator::new(), - BytesExpandMutator::new(), - BytesInsertMutator::new(), - BytesRandInsertMutator::new(), - BytesSetMutator::new(), - BytesRandSetMutator::new(), - BytesCopyMutator::new(), - BytesInsertCopyMutator::new(), - BytesSwapMutator::new(), - ) -} - -/// Get the mutations that compose the Havoc mutator's crossover strategy -#[must_use] -pub fn havoc_crossover() -> HavocCrossoverType { - tuple_list!( - CrossoverInsertMutator::new(), - CrossoverReplaceMutator::new(), - ) -} - -/// Get the mutations that compose the Havoc mutator -#[must_use] -pub fn havoc_mutations() -> HavocMutationsType { - havoc_mutations_no_crossover().merge(havoc_crossover()) -} - /// Get the mutations that uses the Tokens metadata #[must_use] pub fn tokens_mutations() -> tuple_list_type!(TokenInsert, TokenReplace) { @@ -339,58 +200,32 @@ pub fn tokens_mutations() -> tuple_list_type!(TokenInsert, TokenReplace) { } /// A logging [`Mutator`] that wraps around a [`StdScheduledMutator`]. 
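// A hedged sketch of composing the two schedulers (names as in the sketch
// above; `corpus_id` stands for the id of a freshly saved testcase):
let scheduled = StdScheduledMutator::new(havoc_mutations());
let mut logged = LoggerScheduledMutator::new(scheduled);
let _res = logged.mutate(&mut state, &mut input)?;
// When a corpus id is reported back, `post_exec` attaches the applied
// mutation names as `LogMutationMetadata` on that testcase.
logged.post_exec(&mut state, Some(corpus_id))?;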
-pub struct LoggerScheduledMutator -where - MT: MutatorsTuple + NamedTuple, - S: HasRand + HasCorpus, - SM: ScheduledMutator, -{ +#[derive(Debug)] +pub struct LoggerScheduledMutator { name: Cow<'static, str>, scheduled: SM, mutation_log: Vec, - phantom: PhantomData<(I, MT, S)>, } -impl Debug for LoggerScheduledMutator -where - MT: MutatorsTuple + NamedTuple, - S: HasRand + HasCorpus, - SM: ScheduledMutator, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "LoggerScheduledMutator with {} mutations for Input type {}", - MT::LEN, - core::any::type_name::() - ) - } -} - -impl Named for LoggerScheduledMutator -where - MT: MutatorsTuple + NamedTuple, - S: HasRand + HasCorpus, - SM: ScheduledMutator, -{ +impl Named for LoggerScheduledMutator { fn name(&self) -> &Cow<'static, str> { &self.name } } -impl Mutator for LoggerScheduledMutator +impl Mutator for LoggerScheduledMutator where - MT: MutatorsTuple + NamedTuple, S: HasRand + HasCorpus, - SM: ScheduledMutator, + SM: ScheduledMutator, + SM::Mutations: MutatorsTuple + NamedTuple, { fn mutate(&mut self, state: &mut S, input: &mut I) -> Result { self.scheduled_mutate(state, input) } - fn post_exec(&mut self, state: &mut S, corpus_idx: Option) -> Result<(), Error> { - if let Some(idx) = corpus_idx { - let mut testcase = (*state.corpus_mut().get(idx)?).borrow_mut(); + fn post_exec(&mut self, state: &mut S, corpus_id: Option) -> Result<(), Error> { + if let Some(id) = corpus_id { + let mut testcase = (*state.corpus_mut().get(id)?).borrow_mut(); let mut log = Vec::>::new(); while let Some(idx) = self.mutation_log.pop() { let name = self.scheduled.mutations().name(idx.0).unwrap().clone(); // TODO maybe return an Error on None @@ -405,38 +240,42 @@ where } } -impl ComposedByMutations for LoggerScheduledMutator +impl ComposedByMutations for LoggerScheduledMutator where - MT: MutatorsTuple + NamedTuple, - S: HasRand + HasCorpus, - SM: ScheduledMutator, + SM: ComposedByMutations, { + type Mutations = SM::Mutations; #[inline] - fn mutations(&self) -> &MT { + fn mutations(&self) -> &SM::Mutations { self.scheduled.mutations() } #[inline] - fn mutations_mut(&mut self) -> &mut MT { + fn mutations_mut(&mut self) -> &mut SM::Mutations { self.scheduled.mutations_mut() } } -impl ScheduledMutator for LoggerScheduledMutator +impl ScheduledMutator for LoggerScheduledMutator where - MT: MutatorsTuple + NamedTuple, S: HasRand + HasCorpus, - SM: ScheduledMutator, + SM: ScheduledMutator, + SM::Mutations: MutatorsTuple + NamedTuple, { /// Compute the number of iterations used to apply stacked mutations fn iterations(&self, state: &mut S, _: &I) -> u64 { - 1 << (1 + state.rand_mut().below(6)) + 1 << (1 + state.rand_mut().below(nonzero!(7))) } /// Get the next mutation to apply fn schedule(&self, state: &mut S, _: &I) -> MutationId { - debug_assert!(MT::LEN != 0); - state.rand_mut().below(MT::LEN).into() + debug_assert!(::LEN != 0); + // # Safety + // In debug we check the length. Worst case we end up with an illegal MutationId and fail later. + state + .rand_mut() + .below(unsafe { NonZero::new(::LEN).unwrap_unchecked() }) + .into() } fn scheduled_mutate(&mut self, state: &mut S, input: &mut I) -> Result { @@ -455,11 +294,9 @@ where } } -impl LoggerScheduledMutator +impl LoggerScheduledMutator where - MT: MutatorsTuple + NamedTuple, - S: HasRand + HasCorpus, - SM: ScheduledMutator, + SM: Named, { /// Create a new [`LoggerScheduledMutator`] instance without mutations and corpus /// This mutator logs all mutators. 
@@ -468,7 +305,6 @@ where name: Cow::from(format!("LoggerScheduledMutator[{}]", scheduled.name())), scheduled, mutation_log: vec![], - phantom: PhantomData, } } } @@ -482,9 +318,8 @@ mod tests { feedbacks::ConstFeedback, inputs::{BytesInput, HasMutatorBytes}, mutators::{ - mutations::SpliceMutator, - scheduled::{havoc_mutations, StdScheduledMutator}, - Mutator, + havoc_mutations::havoc_mutations, mutations::SpliceMutator, + scheduled::StdScheduledMutator, Mutator, }, state::StdState, }; @@ -520,19 +355,15 @@ mod tests { log::trace!("{:?}", input.bytes()); // The pre-seeded rand should have spliced at position 2. - assert_eq!(input.bytes(), &[b'a', b'b', b'f']); + assert_eq!(input.bytes(), b"abf"); } #[test] fn test_havoc() { let rand = StdRand::with_seed(0x1337); let mut corpus: InMemoryCorpus = InMemoryCorpus::new(); - corpus - .add(Testcase::new(vec![b'a', b'b', b'c'].into())) - .unwrap(); - corpus - .add(Testcase::new(vec![b'd', b'e', b'f'].into())) - .unwrap(); + corpus.add(Testcase::new(b"abc".to_vec().into())).unwrap(); + corpus.add(Testcase::new(b"def".to_vec().into())).unwrap(); let mut input = corpus.cloned_input_for_id(corpus.first().unwrap()).unwrap(); let input_prior = input.clone(); diff --git a/libafl/src/mutators/token_mutations.rs b/libafl/src/mutators/token_mutations.rs index 5d3b217e97..00c8a0975f 100644 --- a/libafl/src/mutators/token_mutations.rs +++ b/libafl/src/mutators/token_mutations.rs @@ -6,6 +6,7 @@ use core::slice::from_raw_parts; use core::{ fmt::Debug, mem::size_of, + num::NonZero, ops::{Add, AddAssign, Deref}, slice::Iter, }; @@ -17,14 +18,14 @@ use std::{ }; use hashbrown::HashSet; -use libafl_bolts::{rands::Rand, AsSlice}; +use libafl_bolts::{rands::Rand, AsSlice, HasLen}; use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] use crate::mutators::str_decode; use crate::{ corpus::{CorpusId, HasCurrentCorpusId}, - inputs::{HasMutatorBytes, UsesInput}, + inputs::HasMutatorBytes, mutators::{ buffer_self_copy, mutations::buffer_copy, MultiMutator, MutationResult, Mutator, Named, }, @@ -83,7 +84,7 @@ impl Tokens { let mut head = 0; loop { if head >= size { - // Sanity Check + // Make double sure this is not completely off assert!(head == size); break; } @@ -313,15 +314,21 @@ where let Some(meta) = state.metadata_map().get::() else { return Ok(MutationResult::Skipped); }; - if meta.tokens().is_empty() { + if let Some(tokens_len) = NonZero::new(meta.tokens().len()) { + tokens_len + } else { return Ok(MutationResult::Skipped); } - meta.tokens().len() }; let token_idx = state.rand_mut().below(tokens_len); let size = input.bytes().len(); - let off = state.rand_mut().below(size + 1); + // # Safety + // after saturating add it's always above 0 + + let off = state + .rand_mut() + .below(unsafe { NonZero::new(size.saturating_add(1)).unwrap_unchecked() }); let meta = state.metadata_map().get::().unwrap(); let token = &meta.tokens()[token_idx]; @@ -367,28 +374,29 @@ pub struct TokenReplace; impl Mutator for TokenReplace where - S: UsesInput + HasMetadata + HasRand + HasMaxSize, + S: HasMetadata + HasRand + HasMaxSize, I: HasMutatorBytes, { fn mutate(&mut self, state: &mut S, input: &mut I) -> Result { let size = input.bytes().len(); - if size == 0 { + let off = if let Some(nz) = NonZero::new(size) { + state.rand_mut().below(nz) + } else { return Ok(MutationResult::Skipped); - } + }; let tokens_len = { let Some(meta) = state.metadata_map().get::() else { return Ok(MutationResult::Skipped); }; - if meta.tokens().is_empty() { + if let Some(tokens_len) = 
NonZero::new(meta.tokens().len()) { + tokens_len + } else { return Ok(MutationResult::Skipped); } - meta.tokens().len() }; let token_idx = state.rand_mut().below(tokens_len); - let off = state.rand_mut().below(size); - let meta = state.metadata_map().get::().unwrap(); let token = &meta.tokens()[token_idx]; let mut len = token.len(); @@ -426,26 +434,28 @@ pub struct I2SRandReplace; impl Mutator for I2SRandReplace where - S: UsesInput + HasMetadata + HasRand + HasMaxSize, + S: HasMetadata + HasRand + HasMaxSize, I: HasMutatorBytes, { #[allow(clippy::too_many_lines)] fn mutate(&mut self, state: &mut S, input: &mut I) -> Result { let size = input.bytes().len(); - if size == 0 { + let Some(size) = NonZero::new(size) else { return Ok(MutationResult::Skipped); - } + }; let cmps_len = { let Some(meta) = state.metadata_map().get::() else { return Ok(MutationResult::Skipped); }; log::trace!("meta: {:x?}", meta); - if meta.list.is_empty() { - return Ok(MutationResult::Skipped); - } meta.list.len() }; + + let Some(cmps_len) = NonZero::new(cmps_len) else { + return Ok(MutationResult::Skipped); + }; + let idx = state.rand_mut().below(cmps_len); let off = state.rand_mut().below(size); @@ -457,41 +467,41 @@ where let mut result = MutationResult::Skipped; match cmp_values { - CmpValues::U8(v) => { + CmpValues::U8((v1, v2, v1_is_const)) => { for byte in bytes.iter_mut().take(len).skip(off) { - if *byte == v.0 { - *byte = v.1; + if !v1_is_const && *byte == *v1 { + *byte = *v2; result = MutationResult::Mutated; break; - } else if *byte == v.1 { - *byte = v.0; + } else if *byte == *v2 { + *byte = *v1; result = MutationResult::Mutated; break; } } } - CmpValues::U16(v) => { + CmpValues::U16((v1, v2, v1_is_const)) => { if len >= size_of::() { - for i in off..len - (size_of::() - 1) { + for i in off..=len - size_of::() { let val = u16::from_ne_bytes(bytes[i..i + size_of::()].try_into().unwrap()); - if val == v.0 { - let new_bytes = v.1.to_ne_bytes(); + if !v1_is_const && val == *v1 { + let new_bytes = v2.to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; - } else if val.swap_bytes() == v.0 { - let new_bytes = v.1.swap_bytes().to_ne_bytes(); + } else if !v1_is_const && val.swap_bytes() == *v1 { + let new_bytes = v2.swap_bytes().to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; - } else if val == v.1 { - let new_bytes = v.0.to_ne_bytes(); + } else if val == *v2 { + let new_bytes = v1.to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; - } else if val.swap_bytes() == v.1 { - let new_bytes = v.0.swap_bytes().to_ne_bytes(); + } else if val.swap_bytes() == *v2 { + let new_bytes = v1.swap_bytes().to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; @@ -499,28 +509,28 @@ where } } } - CmpValues::U32(v) => { + CmpValues::U32((v1, v2, v1_is_const)) => { if len >= size_of::() { - for i in off..len - (size_of::() - 1) { + for i in off..=len - size_of::() { let val = u32::from_ne_bytes(bytes[i..i + size_of::()].try_into().unwrap()); - if val == v.0 { - let new_bytes = v.1.to_ne_bytes(); + if !v1_is_const && val == *v1 { + let new_bytes = v2.to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; - } else if val.swap_bytes() == v.0 { - let new_bytes = v.1.swap_bytes().to_ne_bytes(); + } else if !v1_is_const && val.swap_bytes() == *v1 { + 
let new_bytes = v2.swap_bytes().to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; - } else if val == v.1 { - let new_bytes = v.0.to_ne_bytes(); + } else if val == *v2 { + let new_bytes = v1.to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; - } else if val.swap_bytes() == v.1 { - let new_bytes = v.0.swap_bytes().to_ne_bytes(); + } else if val.swap_bytes() == *v2 { + let new_bytes = v1.swap_bytes().to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; @@ -528,28 +538,28 @@ where } } } - CmpValues::U64(v) => { + CmpValues::U64((v1, v2, v1_is_const)) => { if len >= size_of::() { - for i in off..len - (size_of::() - 1) { + for i in off..=len - size_of::() { let val = u64::from_ne_bytes(bytes[i..i + size_of::()].try_into().unwrap()); - if val == v.0 { - let new_bytes = v.1.to_ne_bytes(); + if !v1_is_const && val == *v1 { + let new_bytes = v2.to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; - } else if val.swap_bytes() == v.0 { - let new_bytes = v.1.swap_bytes().to_ne_bytes(); + } else if !v1_is_const && val.swap_bytes() == *v1 { + let new_bytes = v2.swap_bytes().to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; - } else if val == v.1 { - let new_bytes = v.0.to_ne_bytes(); + } else if val == *v2 { + let new_bytes = v1.to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; - } else if val.swap_bytes() == v.1 { - let new_bytes = v.0.swap_bytes().to_ne_bytes(); + } else if val.swap_bytes() == *v2 { + let new_bytes = v1.swap_bytes().to_ne_bytes(); bytes[i..i + size_of::()].copy_from_slice(&new_bytes); result = MutationResult::Mutated; break; @@ -561,9 +571,9 @@ where 'outer: for i in off..len { let mut size = core::cmp::min(v.0.len(), len - i); while size != 0 { - if v.0[0..size] == input.bytes()[i..i + size] { + if v.0.as_slice()[0..size] == input.bytes()[i..i + size] { unsafe { - buffer_copy(input.bytes_mut(), &v.1, 0, i, size); + buffer_copy(input.bytes_mut(), v.1.as_slice(), 0, i, size); } result = MutationResult::Mutated; break 'outer; @@ -572,9 +582,9 @@ where } size = core::cmp::min(v.1.len(), len - i); while size != 0 { - if v.1[0..size] == input.bytes()[i..i + size] { + if v.1.as_slice()[0..size] == input.bytes()[i..i + size] { unsafe { - buffer_copy(input.bytes_mut(), &v.0, 0, i, size); + buffer_copy(input.bytes_mut(), v.0.as_slice(), 0, i, size); } result = MutationResult::Mutated; break 'outer; @@ -604,6 +614,213 @@ impl I2SRandReplace { } } +// A `I2SRandReplaceBinonly` [`Mutator`] replaces a random matching input-2-state comparison operand with the other. +/// It needs a valid [`CmpValuesMetadata`] in the state. +/// This version has been designed for binary-only fuzzing, for which cmp sized can be larger than necessary. +#[derive(Debug, Default)] +pub struct I2SRandReplaceBinonly; + +fn random_slice_size(state: &mut S) -> usize +where + S: HasRand, +{ + let sz_log = SZ.ilog2() as usize; + // # Safety + // We add 1 so this can never be 0. + // On 32 bit systems this could overflow in theory but this is highly unlikely. 
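// Worked example for the draw above: with SZ = size_of::<u64>() = 8 the
// exponent range is 0..=3 (sz_log = 3), so the returned width is one of
// 1, 2, 4 or 8 bytes, i.e. a random power-of-two size never wider than SZ.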
+ let sz_log_inclusive = unsafe { NonZero::new(sz_log + 1).unwrap_unchecked() }; + let res = state.rand_mut().below(sz_log_inclusive); + 2_usize.pow(res as u32) +} + +impl Mutator for I2SRandReplaceBinonly +where + S: HasMetadata + HasRand + HasMaxSize, + I: HasMutatorBytes, +{ + #[allow(clippy::too_many_lines)] + fn mutate(&mut self, state: &mut S, input: &mut I) -> Result { + let Some(size) = NonZero::new(input.bytes().len()) else { + return Ok(MutationResult::Skipped); + }; + let Some(meta) = state.metadata_map().get::() else { + return Ok(MutationResult::Skipped); + }; + log::trace!("meta: {:x?}", meta); + + let Some(cmps_len) = NonZero::new(meta.list.len()) else { + return Ok(MutationResult::Skipped); + }; + let idx = state.rand_mut().below(cmps_len); + + let off = state.rand_mut().below(size); + let len = input.bytes().len(); + let bytes = input.bytes_mut(); + + let meta = state.metadata_map().get::().unwrap(); + let cmp_values = &meta.list[idx]; + + // TODO: do not use from_ne_bytes, it's for host not for target!! we should use a from_target_ne_bytes.... + + let mut result = MutationResult::Skipped; + match cmp_values.clone() { + CmpValues::U8(v) => { + for byte in bytes.iter_mut().take(len).skip(off) { + if *byte == v.0 { + *byte = v.1; + result = MutationResult::Mutated; + break; + } else if *byte == v.1 { + *byte = v.0; + result = MutationResult::Mutated; + break; + } + } + } + CmpValues::U16(v) => { + let cmp_size = random_slice_size::<{ size_of::() }, S>(state); + + if len >= cmp_size { + for i in off..len - (cmp_size - 1) { + let mut val_bytes = [0; size_of::()]; + val_bytes[..cmp_size].copy_from_slice(&bytes[i..i + cmp_size]); + let val = u16::from_ne_bytes(val_bytes); + + if val == v.0 { + let new_bytes = &v.1.to_ne_bytes()[..cmp_size]; + bytes[i..i + cmp_size].copy_from_slice(new_bytes); + result = MutationResult::Mutated; + break; + } else if val == v.1 { + let new_bytes = &v.0.to_ne_bytes()[..cmp_size]; + bytes[i..i + cmp_size].copy_from_slice(new_bytes); + result = MutationResult::Mutated; + break; + } else if val.swap_bytes() == v.0 { + let new_bytes = v.1.swap_bytes().to_ne_bytes(); + bytes[i..i + cmp_size].copy_from_slice(&new_bytes[..cmp_size]); + result = MutationResult::Mutated; + break; + } else if val.swap_bytes() == v.1 { + let new_bytes = v.0.swap_bytes().to_ne_bytes(); + bytes[i..i + cmp_size].copy_from_slice(&new_bytes[..cmp_size]); + result = MutationResult::Mutated; + break; + } + } + } + } + CmpValues::U32(v) => { + let cmp_size = random_slice_size::<{ size_of::() }, S>(state); + if len >= cmp_size { + for i in off..len - (cmp_size - 1) { + let mut val_bytes = [0; size_of::()]; + val_bytes[..cmp_size].copy_from_slice(&bytes[i..i + cmp_size]); + let val = u32::from_ne_bytes(val_bytes); + + if val == v.0 { + let new_bytes = &v.1.to_ne_bytes()[..cmp_size]; + bytes[i..i + cmp_size].copy_from_slice(new_bytes); + result = MutationResult::Mutated; + break; + } else if val == v.1 { + let new_bytes = &v.0.to_ne_bytes()[..cmp_size]; + bytes[i..i + cmp_size].copy_from_slice(new_bytes); + result = MutationResult::Mutated; + break; + } else if val.swap_bytes() == v.0 { + let new_bytes = v.1.swap_bytes().to_ne_bytes(); + bytes[i..i + cmp_size].copy_from_slice(&new_bytes[..cmp_size]); + result = MutationResult::Mutated; + break; + } else if val.swap_bytes() == v.1 { + let new_bytes = v.0.swap_bytes().to_ne_bytes(); + bytes[i..i + cmp_size].copy_from_slice(&new_bytes[..cmp_size]); + result = MutationResult::Mutated; + break; + } + } + } + } + CmpValues::U64(v) => { + let 
cmp_size = random_slice_size::<{ size_of::() }, S>(state); + + if len >= cmp_size { + for i in off..(len - (cmp_size - 1)) { + let mut val_bytes = [0; size_of::()]; + val_bytes[..cmp_size].copy_from_slice(&bytes[i..i + cmp_size]); + let val = u64::from_ne_bytes(val_bytes); + + if val == v.0 { + let new_bytes = &v.1.to_ne_bytes()[..cmp_size]; + bytes[i..i + cmp_size].copy_from_slice(new_bytes); + result = MutationResult::Mutated; + break; + } else if val == v.1 { + let new_bytes = &v.0.to_ne_bytes()[..cmp_size]; + bytes[i..i + cmp_size].copy_from_slice(new_bytes); + result = MutationResult::Mutated; + break; + } else if val.swap_bytes() == v.0 { + let new_bytes = v.1.swap_bytes().to_ne_bytes(); + bytes[i..i + cmp_size].copy_from_slice(&new_bytes[..cmp_size]); + result = MutationResult::Mutated; + break; + } else if val.swap_bytes() == v.1 { + let new_bytes = v.0.swap_bytes().to_ne_bytes(); + bytes[i..i + cmp_size].copy_from_slice(&new_bytes[..cmp_size]); + result = MutationResult::Mutated; + break; + } + } + } + } + CmpValues::Bytes(v) => { + 'outer: for i in off..len { + let mut size = core::cmp::min(v.0.len(), len - i); + while size != 0 { + if v.0.as_slice()[0..size] == input.bytes()[i..i + size] { + unsafe { + buffer_copy(input.bytes_mut(), v.1.as_slice(), 0, i, size); + } + result = MutationResult::Mutated; + break 'outer; + } + size -= 1; + } + size = core::cmp::min(v.1.len(), len - i); + while size != 0 { + if v.1.as_slice()[0..size] == input.bytes()[i..i + size] { + unsafe { + buffer_copy(input.bytes_mut(), v.0.as_slice(), 0, i, size); + } + result = MutationResult::Mutated; + break 'outer; + } + size -= 1; + } + } + } + } + + Ok(result) + } +} + +impl Named for I2SRandReplaceBinonly { + fn name(&self) -> &Cow<'static, str> { + static NAME: Cow<'static, str> = Cow::Borrowed("I2SRandReplace"); + &NAME + } +} + +impl I2SRandReplaceBinonly { + /// Creates a new `I2SRandReplace` struct. + #[must_use] + pub fn new() -> Self { + Self + } +} const CMP_ATTTRIBUTE_IS_EQUAL: u8 = 1; const CMP_ATTRIBUTE_IS_GREATER: u8 = 2; const CMP_ATTRIBUTE_IS_LESSER: u8 = 4; @@ -618,9 +835,9 @@ pub struct AFLppRedQueen { enable_transform: bool, enable_arith: bool, text_type: TextType, - /// We use this variable to check if we scheduled a new `corpus_idx` + /// We use this variable to check if we scheduled a new `corpus_id` /// - and, hence, need to recalculate `text_type` - last_corpus_idx: Option, + last_corpus_id: Option, } impl AFLppRedQueen { @@ -1087,7 +1304,7 @@ impl AFLppRedQueen { impl MultiMutator for AFLppRedQueen where - S: UsesInput + HasMetadata + HasRand + HasMaxSize + HasCorpus + HasCurrentCorpusId, + S: HasMetadata + HasRand + HasMaxSize + HasCorpus + HasCurrentCorpusId, I: HasMutatorBytes + From>, { #[allow(clippy::needless_range_loop)] @@ -1135,10 +1352,10 @@ where // println!("orig: {:#?} new: {:#?}", orig_cmpvals, new_cmpvals); // Compute when mutating it for the 1st time. 
- let current_corpus_id = state.current_corpus_id()?.ok_or_else(|| Error::key_not_found("No corpus-idx is currently being fuzzed, but called AFLppRedQueen::multi_mutated()."))?; - if self.last_corpus_idx.is_none() || self.last_corpus_idx.unwrap() != current_corpus_id { + let current_corpus_id = state.current_corpus_id()?.ok_or_else(|| Error::key_not_found("No corpus-id is currently being fuzzed, but called AFLppRedQueen::multi_mutated()."))?; + if self.last_corpus_id.is_none() || self.last_corpus_id.unwrap() != current_corpus_id { self.text_type = check_if_text(orig_bytes, orig_bytes.len()); - self.last_corpus_idx = Some(current_corpus_id); + self.last_corpus_id = Some(current_corpus_id); } // println!("approximate size: {cmp_len} x {input_len}"); for cmp_idx in 0..cmp_len { @@ -1193,7 +1410,7 @@ where None => input_len - cmp_buf_idx, }; - let hshape = (header.shape() + 1) as usize; + let hshape = (header.shape().value() + 1) as usize; match (&orig_val[cmp_h_idx], &new_val[cmp_h_idx]) { (CmpValues::U8(_orig), CmpValues::U8(_new)) => { @@ -1289,7 +1506,7 @@ where } (CmpValues::U16(orig), CmpValues::U16(new)) => { let (orig_v0, orig_v1, new_v0, new_v1) = (orig.0, orig.1, new.0, new.1); - let attribute: u8 = header.attribute() as u8; + let attribute: u8 = header.attribute().value(); if new_v0 != orig_v0 && orig_v0 != orig_v1 { // Compare v0 against v1 @@ -1377,7 +1594,7 @@ where } (CmpValues::U32(orig), CmpValues::U32(new)) => { let (orig_v0, orig_v1, new_v0, new_v1) = (orig.0, orig.1, new.0, new.1); - let attribute = header.attribute() as u8; + let attribute = header.attribute().value(); let mut cmp_found = false; if new_v0 != orig_v0 && orig_v0 != orig_v1 { @@ -1470,7 +1687,7 @@ where } (CmpValues::U64(orig), CmpValues::U64(new)) => { let (orig_v0, orig_v1, new_v0, new_v1) = (orig.0, orig.1, new.0, new.1); - let attribute = header.attribute() as u8; + let attribute = header.attribute().value(); let mut cmp_found = false; if new_v0 != orig_v0 && orig_v0 != orig_v1 { @@ -1568,10 +1785,10 @@ where let mut rtn_found = false; // Compare v0 against v1 rtn_found |= self.rtn_extend_encoding( - orig_v0, - orig_v1, - new_v0, - new_v1, + orig_v0.as_slice(), + orig_v1.as_slice(), + new_v0.as_slice(), + new_v1.as_slice(), new_bytes, orig_bytes, cmp_buf_idx, @@ -1583,10 +1800,10 @@ where // Compare v1 against v0 rtn_found |= self.rtn_extend_encoding( - orig_v1, - orig_v0, - new_v1, - new_v0, + orig_v1.as_slice(), + orig_v0.as_slice(), + new_v1.as_slice(), + new_v0.as_slice(), new_bytes, orig_bytes, cmp_buf_idx, @@ -1601,10 +1818,10 @@ where let mut v1_len = orig_v1.len(); if v0_len > 0 && (is_ascii_or_utf8 - || check_if_text(orig_v0, v0_len).size() == hshape) + || check_if_text(orig_v0.as_slice(), v0_len).size() == hshape) { // this is not utf8. - let v = strlen(orig_v0); + let v = strlen(orig_v0.as_slice()); if v > 0 { v0_len = v; } @@ -1612,10 +1829,10 @@ where if v1_len > 0 && (is_ascii_or_utf8 - || check_if_text(orig_v1, v1_len).size() == hshape) + || check_if_text(orig_v1.as_slice(), v1_len).size() == hshape) { // this is not utf8. 
- let v = strlen(orig_v1); + let v = strlen(orig_v1.as_slice()); if v > 0 { v1_len = v; } @@ -1623,16 +1840,26 @@ where if v0_len > 0 && orig_v0 == new_v0 - && (!rtn_found || check_if_text(orig_v0, v0_len).size() == v0_len) + && (!rtn_found + || check_if_text(orig_v0.as_slice(), v0_len).size() == v0_len) { - Self::try_add_autotokens(&mut gathered_tokens, orig_v0, v0_len); + Self::try_add_autotokens( + &mut gathered_tokens, + orig_v0.as_slice(), + v0_len, + ); } if v1_len > 0 && orig_v1 == new_v1 - && (!rtn_found || check_if_text(orig_v1, v1_len).size() == v1_len) + && (!rtn_found + || check_if_text(orig_v1.as_slice(), v1_len).size() == v1_len) { - Self::try_add_autotokens(&mut gathered_tokens, orig_v1, v1_len); + Self::try_add_autotokens( + &mut gathered_tokens, + orig_v1.as_slice(), + v1_len, + ); } } (_, _) => { @@ -1690,7 +1917,7 @@ impl AFLppRedQueen { enable_transform: false, enable_arith: false, text_type: TextType::None, - last_corpus_idx: None, + last_corpus_id: None, } } @@ -1701,7 +1928,7 @@ impl AFLppRedQueen { enable_transform: transform, enable_arith: arith, text_type: TextType::None, - last_corpus_idx: None, + last_corpus_id: None, } } diff --git a/libafl/src/mutators/tuneable.rs b/libafl/src/mutators/tuneable.rs index 4ef1dec2c9..29b4647a10 100644 --- a/libafl/src/mutators/tuneable.rs +++ b/libafl/src/mutators/tuneable.rs @@ -1,15 +1,14 @@ //! An extension to the `ScheduledMutator` which schedules multiple mutations internally. +//! //! Instead of a random mutator for a random amount of iterations, we can run //! a specific mutator for a specified amount of iterations use alloc::{borrow::Cow, vec::Vec}; -use core::{ - fmt::{self, Debug}, - marker::PhantomData, -}; +use core::{fmt::Debug, num::NonZero}; use libafl_bolts::{ - impl_serdeany, math::calculate_cumulative_distribution_in_place, rands::Rand, Named, + impl_serdeany, math::calculate_cumulative_distribution_in_place, rands::Rand, + tuples::NamedTuple, Named, }; use serde::{Deserialize, Serialize}; @@ -80,33 +79,14 @@ impl TuneableScheduledMutatorMetadata { /// A [`Mutator`] that schedules one of the embedded mutations on each call. /// The index of the next mutation can be set. 
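// A short usage sketch of the tuned scheduling API after this change
// (setters are now instance methods and only require `HasMetadata`; the
// single `ByteIncMutator` here is just an arbitrary example mutation):
let tuneable = TuneableScheduledMutator::new(&mut state, tuple_list!(ByteIncMutator::new()));
// Pin the schedule: apply mutation 0 exactly four times on every `mutate` call.
tuneable.set_mutation_ids_and_iters(&mut state, vec![0.into()], 4);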
-pub struct TuneableScheduledMutator -where - MT: MutatorsTuple, - S: HasRand, -{ +#[derive(Debug)] +pub struct TuneableScheduledMutator { name: Cow<'static, str>, mutations: MT, max_stack_pow: usize, - phantom: PhantomData<(I, S)>, } -impl Debug for TuneableScheduledMutator -where - MT: MutatorsTuple, - S: HasRand, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "TuneableScheduledMutator with {} mutations for Input type {}", - self.mutations.len(), - core::any::type_name::() - ) - } -} - -impl Mutator for TuneableScheduledMutator +impl Mutator for TuneableScheduledMutator where MT: MutatorsTuple, S: HasRand + HasMetadata, @@ -117,11 +97,8 @@ where } } -impl ComposedByMutations for TuneableScheduledMutator -where - MT: MutatorsTuple, - S: HasRand, -{ +impl ComposedByMutations for TuneableScheduledMutator { + type Mutations = MT; /// Get the mutations #[inline] fn mutations(&self) -> &MT { @@ -135,17 +112,13 @@ where } } -impl Named for TuneableScheduledMutator -where - MT: MutatorsTuple, - S: HasRand, -{ +impl Named for TuneableScheduledMutator { fn name(&self) -> &Cow<'static, str> { &self.name } } -impl ScheduledMutator for TuneableScheduledMutator +impl ScheduledMutator for TuneableScheduledMutator where MT: MutatorsTuple, S: HasRand + HasMetadata, @@ -159,7 +132,7 @@ where iters } else { // fall back to random - 1 << (1 + state.rand_mut().below(self.max_stack_pow)) + 1 << (1 + state.rand_mut().zero_upto(self.max_stack_pow)) } } else { // We will sample using the mutation probabilities. @@ -180,7 +153,6 @@ where /// Get the next mutation to apply fn schedule(&self, state: &mut S, _: &I) -> MutationId { - debug_assert!(self.mutations.len() != 0); // Assumption: we can not reach this code path without previously adding this metadatum. let metadata = TuneableScheduledMutatorMetadata::get_mut(state).unwrap(); @@ -223,17 +195,20 @@ where } // fall back to random if no entries in either vec, the scheduling is not tuned. 
- state.rand_mut().below(self.mutations.len()).into() + state + .rand_mut() + .below(NonZero::new(self.mutations.len()).expect("No mutations provided!")) + .into() } } -impl TuneableScheduledMutator -where - MT: MutatorsTuple, - S: HasRand + HasMetadata, -{ +impl TuneableScheduledMutator { /// Create a new [`TuneableScheduledMutator`] instance specifying mutations - pub fn new(state: &mut S, mutations: MT) -> Self { + pub fn new(state: &mut S, mutations: MT) -> Self + where + MT: NamedTuple, + S: HasRand + HasMetadata, + { if !state.has_metadata::() { state.add_metadata(TuneableScheduledMutatorMetadata::default()); } @@ -241,36 +216,22 @@ where name: Cow::from(format!("TuneableMutator[{}]", mutations.names().join(", "))), mutations, max_stack_pow: 7, - phantom: PhantomData, } } } -impl TuneableScheduledMutator<(), (), S> -where - S: HasRand + HasMetadata, -{ - fn metadata_mut(state: &mut S) -> &mut TuneableScheduledMutatorMetadata { - state - .metadata_map_mut() - .get_mut::() - .unwrap() - } - - fn metadata(state: &S) -> &TuneableScheduledMutatorMetadata { - state - .metadata_map() - .get::() - .unwrap() - } - +impl TuneableScheduledMutator { /// Sets the next iterations count, i.e., how many times to mutate the input /// /// Using `set_mutation_ids_and_iter` to set multiple values at the same time /// will be faster than setting them individually /// as it internally only needs a single metadata lookup - pub fn set_iters(state: &mut S, iters: u64) { - let metadata = Self::metadata_mut(state); + pub fn set_iters(&self, state: &mut S, iters: u64) + where + S: HasMetadata, + { + let metadata = TuneableScheduledMutatorMetadata::get_mut(state).unwrap(); + metadata.iters = Some(iters); metadata.iter_probabilities_pow_cumulative.clear(); } @@ -284,16 +245,20 @@ where /// These will be applied for each call of this `mutate` function. /// /// Setting this function will unset everything previously set in `set_iters`. - pub fn set_iter_probabilities_pow( + pub fn set_iter_probabilities_pow( + &self, state: &mut S, mut iter_probabilities_pow: Vec, - ) -> Result<(), Error> { + ) -> Result<(), Error> + where + S: HasMetadata, + { if iter_probabilities_pow.len() >= 32 { return Err(Error::illegal_argument( "Cannot stack more than 2^32 mutations", )); } - let metadata = Self::metadata_mut(state); + let metadata = TuneableScheduledMutatorMetadata::get_mut(state).unwrap(); metadata.iters = None; // we precalculate the cumulative probability to be faster when sampling later. @@ -304,13 +269,19 @@ where } /// Gets the set amount of iterations - pub fn get_iters(state: &S) -> Option { - let metadata = Self::metadata(state); + pub fn get_iters(&self, state: &S) -> Option + where + S: HasMetadata, + { + let metadata = TuneableScheduledMutatorMetadata::get(state).unwrap(); metadata.iters } /// Sets the mutation ids - pub fn set_mutation_ids(state: &mut S, mutations: Vec) { + pub fn set_mutation_ids(&self, state: &mut S, mutations: Vec) + where + S: HasMetadata, + { let metadata = TuneableScheduledMutatorMetadata::get_mut(state).unwrap(); metadata.mutation_ids = mutations; metadata.next_id = 0.into(); @@ -320,10 +291,14 @@ where /// The `Vec` contains a probability per [`MutationId`]: between 0 and 1, and they have to add /// up to 1. /// Setting the probabilities will remove the value set through `set_mutation_ids`. 
- pub fn set_mutation_probabilities( + pub fn set_mutation_probabilities( + &self, state: &mut S, mut mutation_probabilities: Vec, - ) -> Result<(), Error> { + ) -> Result<(), Error> + where + S: HasMetadata, + { let metadata = TuneableScheduledMutatorMetadata::get_mut(state).unwrap(); metadata.mutation_ids.clear(); metadata.next_id = 0.into(); @@ -335,7 +310,14 @@ where } /// mutation ids and iterations - pub fn set_mutation_ids_and_iters(state: &mut S, mutations: Vec, iters: u64) { + pub fn set_mutation_ids_and_iters( + &self, + state: &mut S, + mutations: Vec, + iters: u64, + ) where + S: HasMetadata, + { let metadata = TuneableScheduledMutatorMetadata::get_mut(state).unwrap(); metadata.mutation_ids = mutations; metadata.next_id = 0.into(); @@ -343,14 +325,23 @@ where } /// Appends a mutation id to the end of the mutations - pub fn push_mutation_id(state: &mut S, mutation_id: MutationId) { + pub fn push_mutation_id(state: &mut S, mutation_id: MutationId) + where + S: HasMetadata, + { let metadata = TuneableScheduledMutatorMetadata::get_mut(state).unwrap(); metadata.mutation_ids.push(mutation_id); } /// Resets this to a randomic mutational stage - pub fn reset(state: &mut S) { - let metadata = Self::metadata_mut(state); + pub fn reset(self, state: &mut S) + where + S: HasMetadata, + { + let metadata = state + .metadata_map_mut() + .get_mut::() + .unwrap(); metadata.mutation_ids.clear(); metadata.next_id = 0.into(); metadata.iters = None; @@ -416,44 +407,36 @@ mod test { let input = BytesInput::new(vec![42]); // Basic tests over the probability distribution. - assert!( - TuneableScheduledMutator::set_mutation_probabilities(&mut state, vec![0.0]).is_err() - ); - assert!( - TuneableScheduledMutator::set_mutation_probabilities(&mut state, vec![1.0; 3]).is_err() - ); - assert!(TuneableScheduledMutator::set_mutation_probabilities( - &mut state, - vec![-1.0, 1.0, 1.0] - ) - .is_err()); - assert!(TuneableScheduledMutator::set_mutation_probabilities(&mut state, vec![]).is_err()); + assert!(tuneable + .set_mutation_probabilities(&mut state, vec![0.0]) + .is_err()); + assert!(tuneable + .set_mutation_probabilities(&mut state, vec![1.0; 3]) + .is_err()); + assert!(tuneable + .set_mutation_probabilities(&mut state, vec![-1.0, 1.0, 1.0]) + .is_err()); + assert!(tuneable + .set_mutation_probabilities(&mut state, vec![]) + .is_err()); - assert!(TuneableScheduledMutator::set_mutation_probabilities( - &mut state, - vec![0.0, 0.0, 1.0] - ) - .is_ok()); + assert!(tuneable + .set_mutation_probabilities(&mut state, vec![0.0, 0.0, 1.0]) + .is_ok()); assert_eq!(tuneable.schedule(&mut state, &input), 2.into()); - assert!(TuneableScheduledMutator::set_mutation_probabilities( - &mut state, - vec![0.0, 1.0, 0.0] - ) - .is_ok()); + assert!(tuneable + .set_mutation_probabilities(&mut state, vec![0.0, 1.0, 0.0]) + .is_ok()); assert_eq!(tuneable.schedule(&mut state, &input), 1.into()); - assert!(TuneableScheduledMutator::set_mutation_probabilities( - &mut state, - vec![1.0, 0.0, 0.0] - ) - .is_ok()); + assert!(tuneable + .set_mutation_probabilities(&mut state, vec![1.0, 0.0, 0.0]) + .is_ok()); assert_eq!(tuneable.schedule(&mut state, &input), 0.into()); // We should not choose a mutation with p=0. 
- assert!(TuneableScheduledMutator::set_mutation_probabilities( - &mut state, - vec![0.5, 0.0, 0.5] - ) - .is_ok()); + assert!(tuneable + .set_mutation_probabilities(&mut state, vec![0.5, 0.0, 0.5]) + .is_ok()); assert!(tuneable.schedule(&mut state, &input) != 1.into()); } } diff --git a/libafl/src/mutators/unicode/mod.rs b/libafl/src/mutators/unicode/mod.rs index 137ec86710..d73e5fd58a 100644 --- a/libafl/src/mutators/unicode/mod.rs +++ b/libafl/src/mutators/unicode/mod.rs @@ -3,15 +3,17 @@ use alloc::{borrow::Cow, vec::Vec}; use core::{ cmp::{Ordering, Reverse}, + num::NonZero, ops::Range, }; use libafl_bolts::{rands::Rand, Error, HasLen, Named}; use crate::{ - corpus::{CorpusId, HasTestcase, Testcase}, + corpus::{Corpus, CorpusId, HasTestcase, Testcase}, inputs::{BytesInput, HasMutatorBytes}, mutators::{rand_range, MutationResult, Mutator, Tokens}, + nonzero, stages::{ extract_metadata, mutational::{MutatedTransform, MutatedTransformPost}, @@ -32,7 +34,8 @@ pub type UnicodeInput = (BytesInput, UnicodeIdentificationMetadata); impl MutatedTransform for UnicodeInput where - S: HasCorpus + HasTestcase, + S: HasCorpus + HasTestcase, + S::Corpus: Corpus, { type Post = UnicodeIdentificationMetadata; @@ -51,9 +54,9 @@ impl MutatedTransformPost for UnicodeIdentificationMetadata where S: HasTestcase, { - fn post_exec(self, state: &mut S, corpus_idx: Option) -> Result<(), Error> { - if let Some(corpus_idx) = corpus_idx { - let mut tc = state.testcase_mut(corpus_idx)?; + fn post_exec(self, state: &mut S, corpus_id: Option) -> Result<(), Error> { + if let Some(corpus_id) = corpus_id { + let mut tc = state.testcase_mut(corpus_id)?; tc.add_metadata(self); } Ok(()) @@ -67,13 +70,15 @@ fn choose_start( bytes: &[u8], meta: &UnicodeIdentificationMetadata, ) -> Option<(usize, usize)> { - let idx = rand.below(bytes.len()); + let bytes_len = NonZero::new(bytes.len())?; + + let idx = rand.below(bytes_len); let mut options = Vec::new(); for (start, range) in meta.ranges() { if idx .checked_sub(*start) // idx adjusted to start .and_then(|idx| (idx < range.len()).then(|| range[idx])) // idx in range - .map_or(false, |r| r) + .is_some_and(|r| r) { options.push((*start, range)); } @@ -81,12 +86,15 @@ fn choose_start( match options.len() { 0 => None, 1 => Some((options[0].0, options[0].1.len())), - _ => { + options_len => { + // # Safety + // options.len() is checked above. 
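// Why the option count is squared below: drawing uniformly from 0..n*n and
// taking the integer square root yields index k with probability
// (2k + 1) / n^2, so the pick is biased towards later entries, i.e. towards
// the longer strings once the options are sorted.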
+ let options_len_squared = + unsafe { NonZero::new(options_len * options_len).unwrap_unchecked() }; // bias towards longer strings options.sort_by_cached_key(|(_, entries)| entries.count_ones()); let selected = - libafl_bolts::math::integer_sqrt(rand.below(options.len() * options.len()) as u64) - as usize; + libafl_bolts::math::integer_sqrt(rand.below(options_len_squared) as u64) as usize; Some((options[selected].0, options[selected].1.len())) } } @@ -134,7 +142,8 @@ fn choose_category_range( string: &str, ) -> (Range, &'static [(u32, u32)]) { let chars = string.char_indices().collect::>(); - let idx = rand.below(chars.len()); + let chars_len = NonZero::new(chars.len()).expect("Got empty string in choose_category_range"); + let idx = rand.below(chars_len); let c = chars[idx].1; // figure out the categories for this char @@ -160,7 +169,8 @@ fn choose_category_range( .sum::(), ) }); - let options = categories.len() * categories.len(); + let options = NonZero::new(categories.len() * categories.len()) + .expect("Empty categories in choose_category_range"); let selected_idx = libafl_bolts::math::integer_sqrt(rand.below(options) as u64) as usize; let selected = categories[selected_idx]; @@ -178,7 +188,8 @@ fn choose_category_range( fn choose_subcategory_range(rand: &mut R, string: &str) -> (Range, (u32, u32)) { let chars = string.char_indices().collect::>(); - let idx = rand.below(chars.len()); + let idx = + rand.below(NonZero::new(chars.len()).expect("Empty string in choose_subcategory_range")); let c = chars[idx].1; // figure out the categories for this char @@ -197,7 +208,8 @@ fn choose_subcategory_range(rand: &mut R, string: &str) -> (Range char>( range: Range, char_gen: F, ) -> MutationResult { - let temp_range = rand_range(state, range.end - range.start, MAX_CHARS); + let temp_range = rand_range(state, range.end - range.start, nonzero!(MAX_CHARS)); let range = (range.start + temp_range.start)..(range.start + temp_range.end); let range = match core::str::from_utf8(&input.0.bytes()[range.clone()]) { Ok(_) => range, @@ -239,7 +251,7 @@ fn rand_replace_range char>( return MutationResult::Skipped; } - let replace_len = state.rand_mut().below(MAX_CHARS); + let replace_len = state.rand_mut().below(nonzero!(MAX_CHARS)); let orig_len = range.end - range.start; if input.0.len() - orig_len + replace_len > state.max_size() { return MutationResult::Skipped; @@ -304,7 +316,10 @@ where .map(|&(start, end)| end as usize - start as usize + 1) .sum(); let char_gen = |state: &mut S| loop { - let mut selected = state.rand_mut().below(options); + // Should this skip the mutation instead of expecting? 
+ let mut selected = state.rand_mut().below( + NonZero::new(options).expect("Empty category in UnicodeCatgoryRandMutator"), + ); for &(min, max) in category { if let Some(next_selected) = selected.checked_sub(max as usize - min as usize + 1) @@ -359,6 +374,9 @@ where ); let options = subcategory.1 as usize - subcategory.0 as usize + 1; + let Some(options) = NonZero::new(options) else { + return Ok(MutationResult::Skipped); + }; let char_gen = |state: &mut S| loop { let selected = state.rand_mut().below(options); if let Some(new_c) = char::from_u32(selected as u32 + subcategory.0) { @@ -393,15 +411,14 @@ where return Ok(MutationResult::Skipped); } - let tokens_len = { - let Some(meta) = state.metadata_map().get::() else { - return Ok(MutationResult::Skipped); - }; - if meta.tokens().is_empty() { - return Ok(MutationResult::Skipped); - } - meta.tokens().len() + let Some(meta) = state.metadata_map().get::() else { + return Ok(MutationResult::Skipped); }; + + let Some(tokens_len) = NonZero::new(meta.tokens().len()) else { + return Ok(MutationResult::Skipped); + }; + let token_idx = state.rand_mut().below(tokens_len); let bytes = input.0.bytes(); @@ -453,15 +470,14 @@ where return Ok(MutationResult::Skipped); } - let tokens_len = { - let Some(meta) = state.metadata_map().get::() else { - return Ok(MutationResult::Skipped); - }; - if meta.tokens().is_empty() { - return Ok(MutationResult::Skipped); - } - meta.tokens().len() + let Some(meta) = state.metadata_map().get::() else { + return Ok(MutationResult::Skipped); }; + + let Some(tokens_len) = NonZero::new(meta.tokens().len()) else { + return Ok(MutationResult::Skipped); + }; + let token_idx = state.rand_mut().below(tokens_len); let bytes = input.0.bytes(); diff --git a/libafl/src/observers/cmp.rs b/libafl/src/observers/cmp.rs index 3c89616ac6..de35f6e1f3 100644 --- a/libafl/src/observers/cmp.rs +++ b/libafl/src/observers/cmp.rs @@ -1,53 +1,64 @@ //! The `CmpObserver` provides access to the logged values of CMP instructions - use alloc::{borrow::Cow, vec::Vec}; use core::{ fmt::Debug, - marker::PhantomData, ops::{Deref, DerefMut}, }; -use c2rust_bitfields::BitfieldStruct; +use arbitrary_int::{u1, u4, u5, u6}; +use bitbybit::bitfield; use hashbrown::HashMap; -use libafl_bolts::{ownedref::OwnedRefMut, serdeany::SerdeAny, Named}; -use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use libafl_bolts::{ownedref::OwnedRefMut, AsSlice, HasLen, Named}; +use serde::{Deserialize, Serialize}; -use crate::{executors::ExitKind, inputs::UsesInput, observers::Observer, Error, HasMetadata}; +use crate::{executors::ExitKind, observers::Observer, Error, HasMetadata}; -/// Generic metadata trait for use in a `CmpObserver`, which adds comparisons from a `CmpObserver` -/// primarily intended for use with `AFLppCmpValuesMetadata` or `CmpValuesMetadata` -pub trait CmpObserverMetadata<'a, CM>: SerdeAny + Debug -where - CM: CmpMap + Debug, -{ - /// Extra data used by the metadata when adding information from a `CmpObserver`, for example - /// the `original` field in `AFLppCmpLogObserver` - type Data: 'a + Debug + Default + Serialize + DeserializeOwned; +/// A bytes string for cmplog with up to 32 elements. +#[derive(Debug, Copy, Clone, Serialize, Deserialize, Eq, PartialEq)] +pub struct CmplogBytes { + buf: [u8; 32], + len: u8, +} - /// Instantiate a new metadata instance. This is used by `CmpObserver` to create a new - /// metadata if one is missing and `add_meta` is specified. 
This will typically juse call - /// `new()` - fn new_metadata() -> Self; +impl CmplogBytes { + /// Creates a new [`CmplogBytes`] object from the provided buf and length. + /// Lengths above 32 are illegal but will be ignored. + #[must_use] + pub fn from_buf_and_len(buf: [u8; 32], len: u8) -> Self { + debug_assert!(len <= 32, "Len too big: {len}, max: 32"); + CmplogBytes { buf, len } + } +} - /// Add comparisons to a metadata from a `CmpObserver`. `cmp_map` is mutable in case - /// it is needed for a custom map, but this is not utilized for `CmpObserver` or - /// `AFLppCmpLogObserver`. - fn add_from(&mut self, usable_count: usize, cmp_map: &mut CM, cmp_observer_data: Self::Data); +impl<'a> AsSlice<'a> for CmplogBytes { + type Entry = u8; + + type SliceRef = &'a [u8]; + + fn as_slice(&'a self) -> Self::SliceRef { + &self.buf[0..(self.len as usize)] + } +} + +impl HasLen for CmplogBytes { + fn len(&self) -> usize { + self.len as usize + } } /// Compare values collected during a run #[derive(Eq, PartialEq, Debug, Serialize, Deserialize, Clone)] pub enum CmpValues { - /// Two u8 values - U8((u8, u8)), - /// Two u16 values - U16((u16, u16)), - /// Two u32 values - U32((u32, u32)), - /// Two u64 values - U64((u64, u64)), + /// (side 1 of comparison, side 2 of comparison, side 1 value is const) + U8((u8, u8, bool)), + /// (side 1 of comparison, side 2 of comparison, side 1 value is const) + U16((u16, u16, bool)), + /// (side 1 of comparison, side 2 of comparison, side 1 value is const) + U32((u32, u32, bool)), + /// (side 1 of comparison, side 2 of comparison, side 1 value is const) + U64((u64, u64, bool)), /// Two vecs of u8 values/byte - Bytes((Vec, Vec)), + Bytes((CmplogBytes, CmplogBytes)), } impl CmpValues { @@ -62,11 +73,11 @@ impl CmpValues { /// Converts the value to a u64 tuple #[must_use] - pub fn to_u64_tuple(&self) -> Option<(u64, u64)> { + pub fn to_u64_tuple(&self) -> Option<(u64, u64, bool)> { match self { - CmpValues::U8(t) => Some((u64::from(t.0), u64::from(t.1))), - CmpValues::U16(t) => Some((u64::from(t.0), u64::from(t.1))), - CmpValues::U32(t) => Some((u64::from(t.0), u64::from(t.1))), + CmpValues::U8(t) => Some((u64::from(t.0), u64::from(t.1), t.2)), + CmpValues::U16(t) => Some((u64::from(t.0), u64::from(t.1), t.2)), + CmpValues::U32(t) => Some((u64::from(t.0), u64::from(t.1), t.2)), CmpValues::U64(t) => Some(*t), CmpValues::Bytes(_) => None, } @@ -106,20 +117,14 @@ impl CmpValuesMetadata { pub fn new() -> Self { Self { list: vec![] } } -} -impl<'a, CM> CmpObserverMetadata<'a, CM> for CmpValuesMetadata -where - CM: CmpMap, -{ - type Data = bool; - - #[must_use] - fn new_metadata() -> Self { - Self::new() - } - - fn add_from(&mut self, usable_count: usize, cmp_map: &mut CM, _: Self::Data) { + /// Add comparisons to a metadata from a `CmpObserver`. `cmp_map` is mutable in case + /// it is needed for a custom map, but this is not utilized for `CmpObserver` or + /// `AFLppCmpLogObserver`. 
+ pub fn add_from(&mut self, usable_count: usize, cmp_map: &mut CM) + where + CM: CmpMap, + { self.list.clear(); let count = usable_count; for i in 0..count { @@ -199,65 +204,35 @@ pub trait CmpMap: Debug { } /// A [`CmpObserver`] observes the traced comparisons during the current execution using a [`CmpMap`] -pub trait CmpObserver<'a, CM, S, M>: Observer -where - CM: CmpMap, - S: UsesInput, - M: CmpObserverMetadata<'a, CM>, -{ +pub trait CmpObserver { + /// The underlying map + type Map; /// Get the number of usable cmps (all by default) fn usable_count(&self) -> usize; /// Get the `CmpMap` - fn cmp_map(&self) -> &CM; + fn cmp_map(&self) -> &Self::Map; - /// Get the `CmpMap` (mutable) - fn cmp_map_mut(&mut self) -> &mut CM; - - /// Get the observer data. By default, this is the default metadata aux data, which is `()`. - fn cmp_observer_data(&self) -> M::Data { - M::Data::default() - } - - /// Add [`struct@CmpValuesMetadata`] to the State including the logged values. - /// This routine does a basic loop filtering because loop index cmps are not interesting. - fn add_cmpvalues_meta(&mut self, state: &mut S) - where - S: HasMetadata, - { - #[allow(clippy::option_if_let_else)] // we can't mutate state in a closure - let meta = state.metadata_or_insert_with(|| M::new_metadata()); - - let usable_count = self.usable_count(); - let cmp_observer_data = self.cmp_observer_data(); - - meta.add_from(usable_count, self.cmp_map_mut(), cmp_observer_data); - } + /// Get the mut `CmpMap` + fn cmp_map_mut(&mut self) -> &mut Self::Map; } /// A standard [`CmpObserver`] observer #[derive(Serialize, Deserialize, Debug)] -#[serde(bound = "CM: serde::de::DeserializeOwned")] -pub struct StdCmpObserver<'a, CM, S, M> -where - CM: CmpMap + Serialize, - S: UsesInput + HasMetadata, - M: CmpObserverMetadata<'a, CM>, -{ +#[serde(bound = "CM: serde::de::DeserializeOwned + Serialize")] +pub struct StdCmpObserver<'a, CM> { cmp_map: OwnedRefMut<'a, CM>, size: Option>, name: Cow<'static, str>, add_meta: bool, - data: M::Data, - phantom: PhantomData, } -impl<'a, CM, S, M> CmpObserver<'a, CM, S, M> for StdCmpObserver<'a, CM, S, M> +impl CmpObserver for StdCmpObserver<'_, CM> where - CM: CmpMap + Serialize + DeserializeOwned, - S: UsesInput + Debug + HasMetadata, - M: CmpObserverMetadata<'a, CM>, + CM: HasLen, { + type Map = CM; + /// Get the number of usable cmps (all by default) fn usable_count(&self) -> usize { match &self.size { @@ -266,59 +241,45 @@ where } } - fn cmp_map(&self) -> &CM { + fn cmp_map(&self) -> &Self::Map { self.cmp_map.as_ref() } - fn cmp_map_mut(&mut self) -> &mut CM { + fn cmp_map_mut(&mut self) -> &mut Self::Map { self.cmp_map.as_mut() } - - fn cmp_observer_data(&self) -> >::Data { - >::Data::default() - } } -impl<'a, CM, S, M> Observer for StdCmpObserver<'a, CM, S, M> +impl Observer for StdCmpObserver<'_, CM> where - CM: CmpMap + Serialize + DeserializeOwned, - S: UsesInput + Debug + HasMetadata, - M: CmpObserverMetadata<'a, CM>, + CM: Serialize + CmpMap + HasLen, + S: HasMetadata, { - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.cmp_map.as_mut().reset()?; Ok(()) } - fn post_exec( - &mut self, - state: &mut S, - _input: &S::Input, - _exit_kind: &ExitKind, - ) -> Result<(), Error> { + fn post_exec(&mut self, state: &mut S, _input: &I, _exit_kind: &ExitKind) -> Result<(), Error> { if self.add_meta { - self.add_cmpvalues_meta(state); + #[allow(clippy::option_if_let_else)] // we can't mutate 
state in a closure + let meta = state.metadata_or_insert_with(CmpValuesMetadata::new); + + meta.add_from(self.usable_count(), self.cmp_map_mut()); } Ok(()) } } -impl<'a, CM, S, M> Named for StdCmpObserver<'a, CM, S, M> -where - CM: CmpMap + Serialize + DeserializeOwned, - S: UsesInput + HasMetadata, - M: CmpObserverMetadata<'a, CM>, -{ +impl Named for StdCmpObserver<'_, CM> { fn name(&self) -> &Cow<'static, str> { &self.name } } -impl<'a, CM, S, M> StdCmpObserver<'a, CM, S, M> +impl<'a, CM> StdCmpObserver<'a, CM> where - CM: CmpMap + Serialize + DeserializeOwned, - S: UsesInput + HasMetadata, - M: CmpObserverMetadata<'a, CM>, + CM: CmpMap, { /// Creates a new [`StdCmpObserver`] with the given name and map. #[must_use] @@ -328,27 +289,6 @@ where size: None, cmp_map: map, add_meta, - data: M::Data::default(), - phantom: PhantomData, - } - } - - /// Creates a new [`StdCmpObserver`] with the given name, map, and auxiliary data used to - /// populate metadata - #[must_use] - pub fn with_data( - name: &'static str, - cmp_map: OwnedRefMut<'a, CM>, - add_meta: bool, - data: M::Data, - ) -> Self { - Self { - name: Cow::from(name), - size: None, - cmp_map, - add_meta, - data, - phantom: PhantomData, } } @@ -365,45 +305,10 @@ where size: Some(size), cmp_map, add_meta, - data: M::Data::default(), - phantom: PhantomData, } } - - /// Creates a new [`StdCmpObserver`] with the given name, map, auxiliary data, and - /// reference to variable size. - #[must_use] - pub fn with_size_data( - name: &'static str, - cmp_map: OwnedRefMut<'a, CM>, - add_meta: bool, - data: M::Data, - size: OwnedRefMut<'a, usize>, - ) -> Self { - Self { - name: Cow::from(name), - size: Some(size), - cmp_map, - add_meta, - data, - phantom: PhantomData, - } - } - - /// Handle the stored auxiliary data associated with the [`CmpObserverMetadata`] - pub fn data(&self) -> &M::Data { - &self.data - } - - /// Mutably reference the stored auxiliary data associated with the [`CmpObserverMetadata`] - pub fn data_mut(&mut self) -> &mut M::Data { - &mut self.data - } } -/// A [`StdCmpObserver`] that optionally adds comparisons into a [`CmpValuesMetadata`] -pub type StdCmpValuesObserver<'a, CM, S> = StdCmpObserver<'a, CM, S, CmpValuesMetadata>; - /* From AFL++ cmplog.h #define CMP_MAP_W 65536 @@ -500,8 +405,6 @@ impl AFLppCmpValuesMetadata { } } -#[derive(Debug, Copy, Clone, BitfieldStruct)] -#[repr(C, packed)] /// Comparison header, used to describe a set of comparison values efficiently. /// /// # Bitfields @@ -509,17 +412,33 @@ impl AFLppCmpValuesMetadata { /// - hits: The number of hits of a particular comparison /// - id: Unused by ``LibAFL``, a unique ID for a particular comparison /// - shape: Whether a comparison is u8/u8, u16/u16, etc. 
-/// - _type: Whether the comparison value represents an instruction (like a `cmp`) or function +/// - type_: Whether the comparison value represents an instruction (like a `cmp`) or function /// call arguments /// - attribute: OR-ed bitflags describing whether the comparison is <, >, =, <=, >=, or transform /// - overflow: Whether the comparison overflows /// - reserved: Reserved for future use +#[bitfield(u16)] +#[derive(Debug)] pub struct AFLppCmpLogHeader { - /// The header values - #[bitfield(name = "hits", ty = "u32", bits = "0..=5")] // 6 bits up to 63 entries, we have CMP_MAP_H = 32 (so using half of it) - #[bitfield(name = "shape", ty = "u32", bits = "6..=10")] // 31 + 1 bytes max - #[bitfield(name = "_type", ty = "u8", bits = "11..=11")] // 2: cmp, rtn - #[bitfield(name = "attribute", ty = "u32", bits = "12..=15")] - // 16 types for arithmetic comparison types - pub data: [u8; 2], + /// The number of hits of a particular comparison + /// + /// 6 bits up to 63 entries, we have CMP_MAP_H = 32 (so using half of it) + #[bits(0..=5, r)] + hits: u6, + /// Whether a comparison is u8/u8, u16/u16, etc. + /// + /// 31 + 1 bytes max + #[bits(6..=10, r)] + shape: u5, + /// Whether the comparison value represents an instruction (like a `cmp`) or function call + /// arguments + /// + /// 2: cmp, rtn + #[bit(11, r)] + type_: u1, + /// OR-ed bitflags describing whether the comparison is <, >, =, <=, >=, or transform + /// + /// 16 types for arithmetic comparison types + #[bits(12..=15, r)] + attribute: u4, } diff --git a/libafl/src/observers/concolic/mod.rs b/libafl/src/observers/concolic/mod.rs index c05ca9317c..9031b0f8aa 100644 --- a/libafl/src/observers/concolic/mod.rs +++ b/libafl/src/observers/concolic/mod.rs @@ -9,7 +9,9 @@ use core::{ #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -/// A `SymExprRef` identifies a [`SymExpr`] in a trace. Reading a `SymExpr` from a trace will always also yield its +/// A `SymExprRef` identifies a [`SymExpr`] in a trace. +/// +/// Reading a `SymExpr` from a trace will always also yield its /// `SymExprRef`, which can be used later in the trace to identify the `SymExpr`. /// It is also never zero, which allows for efficient use of `Option`. /// @@ -17,7 +19,9 @@ use serde::{Deserialize, Serialize}; /// `SymExprRef`s are not valid across traces. pub type SymExprRef = NonZeroUsize; -/// [`Location`]s are code locations encountered during concolic tracing, that are constructed from pointers, but not always in a meaningful way. +/// [`Location`]s are code locations encountered during concolic tracing +/// +/// [`Location`]s are constructed from pointers, but not always in a meaningful way. /// Therefore, a location is an opaque value that can only be compared against itself. /// /// It is possible to get at the underlying value using [`Into::into`], should this restriction be too inflexible for your usecase. 
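// Illustrative sketch (not part of the patch): the cmp.rs hunks above rework
// `CmpObserver` so the map type becomes an associated type (`type Map`) instead
// of the old `CM`/`S`/`M` generic parameters. The trait is re-declared here in
// reduced form so the example is self-contained; `ToyCmpMap` and
// `ToyCmpObserver` are hypothetical names used only for this illustration
// (the real `StdCmpObserver` borrows its map through `OwnedRefMut` instead of
// owning it).

trait CmpObserver {
    /// The underlying map type, formerly the `CM` generic parameter.
    type Map;
    /// Number of usable comparison entries.
    fn usable_count(&self) -> usize;
    /// Shared access to the comparison map.
    fn cmp_map(&self) -> &Self::Map;
    /// Mutable access to the comparison map.
    fn cmp_map_mut(&mut self) -> &mut Self::Map;
}

/// A toy comparison map: one logged value pair per slot.
#[derive(Default, Debug)]
struct ToyCmpMap {
    entries: Vec<(u64, u64)>,
}

/// A toy observer that owns its map directly.
#[derive(Default, Debug)]
struct ToyCmpObserver {
    map: ToyCmpMap,
}

impl CmpObserver for ToyCmpObserver {
    type Map = ToyCmpMap;

    fn usable_count(&self) -> usize {
        self.map.entries.len()
    }

    fn cmp_map(&self) -> &Self::Map {
        &self.map
    }

    fn cmp_map_mut(&mut self) -> &mut Self::Map {
        &mut self.map
    }
}

fn main() {
    let mut obs = ToyCmpObserver::default();
    obs.cmp_map_mut().entries.push((0x4141, 0x4242));
    assert_eq!(obs.usable_count(), 1);
    println!("logged cmp pairs: {:?}", obs.cmp_map());
}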
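// Illustrative sketch (not part of the patch): the same 16-bit layout the
// rewritten `AFLppCmpLogHeader` describes, unpacked manually with shifts and
// masks. The bit ranges mirror the doc comments above (hits: bits 0..=5,
// shape: bits 6..=10, type: bit 11, attribute: bits 12..=15); the
// `unpack_header` helper itself is a hypothetical stand-in for illustration only.

/// Plain-struct view of the packed header fields.
#[derive(Debug, PartialEq, Eq)]
struct UnpackedCmpHeader {
    hits: u8,      // 6 bits, up to 63 hits
    shape: u8,     // 5 bits, operand width indicator (31 + 1 bytes max)
    is_rtn: bool,  // bit 11: 0 = `cmp` instruction, 1 = function-call (rtn) args
    attribute: u8, // 4 bits of comparison-kind flags
}

fn unpack_header(raw: u16) -> UnpackedCmpHeader {
    UnpackedCmpHeader {
        hits: (raw & 0b11_1111) as u8,
        shape: ((raw >> 6) & 0b1_1111) as u8,
        is_rtn: (raw >> 11) & 0b1 != 0,
        attribute: ((raw >> 12) & 0b1111) as u8,
    }
}

fn main() {
    // 3 hits, shape 1, a cmp instruction, attribute 0b0010.
    let raw: u16 = 3 | (1 << 6) | (0b0010 << 12);
    let header = unpack_header(raw);
    assert_eq!(header.hits, 3);
    assert_eq!(header.shape, 1);
    assert!(!header.is_rtn);
    assert_eq!(header.attribute, 0b0010);
    println!("{header:?}");
}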
diff --git a/libafl/src/observers/concolic/observer.rs b/libafl/src/observers/concolic/observer.rs index a3e47ed1d3..8365cc014e 100644 --- a/libafl/src/observers/concolic/observer.rs +++ b/libafl/src/observers/concolic/observer.rs @@ -3,12 +3,9 @@ use alloc::borrow::Cow; use libafl_bolts::Named; use serde::{Deserialize, Serialize}; -use crate::{ - inputs::UsesInput, - observers::{ - concolic::{serialization_format::MessageFileReader, ConcolicMetadata}, - Observer, - }, +use crate::observers::{ + concolic::{serialization_format::MessageFileReader, ConcolicMetadata}, + Observer, }; /// A standard [`ConcolicObserver`] observer, observing constraints written into a memory buffer. @@ -19,9 +16,9 @@ pub struct ConcolicObserver<'map> { name: Cow<'static, str>, } -impl<'map, S> Observer for ConcolicObserver<'map> where S: UsesInput {} +impl Observer for ConcolicObserver<'_> {} -impl<'map> ConcolicObserver<'map> { +impl ConcolicObserver<'_> { /// Create the concolic observer metadata for this run #[must_use] pub fn create_metadata_from_current_map(&self) -> ConcolicMetadata { @@ -31,7 +28,7 @@ impl<'map> ConcolicObserver<'map> { } } -impl<'map> Named for ConcolicObserver<'map> { +impl Named for ConcolicObserver<'_> { fn name(&self) -> &Cow<'static, str> { &self.name } diff --git a/libafl/src/observers/concolic/serialization_format.rs b/libafl/src/observers/concolic/serialization_format.rs index a72213a05e..72735bb702 100644 --- a/libafl/src/observers/concolic/serialization_format.rs +++ b/libafl/src/observers/concolic/serialization_format.rs @@ -242,7 +242,7 @@ impl MessageFileWriter { /// Create a `MessageFileWriter` from the given [`Write`]. pub fn from_writer(mut writer: W) -> io::Result { let writer_start_position = writer.stream_position()?; - // write dummy trace length + // write preliminary trace length writer.write_all(&0_u64.to_le_bytes())?; Ok(Self { id_counter: 1, diff --git a/libafl/src/observers/list.rs b/libafl/src/observers/list.rs index 99a50e96f5..752ab23f12 100644 --- a/libafl/src/observers/list.rs +++ b/libafl/src/observers/list.rs @@ -4,11 +4,11 @@ use core::fmt::Debug; use libafl_bolts::{ownedref::OwnedMutPtr, Error, Named}; use serde::{Deserialize, Serialize}; -use crate::{inputs::UsesInput, observers::Observer}; +use crate::observers::Observer; /// A simple observer with a list of things. #[derive(Serialize, Deserialize, Debug)] -#[serde(bound = "T: serde::de::DeserializeOwned + serde::Serialize")] +#[serde(bound = "T: Serialize + for<'a> Deserialize<'a>")] #[allow(clippy::unsafe_derive_deserialize)] pub struct ListObserver { name: Cow<'static, str>, @@ -16,10 +16,7 @@ pub struct ListObserver { list: OwnedMutPtr>, } -impl ListObserver -where - T: Debug + Serialize + serde::de::DeserializeOwned, -{ +impl ListObserver { /// Creates a new [`ListObserver`] with the given name. 
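// Illustrative sketch (not part of the patch): the `MessageFileWriter` hunk
// above records the stream position and writes a preliminary 0_u64 where the
// trace length will live. A common way to complete that pattern is to seek back
// and patch the real length once writing is done; whether the actual
// implementation patches it exactly like this is not shown in the hunk, so
// treat `write_trace` as a hypothetical stand-in.

use std::io::{Cursor, Seek, SeekFrom, Write};

fn write_trace(buf: &mut Cursor<Vec<u8>>, payload: &[u8]) -> std::io::Result<()> {
    // Remember where the header starts and write a preliminary length of 0.
    let start = buf.stream_position()?;
    buf.write_all(&0_u64.to_le_bytes())?;

    // Write the actual trace payload.
    buf.write_all(payload)?;

    // Seek back and patch the real length over the preliminary value.
    let end = buf.stream_position()?;
    let len = end - start - 8;
    buf.seek(SeekFrom::Start(start))?;
    buf.write_all(&len.to_le_bytes())?;
    buf.seek(SeekFrom::Start(end))?;
    Ok(())
}

fn main() -> std::io::Result<()> {
    let mut buf = Cursor::new(Vec::new());
    write_trace(&mut buf, b"some serialized SymExpr messages")?;
    let bytes = buf.into_inner();
    let len = u64::from_le_bytes(bytes[..8].try_into().unwrap());
    assert_eq!(len as usize, bytes.len() - 8);
    println!("trace length: {len} bytes");
    Ok(())
}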
/// /// # Safety @@ -46,21 +43,14 @@ where } } -impl Observer for ListObserver -where - S: UsesInput, - T: Debug + Serialize + serde::de::DeserializeOwned, -{ - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { +impl Observer for ListObserver { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.list.as_mut().clear(); Ok(()) } } -impl Named for ListObserver -where - T: Debug + Serialize + serde::de::DeserializeOwned, -{ +impl Named for ListObserver { fn name(&self) -> &Cow<'static, str> { &self.name } diff --git a/libafl/src/observers/map/const_map.rs b/libafl/src/observers/map/const_map.rs index 533b7ded55..d383bc0cc3 100644 --- a/libafl/src/observers/map/const_map.rs +++ b/libafl/src/observers/map/const_map.rs @@ -5,177 +5,76 @@ use core::{ fmt::Debug, hash::{Hash, Hasher}, ops::{Deref, DerefMut}, - slice::{Iter, IterMut}, + ptr::NonNull, }; use ahash::RandomState; -use libafl_bolts::{ownedref::OwnedMutSlice, AsSlice, AsSliceMut, HasLen, Named}; -use num_traits::Bounded; -use serde::{Deserialize, Serialize}; +use libafl_bolts::{ownedref::OwnedMutSizedSlice, HasLen, Named}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ - inputs::UsesInput, - observers::{map::MapObserver, Observer}, + observers::{map::MapObserver, ConstLenMapObserver, Observer}, Error, }; /// Use a const size to speedup `Feedback::is_interesting` when the user can /// know the size of the map at compile time. #[derive(Serialize, Deserialize, Debug)] -#[serde(bound = "T: serde::de::DeserializeOwned")] #[allow(clippy::unsafe_derive_deserialize)] -pub struct ConstMapObserver<'a, T, const N: usize> -where - T: Default + Copy + 'static + Serialize, -{ - map: OwnedMutSlice<'a, T>, +pub struct ConstMapObserver<'a, T, const N: usize> { + map: OwnedMutSizedSlice<'a, T, N>, initial: T, name: Cow<'static, str>, } -impl<'a, S, T, const N: usize> Observer for ConstMapObserver<'a, T, N> +impl Observer for ConstMapObserver<'_, T, N> where - S: UsesInput, - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, Self: MapObserver, { #[inline] - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.reset_map() } } -impl<'a, T, const N: usize> Named for ConstMapObserver<'a, T, N> -where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, -{ +impl Named for ConstMapObserver<'_, T, N> { #[inline] fn name(&self) -> &Cow<'static, str> { &self.name } } -impl<'a, T, const N: usize> HasLen for ConstMapObserver<'a, T, N> -where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, -{ +impl HasLen for ConstMapObserver<'_, T, N> { #[inline] fn len(&self) -> usize { N } } -impl<'a, 'it, T, const N: usize> IntoIterator for &'it ConstMapObserver<'a, T, N> +impl Hash for ConstMapObserver<'_, T, N> where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, -{ - type Item = as Iterator>::Item; - type IntoIter = Iter<'it, T>; - - fn into_iter(self) -> Self::IntoIter { - let cnt = self.usable_count(); - self.as_slice()[..cnt].iter() - } -} - -impl<'a, 'it, T, const N: usize> IntoIterator for &'it mut ConstMapObserver<'a, T, N> -where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, -{ - type Item = as Iterator>::Item; - type IntoIter = IterMut<'it, T>; - - 
fn into_iter(self) -> Self::IntoIter { - let cnt = self.usable_count(); - self.as_slice_mut()[..cnt].iter_mut() - } -} - -impl<'a, T, const N: usize> ConstMapObserver<'a, T, N> -where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, -{ - /// Returns an iterator over the map. - pub fn iter(&self) -> Iter<'_, T> { - <&Self as IntoIterator>::into_iter(self) - } - - /// Returns a mutable iterator over the map. - pub fn iter_mut(&mut self) -> IterMut<'_, T> { - <&mut Self as IntoIterator>::into_iter(self) - } -} - -impl<'a, T, const N: usize> Hash for ConstMapObserver<'a, T, N> -where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, + T: Hash, { #[inline] fn hash(&self, hasher: &mut H) { - self.as_slice().hash(hasher); + self.map.as_slice().hash(hasher); } } -impl<'a, T, const N: usize> AsRef for ConstMapObserver<'a, T, N> -where - T: Default + Copy + 'static + Serialize, -{ +impl AsRef for ConstMapObserver<'_, T, N> { fn as_ref(&self) -> &Self { self } } -impl<'a, T, const N: usize> AsMut for ConstMapObserver<'a, T, N> -where - T: Default + Copy + 'static + Serialize, -{ +impl AsMut for ConstMapObserver<'_, T, N> { fn as_mut(&mut self) -> &mut Self { self } } -impl<'a, T, const N: usize> MapObserver for ConstMapObserver<'a, T, N> +impl MapObserver for ConstMapObserver<'_, T, N> where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, + T: PartialEq + Copy + Hash + Serialize + DeserializeOwned + Debug + 'static, { type Entry = T; @@ -186,19 +85,19 @@ where #[inline] fn get(&self, idx: usize) -> T { - self.as_slice()[idx] + self[idx] } #[inline] fn set(&mut self, idx: usize, val: T) { - self.map.as_slice_mut()[idx] = val; + (*self)[idx] = val; } /// Count the set bytes in the map fn count_bytes(&self) -> u64 { let initial = self.initial(); let cnt = self.usable_count(); - let map = self.as_slice(); + let map = self.map.as_slice(); let mut res = 0; for x in &map[0..cnt] { if *x != initial { @@ -209,7 +108,7 @@ where } fn usable_count(&self) -> usize { - self.as_slice().len() + self.len() } #[inline] @@ -223,7 +122,7 @@ where // Normal memset, see https://rust.godbolt.org/z/Trs5hv let initial = self.initial(); let cnt = self.usable_count(); - let map = self.as_slice_mut(); + let map = &mut (*self); for x in &mut map[0..cnt] { *x = initial; } @@ -231,14 +130,14 @@ where } fn to_vec(&self) -> Vec { - self.as_slice().to_vec() + self.map.to_vec() } /// Get the number of set entries with the specified indexes fn how_many_set(&self, indexes: &[usize]) -> usize { let initial = self.initial(); let cnt = self.usable_count(); - let map = self.as_slice(); + let map = self.map.as_slice(); let mut res = 0; for i in indexes { if *i < cnt && map[*i] != initial { @@ -249,28 +148,36 @@ where } } -impl<'a, T, const N: usize> Deref for ConstMapObserver<'a, T, N> +impl ConstLenMapObserver for ConstMapObserver<'_, T, N> where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, + T: PartialEq + Copy + Hash + Serialize + DeserializeOwned + Debug + 'static, { - type Target = [T]; - fn deref(&self) -> &[T] { + fn map_slice(&self) -> &[Self::Entry; N] { &self.map } + + fn map_slice_mut(&mut self) -> &mut [Self::Entry; N] { + &mut self.map + } } -impl<'a, T, const N: usize> DerefMut for ConstMapObserver<'a, T, N> -where - T: Default + Copy + 'static + Serialize + 
serde::de::DeserializeOwned + Debug, -{ +impl Deref for ConstMapObserver<'_, T, N> { + type Target = [T]; + + fn deref(&self) -> &[T] { + self.map.as_slice() + } +} + +impl DerefMut for ConstMapObserver<'_, T, N> { fn deref_mut(&mut self) -> &mut [T] { - &mut self.map + self.map.as_mut_slice() } } impl<'a, T, const N: usize> ConstMapObserver<'a, T, N> where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, + T: Default, { /// Creates a new [`MapObserver`] /// @@ -278,36 +185,30 @@ where /// Will get a pointer to the map and dereference it at any point in time. /// The map must not move in memory! #[must_use] - pub fn new(name: &'static str, map: &'a mut [T]) -> Self { + pub fn new(name: &'static str, map: &'a mut [T; N]) -> Self { assert!(map.len() >= N); Self { - map: OwnedMutSlice::from(map), + map: OwnedMutSizedSlice::from(map), name: Cow::from(name), initial: T::default(), } } - /// Creates a new [`MapObserver`] with an owned map - #[must_use] - pub fn owned(name: &'static str, map: Vec) -> Self { - assert!(map.len() >= N); - let initial = if map.is_empty() { T::default() } else { map[0] }; - Self { - map: OwnedMutSlice::from(map), - name: Cow::from(name), - initial, - } - } - /// Creates a new [`MapObserver`] from a raw pointer /// /// # Safety /// Will dereference the `map_ptr` with up to len elements. - pub unsafe fn from_mut_ptr(name: &'static str, map_ptr: *mut T) -> Self { + #[must_use] + pub unsafe fn from_mut_ptr(name: &'static str, map_ptr: NonNull<[T; N]>) -> Self { ConstMapObserver { - map: OwnedMutSlice::from_raw_parts_mut(map_ptr, N), + map: OwnedMutSizedSlice::from_raw_mut(map_ptr), name: Cow::from(name), initial: T::default(), } } + + /// Gets the initial value for this map, mutably + pub fn initial_mut(&mut self) -> &mut T { + &mut self.initial + } } diff --git a/libafl/src/observers/map/hitcount_map.rs b/libafl/src/observers/map/hitcount_map.rs index cd8f1f7193..56a6dbfc6e 100644 --- a/libafl/src/observers/map/hitcount_map.rs +++ b/libafl/src/observers/map/hitcount_map.rs @@ -1,14 +1,21 @@ //! Hitcount map observer is for implementing AFL's hit count bucket use alloc::{borrow::Cow, vec::Vec}; -use core::{fmt::Debug, hash::Hash, mem::size_of, slice}; +use core::{ + fmt::Debug, + hash::Hash, + mem::size_of, + ops::{Deref, DerefMut}, + slice, +}; use libafl_bolts::{AsIter, AsIterMut, AsSlice, AsSliceMut, HasLen, Named, Truncate}; use serde::{Deserialize, Serialize}; use crate::{ executors::ExitKind, - inputs::UsesInput, - observers::{map::MapObserver, DifferentialObserver, Observer, ObserversTuple}, + observers::{ + map::MapObserver, ConstLenMapObserver, DifferentialObserver, Observer, VarLenMapObserver, + }, Error, }; @@ -32,20 +39,23 @@ static COUNT_CLASS_LOOKUP: [u8; 256] = [ static mut COUNT_CLASS_LOOKUP_16: Vec = vec![]; /// Initialize the 16-byte hitcounts map -/// -/// # Safety -/// -/// Calling this from multiple threads may be racey and hence leak 65k mem fn init_count_class_16() { + // # Safety + // + // Calling this from multiple threads may be racey and hence leak 65k mem or even create a broken lookup vec. + // We can live with that. 
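// Illustrative sketch (not part of the patch): the const_map.rs hunks above
// move `ConstMapObserver` onto an `OwnedMutSizedSlice<'a, T, N>` and construct
// it from a `NonNull<[T; N]>` instead of a raw `*mut T` plus an implicit length.
// `SizedCoverageMap` below is a hypothetical stand-in showing the shape of that
// API: the length lives in the type, so `len()` is just `N`.

use core::ptr::NonNull;

/// A map whose size is fixed at compile time, borrowed from somewhere else
/// (e.g. a coverage map provided by instrumentation).
struct SizedCoverageMap<'a, T, const N: usize> {
    map: &'a mut [T; N],
}

impl<'a, T, const N: usize> SizedCoverageMap<'a, T, N> {
    /// Safe construction from a unique reference to a sized array.
    fn new(map: &'a mut [T; N]) -> Self {
        Self { map }
    }

    /// Construction from a pointer, mirroring `from_mut_ptr(NonNull<[T; N]>)`.
    ///
    /// # Safety
    /// The pointee must be valid, properly aligned, live for `'a`, and must not
    /// be mutably aliased while this wrapper exists.
    unsafe fn from_raw(ptr: NonNull<[T; N]>) -> Self {
        Self { map: &mut *ptr.as_ptr() }
    }

    /// The usable length is a compile-time constant.
    const fn len(&self) -> usize {
        N
    }

    fn as_slice(&self) -> &[T] {
        &self.map[..]
    }
}

fn main() {
    // Safe path: borrow a local array.
    let mut local = [0u8; 8];
    let view = SizedCoverageMap::new(&mut local);
    assert_eq!(view.len(), 8);

    // Pointer path, as instrumentation-facing code would use it.
    let mut raw_backed = [0u8; 8];
    let ptr = NonNull::from(&mut raw_backed);
    let view2 = unsafe { SizedCoverageMap::from_raw(ptr) };
    assert_eq!(view2.as_slice().len(), 8);
}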
unsafe { - if !COUNT_CLASS_LOOKUP_16.is_empty() { + let count_class_lookup_16 = &raw mut COUNT_CLASS_LOOKUP_16; + let count_class_lookup_16 = &mut *count_class_lookup_16; + + if !count_class_lookup_16.is_empty() { return; } - COUNT_CLASS_LOOKUP_16 = vec![0; 65536]; + *count_class_lookup_16 = vec![0; 65536]; for i in 0..256 { for j in 0..256 { - COUNT_CLASS_LOOKUP_16[(i << 8) + j] = + count_class_lookup_16[(i << 8) + j] = (u16::from(COUNT_CLASS_LOOKUP[i]) << 8) | u16::from(COUNT_CLASS_LOOKUP[j]); } } @@ -57,32 +67,36 @@ fn init_count_class_16() { /// [`MapObserver`]s that are not slice-backed, such as `MultiMapObserver`, can use /// [`HitcountsIterableMapObserver`] instead. #[derive(Serialize, Deserialize, Clone, Debug, Hash)] -#[serde(bound = "M: serde::de::DeserializeOwned")] -pub struct HitcountsMapObserver -where - M: Serialize, -{ +pub struct HitcountsMapObserver { base: M, } -impl Observer for HitcountsMapObserver +impl Deref for HitcountsMapObserver { + type Target = M; + + fn deref(&self) -> &Self::Target { + &self.base + } +} + +impl DerefMut for HitcountsMapObserver { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.base + } +} + +impl Observer for HitcountsMapObserver where - M: MapObserver + Observer + for<'a> AsSliceMut<'a, Entry = u8>, - S: UsesInput, + M: MapObserver + Observer + for<'a> AsSliceMut<'a, Entry = u8>, { #[inline] - fn pre_exec(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, state: &mut S, input: &I) -> Result<(), Error> { self.base.pre_exec(state, input) } #[inline] #[allow(clippy::cast_ptr_alignment)] - fn post_exec( - &mut self, - state: &mut S, - input: &S::Input, - exit_kind: &ExitKind, - ) -> Result<(), Error> { + fn post_exec(&mut self, state: &mut S, input: &I, exit_kind: &ExitKind) -> Result<(), Error> { let mut map = self.as_slice_mut(); let mut len = map.len(); let align_offset = map.as_ptr().align_offset(size_of::()); @@ -113,11 +127,14 @@ where let map16 = unsafe { slice::from_raw_parts_mut(map.as_mut_ptr().add(align_offset) as *mut u16, cnt) }; + let count_class_lookup_16 = &raw mut COUNT_CLASS_LOOKUP_16; + // 2022-07: Adding `enumerate` here increases execution speed/register allocation on x86_64. 
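// Illustrative sketch (not part of the patch): how the 16-bit hitcount lookup
// is derived from an 8-bit bucket table, as in `init_count_class_16` above, so
// two adjacent map bytes can be classified with a single u16 table load. The
// per-byte bucket function used here (`toy_bucket`) is a hypothetical stand-in,
// not AFL's actual COUNT_CLASS_LOOKUP values.

/// Hypothetical per-byte bucketing: collapse a hit count to its highest set bit.
fn toy_bucket(hits: u8) -> u8 {
    match hits {
        0 => 0,
        h => 1u8 << (7 - h.leading_zeros() as u8),
    }
}

fn main() {
    // 8-bit table: bucket for every possible byte value.
    let lookup8: [u8; 256] = core::array::from_fn(|i| toy_bucket(i as u8));

    // 16-bit table: bucket both halves of a u16 at once, mirroring
    // `(u16::from(LOOKUP[i]) << 8) | u16::from(LOOKUP[j])` from the hunk above.
    let mut lookup16 = vec![0u16; 65536];
    for i in 0..256usize {
        for j in 0..256usize {
            lookup16[(i << 8) + j] = (u16::from(lookup8[i]) << 8) | u16::from(lookup8[j]);
        }
    }

    // Classifying a pair of map bytes is then a single indexed load.
    let pair = u16::from_be_bytes([3, 130]);
    let classified = lookup16[pair as usize];
    let [hi, lo] = classified.to_be_bytes();
    assert_eq!(hi, toy_bucket(3));
    assert_eq!(lo, toy_bucket(130));
    println!("bucketed pair: {hi} / {lo}");
}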
#[allow(clippy::unused_enumerate_index)] for (_i, item) in map16[0..cnt].iter_mut().enumerate() { unsafe { - *item = *COUNT_CLASS_LOOKUP_16.get_unchecked(*item as usize); + let count_class_lookup_16 = &mut *count_class_lookup_16; + *item = *(*count_class_lookup_16).get_unchecked(*item as usize); } } @@ -129,7 +146,7 @@ where impl Named for HitcountsMapObserver where - M: Named + Serialize + serde::de::DeserializeOwned, + M: Named, { #[inline] fn name(&self) -> &Cow<'static, str> { @@ -137,9 +154,17 @@ where } } +impl HitcountsMapObserver { + /// Creates a new [`MapObserver`] + pub fn new(base: M) -> Self { + init_count_class_16(); + Self { base } + } +} + impl HasLen for HitcountsMapObserver where - M: MapObserver, + M: HasLen, { #[inline] fn len(&self) -> usize { @@ -147,19 +172,13 @@ where } } -impl AsRef for HitcountsMapObserver -where - M: MapObserver, -{ +impl AsRef for HitcountsMapObserver { fn as_ref(&self) -> &Self { self } } -impl AsMut for HitcountsMapObserver -where - M: MapObserver, -{ +impl AsMut for HitcountsMapObserver { fn as_mut(&mut self) -> &mut Self { self } @@ -206,6 +225,7 @@ where fn hash_simple(&self) -> u64 { self.base.hash_simple() } + fn to_vec(&self) -> Vec { self.base.to_vec() } @@ -215,6 +235,40 @@ where } } +impl ConstLenMapObserver for HitcountsMapObserver +where + M: ConstLenMapObserver + MapObserver, +{ + fn map_slice(&self) -> &[Self::Entry; N] { + self.base.map_slice() + } + + fn map_slice_mut(&mut self) -> &mut [Self::Entry; N] { + self.base.map_slice_mut() + } +} + +impl VarLenMapObserver for HitcountsMapObserver +where + M: VarLenMapObserver + MapObserver, +{ + fn map_slice(&self) -> &[Self::Entry] { + self.base.map_slice() + } + + fn map_slice_mut(&mut self) -> &mut [Self::Entry] { + self.base.map_slice_mut() + } + + fn size(&self) -> &usize { + self.base.size() + } + + fn size_mut(&mut self) -> &mut usize { + self.base.size_mut() + } +} + impl Truncate for HitcountsMapObserver where M: Named + Serialize + serde::de::DeserializeOwned + Truncate, @@ -226,7 +280,7 @@ where impl<'a, M> AsSlice<'a> for HitcountsMapObserver where - M: MapObserver + AsSlice<'a>, + M: AsSlice<'a>, { type Entry = >::Entry; type SliceRef = >::SliceRef; @@ -239,7 +293,7 @@ where impl<'a, M> AsSliceMut<'a> for HitcountsMapObserver where - M: MapObserver + AsSliceMut<'a>, + M: AsSliceMut<'a>, { type SliceRefMut = >::SliceRefMut; #[inline] @@ -248,74 +302,11 @@ where } } -impl HitcountsMapObserver +impl DifferentialObserver for HitcountsMapObserver where - M: MapObserver, -{ - /// Creates a new [`MapObserver`] - pub fn new(base: M) -> Self { - init_count_class_16(); - Self { base } - } -} - -impl<'it, M> IntoIterator for &'it HitcountsMapObserver -where - M: Serialize + serde::de::DeserializeOwned, - &'it M: IntoIterator, -{ - type Item = &'it u8; - type IntoIter = <&'it M as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.base.into_iter() - } -} - -impl<'it, M> IntoIterator for &'it mut HitcountsMapObserver -where - M: Serialize + serde::de::DeserializeOwned, - &'it mut M: IntoIterator, -{ - type Item = &'it mut u8; - type IntoIter = <&'it mut M as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.base.into_iter() - } -} - -impl HitcountsMapObserver -where - M: Serialize + serde::de::DeserializeOwned, - for<'it> &'it M: IntoIterator, -{ - /// Returns an iterator over the map. 
- pub fn iter(&self) -> <&M as IntoIterator>::IntoIter { - <&Self as IntoIterator>::into_iter(self) - } -} - -impl HitcountsMapObserver -where - M: Serialize + serde::de::DeserializeOwned, - for<'it> &'it mut M: IntoIterator, -{ - /// Returns a mutable iterator over the map. - pub fn iter_mut(&mut self) -> <&mut M as IntoIterator>::IntoIter { - <&mut Self as IntoIterator>::into_iter(self) - } -} - -impl DifferentialObserver for HitcountsMapObserver -where - M: DifferentialObserver + M: DifferentialObserver + MapObserver - + Serialize + for<'a> AsSliceMut<'a, Entry = u8>, - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, { fn pre_observe_first(&mut self, observers: &mut OTA) -> Result<(), Error> { self.base.pre_observe_first(observers) @@ -338,33 +329,36 @@ where /// Less optimized version for non-slice iterators. /// Slice-backed observers should use a [`HitcountsMapObserver`]. #[derive(Serialize, Deserialize, Clone, Debug, Hash)] -#[serde(bound = "M: serde::de::DeserializeOwned")] -pub struct HitcountsIterableMapObserver -where - M: Serialize, -{ +pub struct HitcountsIterableMapObserver { base: M, } -impl Observer for HitcountsIterableMapObserver +impl Deref for HitcountsIterableMapObserver { + type Target = M; + + fn deref(&self) -> &Self::Target { + &self.base + } +} + +impl DerefMut for HitcountsIterableMapObserver { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.base + } +} + +impl Observer for HitcountsIterableMapObserver where - M: MapObserver + Observer, - for<'it> M: AsIterMut<'it, Item = u8>, - S: UsesInput, + M: MapObserver + Observer + for<'it> AsIterMut<'it, Item = u8>, { #[inline] - fn pre_exec(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, state: &mut S, input: &I) -> Result<(), Error> { self.base.pre_exec(state, input) } #[inline] #[allow(clippy::cast_ptr_alignment)] - fn post_exec( - &mut self, - state: &mut S, - input: &S::Input, - exit_kind: &ExitKind, - ) -> Result<(), Error> { + fn post_exec(&mut self, state: &mut S, input: &I, exit_kind: &ExitKind) -> Result<(), Error> { for mut item in self.as_iter_mut() { *item = unsafe { *COUNT_CLASS_LOOKUP.get_unchecked((*item) as usize) }; } @@ -375,7 +369,7 @@ where impl Named for HitcountsIterableMapObserver where - M: Named + Serialize + serde::de::DeserializeOwned, + M: Named, { #[inline] fn name(&self) -> &Cow<'static, str> { @@ -383,9 +377,17 @@ where } } +impl HitcountsIterableMapObserver { + /// Creates a new [`MapObserver`] + pub fn new(base: M) -> Self { + init_count_class_16(); + Self { base } + } +} + impl HasLen for HitcountsIterableMapObserver where - M: MapObserver, + M: HasLen, { #[inline] fn len(&self) -> usize { @@ -393,21 +395,13 @@ where } } -impl AsRef for HitcountsIterableMapObserver -where - M: MapObserver, - for<'it> M: AsIterMut<'it, Item = u8>, -{ +impl AsRef for HitcountsIterableMapObserver { fn as_ref(&self) -> &Self { self } } -impl AsMut for HitcountsIterableMapObserver -where - M: MapObserver, - for<'it> M: AsIterMut<'it, Item = u8>, -{ +impl AsMut for HitcountsIterableMapObserver { fn as_mut(&mut self) -> &mut Self { self } @@ -416,7 +410,6 @@ where impl MapObserver for HitcountsIterableMapObserver where M: MapObserver, - for<'it> M: AsIterMut<'it, Item = u8>, { type Entry = u8; @@ -473,97 +466,11 @@ where } } -impl HitcountsIterableMapObserver +impl DifferentialObserver for HitcountsIterableMapObserver where - M: Serialize + serde::de::DeserializeOwned, -{ - /// Creates a new [`MapObserver`] - pub fn new(base: M) -> Self { - 
init_count_class_16(); - Self { base } - } -} - -impl<'it, M> AsIter<'it> for HitcountsIterableMapObserver -where - M: Named + Serialize + serde::de::DeserializeOwned + AsIter<'it, Item = u8>, -{ - type Item = u8; - type Ref = >::Ref; - type IntoIter = >::IntoIter; - - fn as_iter(&'it self) -> Self::IntoIter { - self.base.as_iter() - } -} - -impl<'it, M> AsIterMut<'it> for HitcountsIterableMapObserver -where - M: Named + Serialize + serde::de::DeserializeOwned + AsIterMut<'it, Item = u8>, -{ - type RefMut = >::RefMut; - type IntoIterMut = >::IntoIterMut; - - fn as_iter_mut(&'it mut self) -> Self::IntoIterMut { - self.base.as_iter_mut() - } -} - -impl<'it, M> IntoIterator for &'it HitcountsIterableMapObserver -where - M: Serialize + serde::de::DeserializeOwned, - &'it M: IntoIterator, -{ - type Item = &'it u8; - type IntoIter = <&'it M as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.base.into_iter() - } -} - -impl<'it, M> IntoIterator for &'it mut HitcountsIterableMapObserver -where - M: Serialize + serde::de::DeserializeOwned, - &'it mut M: IntoIterator, -{ - type Item = &'it mut u8; - type IntoIter = <&'it mut M as IntoIterator>::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.base.into_iter() - } -} - -impl HitcountsIterableMapObserver -where - M: Serialize + serde::de::DeserializeOwned, - for<'it> &'it M: IntoIterator, -{ - /// Returns an iterator over the map. - pub fn iter(&self) -> <&M as IntoIterator>::IntoIter { - <&Self as IntoIterator>::into_iter(self) - } -} - -impl HitcountsIterableMapObserver -where - M: Serialize + serde::de::DeserializeOwned, - for<'it> &'it mut M: IntoIterator, -{ - /// Returns a mutable iterator over the map. - pub fn iter_mut(&mut self) -> <&mut M as IntoIterator>::IntoIter { - <&mut Self as IntoIterator>::into_iter(self) - } -} - -impl DifferentialObserver for HitcountsIterableMapObserver -where - M: MapObserver + Observer + DifferentialObserver, - for<'it> M: AsIterMut<'it, Item = u8>, - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, + M: DifferentialObserver + + MapObserver + + for<'it> AsIterMut<'it, Item = u8>, { fn pre_observe_first(&mut self, observers: &mut OTA) -> Result<(), Error> { self.base.pre_observe_first(observers) @@ -581,3 +488,28 @@ where self.base.post_observe_second(observers) } } + +impl<'it, M> AsIter<'it> for HitcountsIterableMapObserver +where + M: AsIter<'it>, +{ + type Item = M::Item; + type Ref = M::Ref; + type IntoIter = M::IntoIter; + + fn as_iter(&'it self) -> Self::IntoIter { + self.base.as_iter() + } +} + +impl<'it, M> AsIterMut<'it> for HitcountsIterableMapObserver +where + M: AsIterMut<'it>, +{ + type RefMut = M::RefMut; + type IntoIterMut = M::IntoIterMut; + + fn as_iter_mut(&'it mut self) -> Self::IntoIterMut { + self.base.as_iter_mut() + } +} diff --git a/libafl/src/observers/map/mod.rs b/libafl/src/observers/map/mod.rs index 4030a71b23..d73dedfbb3 100644 --- a/libafl/src/observers/map/mod.rs +++ b/libafl/src/observers/map/mod.rs @@ -5,18 +5,15 @@ use core::{ fmt::Debug, hash::{Hash, Hasher}, ops::{Deref, DerefMut}, - slice::{Iter, IterMut}, }; use ahash::RandomState; use libafl_bolts::{ownedref::OwnedMutSlice, AsSlice, AsSliceMut, HasLen, Named, Truncate}; -use num_traits::Bounded; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ executors::ExitKind, - inputs::UsesInput, - observers::{DifferentialObserver, Observer, ObserversTuple}, + observers::{DifferentialObserver, Observer}, Error, }; @@ -35,13 
+32,15 @@ pub use multi_map::*; pub mod owned_map; pub use owned_map::*; +/// A trait indicating tracking of observed map values after testcase execution +/// /// Trait marker which indicates that this [`MapObserver`] is tracked for indices or novelties. /// Implementors of feedbacks similar to [`crate::feedbacks::MapFeedback`] may wish to use this to /// ensure that edge metadata is recorded as is appropriate for the provided observer. /// /// If you get a type constraint failure for your map due to this type being unfulfilled, you must /// call [`CanTrack::track_indices`] or [`CanTrack::track_novelties`] **at -/// the initialisation site of your map**. +/// the initialization site of your map**. /// /// This trait allows various components which interact with map metadata to ensure that the /// information they need is actually recorded by the map feedback. @@ -49,7 +48,7 @@ pub use owned_map::*; /// ``` /// # use libafl::corpus::InMemoryCorpus; /// # use libafl::feedbacks::{Feedback, MapFeedbackMetadata}; -/// use libafl::feedbacks::MaxMapFeedback; +/// use libafl::feedbacks::{MaxMapFeedback, StateInitializer}; /// # use libafl::inputs::BytesInput; /// use libafl::observers::{StdMapObserver, CanTrack}; /// use libafl::schedulers::{IndexesLenTimeMinimizerScheduler, QueueScheduler}; @@ -80,8 +79,9 @@ pub use owned_map::*; /// # InMemoryCorpus::::new(), /// # InMemoryCorpus::new(), /// # &mut feedback, -/// # &mut () +/// # &mut (), /// # ).unwrap(); +/// /// # feedback.init_state(&mut state).unwrap(); /// /// let scheduler = IndexesLenTimeMinimizerScheduler::new(&edges_observer, QueueScheduler::new()); @@ -131,18 +131,6 @@ impl CanTrack for ExplicitTracking AsRef for ExplicitTracking { - fn as_ref(&self) -> &T { - &self.0 - } -} - -impl AsMut for ExplicitTracking { - fn as_mut(&mut self) -> &mut T { - &mut self.0 - } -} - impl Named for ExplicitTracking where T: Named, @@ -152,49 +140,40 @@ where } } -impl Observer for ExplicitTracking +impl Observer for ExplicitTracking where - S: UsesInput, - T: Observer, + T: Observer, { fn flush(&mut self) -> Result<(), Error> { self.0.flush() } - fn pre_exec(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, state: &mut S, input: &I) -> Result<(), Error> { self.0.pre_exec(state, input) } - fn post_exec( - &mut self, - state: &mut S, - input: &S::Input, - exit_kind: &ExitKind, - ) -> Result<(), Error> { + fn post_exec(&mut self, state: &mut S, input: &I, exit_kind: &ExitKind) -> Result<(), Error> { self.0.post_exec(state, input, exit_kind) } - fn pre_exec_child(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { + fn pre_exec_child(&mut self, state: &mut S, input: &I) -> Result<(), Error> { self.0.pre_exec_child(state, input) } fn post_exec_child( &mut self, state: &mut S, - input: &S::Input, + input: &I, exit_kind: &ExitKind, ) -> Result<(), Error> { self.0.post_exec_child(state, input, exit_kind) } } -impl DifferentialObserver +impl DifferentialObserver for ExplicitTracking where - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, - T: DifferentialObserver, + T: DifferentialObserver, { fn pre_observe_first(&mut self, observers: &mut OTA) -> Result<(), Error> { self.as_mut().pre_observe_first(observers) @@ -213,6 +192,18 @@ where } } +impl AsRef for ExplicitTracking { + fn as_ref(&self) -> &T { + &self.0 + } +} + +impl AsMut for ExplicitTracking { + fn as_mut(&mut self) -> &mut T { + &mut self.0 + } +} + /// Module which holds the necessary functions and types for map-relevant macros, 
namely /// [`crate::require_index_tracking`] and [`crate::require_novelties_tracking`]. pub mod macros { @@ -243,11 +234,11 @@ pub mod macros { #[macro_export] macro_rules! require_index_tracking { ($name: literal, $obs: ident) => { - struct SanityCheck { + struct TrackingEnabledCheck { phantom: ::core::marker::PhantomData, } - impl SanityCheck { + impl TrackingEnabledCheck { #[rustfmt::skip] const MESSAGE: &'static str = { const LINE_OFFSET: usize = line!().ilog10() as usize + 2; @@ -263,7 +254,7 @@ pub mod macros { SPACING, "| ", ) }; - const TRACKING_SANITY: bool = { + const TRACKING_ENABLED: bool = { if !O::INDICES { panic!("{}", Self::MESSAGE) } else { @@ -272,13 +263,13 @@ pub mod macros { }; #[inline(always)] - fn check_sanity() { - if !Self::TRACKING_SANITY { + fn check_enabled() { + if !Self::TRACKING_ENABLED { unreachable!("{}", Self::MESSAGE); } } } - SanityCheck::<$obs>::check_sanity(); // check that tracking is enabled for this map + TrackingEnabledCheck::<$obs>::check_enabled(); // check that tracking is enabled for this map }; } @@ -306,11 +297,11 @@ pub mod macros { #[macro_export] macro_rules! require_novelties_tracking { ($name: literal, $obs: ident) => { - struct SanityCheck { + struct TrackingEnabledCheck { phantom: ::core::marker::PhantomData, } - impl SanityCheck { + impl TrackingEnabledCheck { #[rustfmt::skip] const MESSAGE: &'static str = { const LINE_OFFSET: usize = line!().ilog10() as usize + 2; @@ -327,7 +318,7 @@ pub mod macros { SPACING, "| ", ) }; - const TRACKING_SANITY: bool = { + const TRACKING_ENABLED: bool = { if !O::NOVELTIES { panic!("{}", Self::MESSAGE) } else { @@ -336,13 +327,13 @@ pub mod macros { }; #[inline(always)] - fn check_sanity() { - if !Self::TRACKING_SANITY { + fn check_enabled() { + if !Self::TRACKING_ENABLED { unreachable!("{}", Self::MESSAGE); } } } - SanityCheck::<$obs>::check_sanity(); // check that tracking is enabled for this map + TrackingEnabledCheck::<$obs>::check_enabled(); // check that tracking is enabled for this map }; } } @@ -359,12 +350,12 @@ pub mod macros { /// /// TODO: enforce `iter() -> AssociatedTypeIter` when generic associated types stabilize pub trait MapObserver: - HasLen + Named + Serialize + serde::de::DeserializeOwned + AsRef + AsMut + Hash + HasLen + Named + Serialize + DeserializeOwned + AsRef + AsMut + Hash // where // for<'it> &'it Self: IntoIterator { /// Type of each entry in this map - type Entry: Bounded + PartialEq + Default + Copy + Debug + Hash + 'static; + type Entry: PartialEq + Copy; /// Get the value at `idx` fn get(&self, idx: usize) -> Self::Entry; @@ -394,6 +385,36 @@ pub trait MapObserver: fn how_many_set(&self, indexes: &[usize]) -> usize; } +/// The "real" length of the underlying map could change at any point in time. +/// Thus, the size of the map should be fetched each time it is used. +pub trait VarLenMapObserver: MapObserver { + /// A mutable slice reference to the map. + /// The length of the map gives the maximum allocatable size. + fn map_slice(&self) -> &[Self::Entry]; + + /// A slice reference to the map. + /// The length of the map gives the maximum allocatable size. + fn map_slice_mut(&mut self) -> &mut [Self::Entry]; + + /// A reference to the size of the map. + fn size(&self) -> &usize; + + /// A mutable reference to the size of the map. + fn size_mut(&mut self) -> &mut usize; +} + +/// Implementors guarantee the size of the map is constant at any point in time and equals N. 
+pub trait ConstLenMapObserver: MapObserver { + /// The size of the map + const LENGTH: usize = N; + + /// A mutable slice reference to the map + fn map_slice(&self) -> &[Self::Entry; N]; + + /// A mutable slice reference to the map + fn map_slice_mut(&mut self) -> &mut [Self::Entry; N]; +} + impl CanTrack for M where M: MapObserver, @@ -416,147 +437,42 @@ where /// that will get updated by the target. /// A well-known example is the AFL-Style coverage map. #[derive(Clone, Serialize, Deserialize, Debug)] -#[serde(bound = "T: serde::de::DeserializeOwned")] #[allow(clippy::unsafe_derive_deserialize)] -pub struct StdMapObserver<'a, T, const DIFFERENTIAL: bool> -where - T: Default + Copy + 'static + Serialize, -{ +pub struct StdMapObserver<'a, T, const DIFFERENTIAL: bool> { map: OwnedMutSlice<'a, T>, initial: T, name: Cow<'static, str>, } -impl<'a, S, T> Observer for StdMapObserver<'a, T, false> +impl Observer for StdMapObserver<'_, T, false> where - S: UsesInput, - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, + Self: MapObserver, { #[inline] - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.reset_map() } } -impl<'a, S, T> Observer for StdMapObserver<'a, T, true> -where - S: UsesInput, - T: Bounded - + PartialEq - + Default - + Copy - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, -{ -} +impl Observer for StdMapObserver<'_, T, true> {} -impl<'a, T, const DIFFERENTIAL: bool> Named for StdMapObserver<'a, T, DIFFERENTIAL> -where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, -{ +impl Named for StdMapObserver<'_, T, DIFFERENTIAL> { #[inline] fn name(&self) -> &Cow<'static, str> { &self.name } } -impl<'a, T, const DIFFERENTIAL: bool> HasLen for StdMapObserver<'a, T, DIFFERENTIAL> -where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, -{ +impl HasLen for StdMapObserver<'_, T, DIFFERENTIAL> { #[inline] fn len(&self) -> usize { self.map.as_slice().len() } } -impl<'a, 'it, T, const DIFFERENTIAL: bool> IntoIterator for &'it StdMapObserver<'a, T, DIFFERENTIAL> +impl Hash for StdMapObserver<'_, T, DIFFERENTIAL> where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, -{ - type Item = as Iterator>::Item; - type IntoIter = Iter<'it, T>; - - fn into_iter(self) -> Self::IntoIter { - let cnt = self.usable_count(); - self.as_slice()[..cnt].iter() - } -} - -impl<'a, 'it, T, const DIFFERENTIAL: bool> IntoIterator - for &'it mut StdMapObserver<'a, T, DIFFERENTIAL> -where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, -{ - type Item = as Iterator>::Item; - type IntoIter = IterMut<'it, T>; - - fn into_iter(self) -> Self::IntoIter { - let cnt = self.usable_count(); - self.as_slice_mut()[..cnt].iter_mut() - } -} - -impl<'a, T, const DIFFERENTIAL: bool> StdMapObserver<'a, T, DIFFERENTIAL> -where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, -{ - /// Returns an iterator over the map. - pub fn iter(&self) -> Iter<'_, T> { - <&Self as IntoIterator>::into_iter(self) - } - - /// Returns a mutable iterator over the map. 
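// Illustrative sketch (not part of the patch): the `require_index_tracking!`
// macro above (with `SanityCheck` renamed to `TrackingEnabledCheck`) relies on
// an associated const that panics during evaluation when tracking is disabled,
// turning a misconfigured observer into a compile-time error. The reduced,
// self-contained version below shows that mechanism; `CanTrack`, `Tracked`, and
// the message text are simplified stand-ins for the real macro expansion.

use core::marker::PhantomData;

/// Reduced tracking marker: observers advertise whether they record hit indices.
trait CanTrack {
    const INDICES: bool;
}

/// An observer type with index tracking enabled.
struct Tracked;
impl CanTrack for Tracked {
    const INDICES: bool = true;
}

#[allow(dead_code)]
struct TrackingEnabledCheck<O> {
    phantom: PhantomData<O>,
}

impl<O: CanTrack> TrackingEnabledCheck<O> {
    // Evaluated at compile time for each concrete `O`; an observer with
    // `INDICES = false` aborts compilation with this panic message.
    const TRACKING_ENABLED: bool = {
        if !O::INDICES {
            panic!("index tracking is not enabled for this observer")
        } else {
            true
        }
    };

    #[inline(always)]
    fn check_enabled() {
        if !Self::TRACKING_ENABLED {
            unreachable!();
        }
    }
}

fn main() {
    // Forces the const above to be evaluated for `Tracked`; compiles and passes.
    TrackingEnabledCheck::<Tracked>::check_enabled();
    println!("tracking check passed");
}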
- pub fn iter_mut(&mut self) -> IterMut<'_, T> { - <&mut Self as IntoIterator>::into_iter(self) - } -} - -impl<'a, T, const DIFFERENTIAL: bool> Hash for StdMapObserver<'a, T, DIFFERENTIAL> -where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, + T: Hash, { #[inline] fn hash(&self, hasher: &mut H) { @@ -564,35 +480,21 @@ where } } -impl<'a, T, const DIFFERENTIAL: bool> AsRef for StdMapObserver<'a, T, DIFFERENTIAL> -where - T: Default + Copy + 'static + Serialize, -{ +impl AsRef for StdMapObserver<'_, T, DIFFERENTIAL> { fn as_ref(&self) -> &Self { self } } -impl<'a, T, const DIFFERENTIAL: bool> AsMut for StdMapObserver<'a, T, DIFFERENTIAL> -where - T: Default + Copy + 'static + Serialize, -{ +impl AsMut for StdMapObserver<'_, T, DIFFERENTIAL> { fn as_mut(&mut self) -> &mut Self { self } } -impl<'a, T, const DIFFERENTIAL: bool> MapObserver for StdMapObserver<'a, T, DIFFERENTIAL> +impl MapObserver for StdMapObserver<'_, T, DIFFERENTIAL> where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, + T: PartialEq + Copy + Hash + Serialize + DeserializeOwned + Debug, { type Entry = T; @@ -665,36 +567,20 @@ where } } -impl<'a, T, const DIFFERENTIAL: bool> Truncate for StdMapObserver<'a, T, DIFFERENTIAL> -where - T: Bounded - + PartialEq - + Default - + Copy - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, -{ +impl Truncate for StdMapObserver<'_, T, DIFFERENTIAL> { fn truncate(&mut self, new_len: usize) { self.map.truncate(new_len); } } -impl<'a, T, const DIFFERENTIAL: bool> Deref for StdMapObserver<'a, T, DIFFERENTIAL> -where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, -{ +impl Deref for StdMapObserver<'_, T, DIFFERENTIAL> { type Target = [T]; fn deref(&self) -> &[T] { &self.map } } -impl<'a, T, const DIFFERENTIAL: bool> DerefMut for StdMapObserver<'a, T, DIFFERENTIAL> -where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Debug, -{ +impl DerefMut for StdMapObserver<'_, T, DIFFERENTIAL> { fn deref_mut(&mut self) -> &mut [T] { &mut self.map } @@ -702,7 +588,7 @@ where impl<'a, T, const DIFFERENTIAL: bool> StdMapObserver<'a, T, DIFFERENTIAL> where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, + T: Default, { /// Creates a new [`MapObserver`] /// @@ -793,7 +679,7 @@ where impl<'a, T> StdMapObserver<'a, T, false> where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, + T: Default, { /// Creates a new [`MapObserver`] /// @@ -851,7 +737,7 @@ where impl<'a, T> StdMapObserver<'a, T, true> where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned, + T: Default, { /// Creates a new [`MapObserver`] in differential mode /// @@ -899,18 +785,4 @@ where } } -impl<'a, OTA, OTB, S, T> DifferentialObserver for StdMapObserver<'a, T, true> -where - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, - T: Bounded - + PartialEq - + Default - + Copy - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug, -{ -} +impl DifferentialObserver for StdMapObserver<'_, T, true> {} diff --git a/libafl/src/observers/map/multi_map.rs b/libafl/src/observers/map/multi_map.rs index 0a835e4969..7c0bf533b9 100644 --- a/libafl/src/observers/map/multi_map.rs +++ b/libafl/src/observers/map/multi_map.rs @@ -5,8 +5,7 @@ use core::{ fmt::Debug, hash::{Hash, Hasher}, iter::Flatten, - mem::size_of, - slice::{self, Iter, 
IterMut}, + slice::{Iter, IterMut}, }; use ahash::RandomState; @@ -14,23 +13,17 @@ use libafl_bolts::{ ownedref::OwnedMutSlice, AsIter, AsIterMut, AsSlice, AsSliceMut, HasLen, Named, }; use meminterval::IntervalTree; -use num_traits::Bounded; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ - inputs::UsesInput, - observers::{map::MapObserver, DifferentialObserver, Observer, ObserversTuple}, + observers::{map::MapObserver, DifferentialObserver, Observer}, Error, }; /// The Multi Map Observer merge different maps into one observer #[derive(Serialize, Deserialize, Debug)] -#[serde(bound = "T: serde::de::DeserializeOwned")] #[allow(clippy::unsafe_derive_deserialize)] -pub struct MultiMapObserver<'a, T, const DIFFERENTIAL: bool> -where - T: 'static + Default + Copy + Serialize + Debug, -{ +pub struct MultiMapObserver<'a, T, const DIFFERENTIAL: bool> { maps: Vec>, intervals: IntervalTree, len: usize, @@ -39,92 +32,62 @@ where iter_idx: usize, } -impl<'a, S, T> Observer for MultiMapObserver<'a, T, false> +impl Observer for MultiMapObserver<'_, T, false> where - S: UsesInput, - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, Self: MapObserver, { #[inline] - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.reset_map() } } -impl<'a, S, T> Observer for MultiMapObserver<'a, T, true> -where - S: UsesInput, - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, - Self: MapObserver, -{ +impl Observer for MultiMapObserver<'_, T, true> { // in differential mode, we are *not* responsible for resetting the map! } -impl<'a, T, const DIFFERENTIAL: bool> Named for MultiMapObserver<'a, T, DIFFERENTIAL> -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, -{ +impl Named for MultiMapObserver<'_, T, DIFFERENTIAL> { #[inline] fn name(&self) -> &Cow<'static, str> { &self.name } } -impl<'a, T, const DIFFERENTIAL: bool> HasLen for MultiMapObserver<'a, T, DIFFERENTIAL> -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, -{ +impl HasLen for MultiMapObserver<'_, T, DIFFERENTIAL> { #[inline] fn len(&self) -> usize { self.len } } -impl<'a, T, const DIFFERENTIAL: bool> Hash for MultiMapObserver<'a, T, DIFFERENTIAL> +impl Hash for MultiMapObserver<'_, T, DIFFERENTIAL> where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, + T: Hash, { fn hash(&self, hasher: &mut H) { for map in &self.maps { let slice = map.as_slice(); - let ptr = slice.as_ptr() as *const u8; - let map_size = slice.len() / size_of::(); - unsafe { - hasher.write(slice::from_raw_parts(ptr, map_size)); - } + + slice.hash(hasher); } } } -impl<'a, T, const DIFFERENTIAL: bool> AsRef for MultiMapObserver<'a, T, DIFFERENTIAL> -where - T: 'static + Default + Copy + Serialize + Debug, -{ +impl AsRef for MultiMapObserver<'_, T, DIFFERENTIAL> { fn as_ref(&self) -> &Self { self } } -impl<'a, T, const DIFFERENTIAL: bool> AsMut for MultiMapObserver<'a, T, DIFFERENTIAL> -where - T: 'static + Default + Copy + Serialize + Debug, -{ +impl AsMut for MultiMapObserver<'_, T, DIFFERENTIAL> { fn as_mut(&mut self) -> &mut Self { self } } -impl<'a, T, const DIFFERENTIAL: bool> MapObserver for MultiMapObserver<'a, T, DIFFERENTIAL> +impl MapObserver for MultiMapObserver<'_, T, DIFFERENTIAL> where - T: 'static - + Bounded - + PartialEq - + Default - + Copy - + 
Hash - + Serialize - + serde::de::DeserializeOwned - + Debug, + T: PartialEq + Copy + Hash + Serialize + DeserializeOwned + Debug, { type Entry = T; @@ -206,7 +169,7 @@ where impl<'a, T, const DIFFERENTIAL: bool> MultiMapObserver<'a, T, DIFFERENTIAL> where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, + T: Default, { /// Creates a new [`MultiMapObserver`], maybe in differential mode #[must_use] @@ -231,7 +194,7 @@ where impl<'a, T> MultiMapObserver<'a, T, true> where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, + T: Default, { /// Creates a new [`MultiMapObserver`] in differential mode #[must_use] @@ -242,7 +205,7 @@ where impl<'a, T> MultiMapObserver<'a, T, false> where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, + T: Default, { /// Creates a new [`MultiMapObserver`] #[must_use] @@ -279,7 +242,7 @@ where impl<'a, 'it, T, const DIFFERENTIAL: bool> AsIter<'it> for MultiMapObserver<'a, T, DIFFERENTIAL> where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, + T: 'a, 'a: 'it, { type Item = T; @@ -293,7 +256,7 @@ where impl<'a, 'it, T, const DIFFERENTIAL: bool> AsIterMut<'it> for MultiMapObserver<'a, T, DIFFERENTIAL> where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, + T: 'a, 'a: 'it, { type RefMut = &'it mut T; @@ -304,53 +267,4 @@ where } } -impl<'a, 'it, T, const DIFFERENTIAL: bool> IntoIterator - for &'it MultiMapObserver<'a, T, DIFFERENTIAL> -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, -{ - type Item = as Iterator>::Item; - type IntoIter = Flatten>>; - - fn into_iter(self) -> Self::IntoIter { - self.maps.iter().flatten() - } -} - -impl<'a, 'it, T, const DIFFERENTIAL: bool> IntoIterator - for &'it mut MultiMapObserver<'a, T, DIFFERENTIAL> -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, -{ - type Item = as Iterator>::Item; - type IntoIter = Flatten>>; - - fn into_iter(self) -> Self::IntoIter { - self.maps.iter_mut().flatten() - } -} - -impl<'a, T, const DIFFERENTIAL: bool> MultiMapObserver<'a, T, DIFFERENTIAL> -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, -{ - /// Returns an iterator over the map. - pub fn iter(&self) -> <&Self as IntoIterator>::IntoIter { - <&Self as IntoIterator>::into_iter(self) - } - - /// Returns a mutable iterator over the map. 
- pub fn iter_mut(&mut self) -> <&mut Self as IntoIterator>::IntoIter { - <&mut Self as IntoIterator>::into_iter(self) - } -} - -impl<'a, T, OTA, OTB, S> DifferentialObserver for MultiMapObserver<'a, T, true> -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, - Self: MapObserver, - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, -{ -} +impl DifferentialObserver for MultiMapObserver<'_, T, true> {} diff --git a/libafl/src/observers/map/owned_map.rs b/libafl/src/observers/map/owned_map.rs index 518eb9d2da..66d0fc7e8b 100644 --- a/libafl/src/observers/map/owned_map.rs +++ b/libafl/src/observers/map/owned_map.rs @@ -5,107 +5,53 @@ use core::{ fmt::Debug, hash::{Hash, Hasher}, ops::{Deref, DerefMut}, - slice::{Iter, IterMut}, }; use ahash::RandomState; use libafl_bolts::{AsSlice, AsSliceMut, HasLen, Named}; -use num_traits::Bounded; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ - inputs::UsesInput, observers::{map::MapObserver, Observer}, Error, }; /// Exact copy of `StdMapObserver` that owns its map #[derive(Serialize, Deserialize, Debug, Clone)] -#[serde(bound = "T: serde::de::DeserializeOwned")] #[allow(clippy::unsafe_derive_deserialize)] -pub struct OwnedMapObserver -where - T: 'static + Default + Copy + Serialize, -{ +pub struct OwnedMapObserver { map: Vec, initial: T, name: Cow<'static, str>, } -impl Observer for OwnedMapObserver +impl Observer for OwnedMapObserver where - S: UsesInput, - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, Self: MapObserver, { #[inline] - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.reset_map() } } -impl Named for OwnedMapObserver -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned, -{ +impl Named for OwnedMapObserver { #[inline] fn name(&self) -> &Cow<'static, str> { &self.name } } -impl HasLen for OwnedMapObserver -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned, -{ +impl HasLen for OwnedMapObserver { #[inline] fn len(&self) -> usize { self.map.as_slice().len() } } -impl<'it, T> IntoIterator for &'it OwnedMapObserver -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, -{ - type Item = as Iterator>::Item; - type IntoIter = Iter<'it, T>; - - fn into_iter(self) -> Self::IntoIter { - self.as_slice().iter() - } -} - -impl<'it, T> IntoIterator for &'it mut OwnedMapObserver -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, -{ - type Item = as Iterator>::Item; - type IntoIter = IterMut<'it, T>; - - fn into_iter(self) -> Self::IntoIter { - self.as_slice_mut().iter_mut() - } -} - -impl OwnedMapObserver -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, -{ - /// Returns an iterator over the map. - pub fn iter(&self) -> Iter<'_, T> { - <&Self as IntoIterator>::into_iter(self) - } - - /// Returns a mutable iterator over the map. 
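// Illustrative sketch (not part of the patch): across these hunks the manual
// `IntoIterator`/`iter()`/`iter_mut()` impls are dropped because the observers
// now expose their backing storage through `Deref`/`DerefMut` (to the base
// observer, or to `[T]` for the slice-backed maps). `ToyMapObserver` below is a
// hypothetical stand-in showing why that suffices: slice iteration, indexing,
// and `len()` all arrive through the deref for free.

use core::ops::{Deref, DerefMut};

struct ToyMapObserver {
    map: Vec<u8>,
}

impl Deref for ToyMapObserver {
    type Target = [u8];

    fn deref(&self) -> &[u8] {
        &self.map
    }
}

impl DerefMut for ToyMapObserver {
    fn deref_mut(&mut self) -> &mut [u8] {
        &mut self.map
    }
}

fn main() {
    let mut obs = ToyMapObserver {
        map: vec![0, 1, 0, 3],
    };

    // No hand-written iterator impls needed: these all go through Deref.
    assert_eq!(obs.len(), 4);
    assert_eq!(obs[3], 3);
    let set_entries = obs.iter().filter(|&&e| e != 0).count();
    assert_eq!(set_entries, 2);

    // And DerefMut gives mutable slice access.
    for e in obs.iter_mut() {
        *e = e.saturating_add(1);
    }
    assert_eq!(&obs[..], &[1u8, 2, 1, 4][..]);
}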
- pub fn iter_mut(&mut self) -> IterMut<'_, T> { - <&mut Self as IntoIterator>::into_iter(self) - } -} - impl Hash for OwnedMapObserver where - T: 'static + Hash + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, + T: Hash, { #[inline] fn hash(&self, hasher: &mut H) { @@ -113,19 +59,13 @@ where } } -impl AsRef for OwnedMapObserver -where - T: 'static + Default + Copy + Serialize, -{ +impl AsRef for OwnedMapObserver { fn as_ref(&self) -> &Self { self } } -impl AsMut for OwnedMapObserver -where - T: 'static + Default + Copy + Serialize, -{ +impl AsMut for OwnedMapObserver { fn as_mut(&mut self) -> &mut Self { self } @@ -133,15 +73,7 @@ where impl MapObserver for OwnedMapObserver where - T: 'static - + Bounded - + PartialEq - + Default - + Copy - + Hash - + Serialize - + serde::de::DeserializeOwned - + Debug, + T: PartialEq + Copy + Hash + Serialize + DeserializeOwned + Debug, { type Entry = T; @@ -214,10 +146,7 @@ where } } -impl Deref for OwnedMapObserver -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, -{ +impl Deref for OwnedMapObserver { type Target = [T]; fn deref(&self) -> &[T] { @@ -225,10 +154,7 @@ where } } -impl DerefMut for OwnedMapObserver -where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + Debug, -{ +impl DerefMut for OwnedMapObserver { fn deref_mut(&mut self) -> &mut [T] { &mut self.map } @@ -236,7 +162,7 @@ where impl OwnedMapObserver where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned, + T: Copy + Default, { /// Creates a new [`MapObserver`] with an owned map #[must_use] diff --git a/libafl/src/observers/map/variable_map.rs b/libafl/src/observers/map/variable_map.rs index 381b837d0f..ec1a5feda3 100644 --- a/libafl/src/observers/map/variable_map.rs +++ b/libafl/src/observers/map/variable_map.rs @@ -5,7 +5,6 @@ use core::{ fmt::Debug, hash::{Hash, Hasher}, ops::{Deref, DerefMut}, - slice::{Iter, IterMut}, }; use ahash::RandomState; @@ -13,152 +12,50 @@ use libafl_bolts::{ ownedref::{OwnedMutPtr, OwnedMutSlice}, AsSlice, AsSliceMut, HasLen, Named, }; -use num_traits::Bounded; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use crate::{ - inputs::UsesInput, - observers::{map::MapObserver, Observer}, + observers::{map::MapObserver, Observer, VarLenMapObserver}, Error, }; /// Overlooking a variable bitmap #[derive(Serialize, Deserialize, Debug)] -#[serde(bound = "T: serde::de::DeserializeOwned")] #[allow(clippy::unsafe_derive_deserialize)] -pub struct VariableMapObserver<'a, T> -where - T: Default + Copy + 'static + Serialize + PartialEq + Bounded, -{ +pub struct VariableMapObserver<'a, T> { map: OwnedMutSlice<'a, T>, size: OwnedMutPtr, initial: T, name: Cow<'static, str>, } -impl<'a, S, T> Observer for VariableMapObserver<'a, T> +impl Observer for VariableMapObserver<'_, T> where - S: UsesInput, - T: Default - + Copy - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug - + Bounded - + PartialEq, Self: MapObserver, { #[inline] - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.reset_map() } } -impl<'a, T> Named for VariableMapObserver<'a, T> -where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + Bounded + PartialEq, -{ +impl Named for VariableMapObserver<'_, T> { #[inline] fn name(&self) -> &Cow<'static, str> { &self.name } } -impl<'a, T> HasLen for VariableMapObserver<'a, 
T> -where - T: Default + Copy + 'static + Serialize + serde::de::DeserializeOwned + PartialEq + Bounded, -{ +impl HasLen for VariableMapObserver<'_, T> { #[inline] fn len(&self) -> usize { *self.size.as_ref() } } -impl<'a, 'it, T> IntoIterator for &'it VariableMapObserver<'a, T> +impl Hash for VariableMapObserver<'_, T> where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug - + PartialEq - + Bounded, -{ - type Item = as Iterator>::Item; - type IntoIter = Iter<'it, T>; - - fn into_iter(self) -> Self::IntoIter { - let cnt = self.usable_count(); - self.as_slice()[..cnt].iter() - } -} - -impl<'a, 'it, T> IntoIterator for &'it mut VariableMapObserver<'a, T> -where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug - + PartialEq - + Bounded, -{ - type Item = as Iterator>::Item; - type IntoIter = IterMut<'it, T>; - - fn into_iter(self) -> Self::IntoIter { - let cnt = self.usable_count(); - self.as_slice_mut()[..cnt].iter_mut() - } -} - -impl<'a, T> VariableMapObserver<'a, T> -where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug - + PartialEq - + Bounded, -{ - /// Returns an iterator over the map. - pub fn iter(&self) -> Iter<'_, T> { - <&Self as IntoIterator>::into_iter(self) - } - - /// Returns a mutable iterator over the map. - pub fn iter_mut(&mut self) -> IterMut<'_, T> { - <&mut Self as IntoIterator>::into_iter(self) - } -} - -impl<'a, T> Hash for VariableMapObserver<'a, T> -where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug - + PartialEq - + Bounded, + T: Hash, { #[inline] fn hash(&self, hasher: &mut H) { @@ -166,37 +63,21 @@ where } } -impl<'a, T> AsRef for VariableMapObserver<'a, T> -where - T: Default + Copy + 'static + Serialize + PartialEq + Bounded, -{ +impl AsRef for VariableMapObserver<'_, T> { fn as_ref(&self) -> &Self { self } } -impl<'a, T> AsMut for VariableMapObserver<'a, T> -where - T: Default + Copy + 'static + Serialize + PartialEq + Bounded, -{ +impl AsMut for VariableMapObserver<'_, T> { fn as_mut(&mut self) -> &mut Self { self } } -impl<'a, T> MapObserver for VariableMapObserver<'a, T> +impl MapObserver for VariableMapObserver<'_, T> where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug - + PartialEq - + Bounded, + T: PartialEq + Copy + Hash + Serialize + DeserializeOwned + Debug, { type Entry = T; @@ -268,48 +149,45 @@ where } } -impl<'a, T> Deref for VariableMapObserver<'a, T> +impl VarLenMapObserver for VariableMapObserver<'_, T> where - T: Bounded - + PartialEq - + Default - + Copy - + Hash - + 'static - + Serialize - + serde::de::DeserializeOwned - + Debug - + PartialEq - + Bounded, + T: PartialEq + Copy + Hash + Serialize + DeserializeOwned + Debug, { + fn map_slice(&self) -> &[Self::Entry] { + self.map.as_ref() + } + + fn map_slice_mut(&mut self) -> &mut [Self::Entry] { + self.map.as_mut() + } + + fn size(&self) -> &usize { + self.size.as_ref() + } + + fn size_mut(&mut self) -> &mut usize { + self.size.as_mut() + } +} + +impl Deref for VariableMapObserver<'_, T> { type Target = [T]; fn deref(&self) -> &[T] { - let cnt = self.usable_count(); + let cnt = *self.size.as_ref(); &self.map[..cnt] } } -impl<'a, T> DerefMut for VariableMapObserver<'a, T> -where - T: 'static - + Default - + Copy - 
+ Hash - + Serialize - + serde::de::DeserializeOwned - + Debug - + PartialEq - + Bounded, -{ +impl DerefMut for VariableMapObserver<'_, T> { fn deref_mut(&mut self) -> &mut [T] { - let cnt = self.usable_count(); + let cnt = *self.size.as_ref(); &mut self.map[..cnt] } } impl<'a, T> VariableMapObserver<'a, T> where - T: 'static + Default + Copy + Serialize + serde::de::DeserializeOwned + PartialEq + Bounded, + T: Default, { /// Creates a new [`MapObserver`] from an [`OwnedMutSlice`] /// diff --git a/libafl/src/observers/mod.rs b/libafl/src/observers/mod.rs index d27ff4860a..d312b1e693 100644 --- a/libafl/src/observers/mod.rs +++ b/libafl/src/observers/mod.rs @@ -39,14 +39,11 @@ pub use list::*; use serde::{Deserialize, Serialize}; pub use value::*; -use crate::{executors::ExitKind, inputs::UsesInput, state::UsesState, Error}; +use crate::{executors::ExitKind, Error}; /// Observers observe different information about the target. /// They can then be used by various sorts of feedback. -pub trait Observer: Named -where - S: UsesInput, -{ +pub trait Observer: Named { /// The testcase finished execution, calculate any changes. /// Reserved for future use. #[inline] @@ -56,7 +53,7 @@ where /// Called right before execution starts. #[inline] - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { Ok(()) } @@ -65,7 +62,7 @@ where fn post_exec( &mut self, _state: &mut S, - _input: &S::Input, + _input: &I, _exit_kind: &ExitKind, ) -> Result<(), Error> { Ok(()) @@ -73,7 +70,7 @@ where /// Called right before execution starts in the child process, if any. #[inline] - fn pre_exec_child(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec_child(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { Ok(()) } @@ -82,86 +79,72 @@ where fn post_exec_child( &mut self, _state: &mut S, - _input: &S::Input, + _input: &I, _exit_kind: &ExitKind, ) -> Result<(), Error> { Ok(()) } } -/// Defines the observer type shared across traits of the type. -/// Needed for consistency across HasCorpus/HasSolutions and friends. -pub trait UsesObservers: UsesState { - /// The observers type - type Observers: ObserversTuple; -} - /// A haskell-style tuple of observers -pub trait ObserversTuple: MatchName -where - S: UsesInput, -{ +pub trait ObserversTuple: MatchName { /// This is called right before the next execution. - fn pre_exec_all(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error>; + fn pre_exec_all(&mut self, state: &mut S, input: &I) -> Result<(), Error>; /// This is called right after the last execution fn post_exec_all( &mut self, state: &mut S, - input: &S::Input, + input: &I, exit_kind: &ExitKind, ) -> Result<(), Error>; /// This is called right before the next execution in the child process, if any. - fn pre_exec_child_all(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error>; + fn pre_exec_child_all(&mut self, state: &mut S, input: &I) -> Result<(), Error>; /// This is called right after the last execution in the child process, if any. 
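As a rough sketch of the new `Observer<I, S>` shape introduced above, where the trait is generic over the input and state types directly instead of bounding `S: UsesInput`: the observer name `ExecCountObserver`, its fields, and the import paths below are illustrative assumptions, not part of this patch.

```rust
use std::borrow::Cow;

use libafl::{executors::ExitKind, observers::Observer, Error};
use libafl_bolts::Named;
use serde::{Deserialize, Serialize};

/// Toy observer that counts finished executions (illustrative only).
#[derive(Debug, Serialize, Deserialize)]
struct ExecCountObserver {
    name: Cow<'static, str>,
    count: u64,
}

impl Named for ExecCountObserver {
    fn name(&self) -> &Cow<'static, str> {
        &self.name
    }
}

// Generic over any input `I` and state `S`; all hooks have default bodies,
// so only the ones we care about need to be overridden.
impl<I, S> Observer<I, S> for ExecCountObserver {
    fn post_exec(
        &mut self,
        _state: &mut S,
        _input: &I,
        _exit_kind: &ExitKind,
    ) -> Result<(), Error> {
        self.count += 1;
        Ok(())
    }
}

fn main() {
    let mut obs = ExecCountObserver {
        name: Cow::Borrowed("execs"),
        count: 0,
    };
    // Drive the hook directly with a unit state and unit input, just to show the shape.
    <ExecCountObserver as Observer<(), ()>>::post_exec(&mut obs, &mut (), &(), &ExitKind::Ok)
        .unwrap();
    assert_eq!(obs.count, 1);
}
```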
fn post_exec_child_all( &mut self, state: &mut S, - input: &S::Input, + input: &I, exit_kind: &ExitKind, ) -> Result<(), Error>; } -impl ObserversTuple for () -where - S: UsesInput, -{ - fn pre_exec_all(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { +impl ObserversTuple for () { + fn pre_exec_all(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { Ok(()) } fn post_exec_all( &mut self, _state: &mut S, - _input: &S::Input, + _input: &I, _exit_kind: &ExitKind, ) -> Result<(), Error> { Ok(()) } - fn pre_exec_child_all(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec_child_all(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { Ok(()) } fn post_exec_child_all( &mut self, _state: &mut S, - _input: &S::Input, + _input: &I, _exit_kind: &ExitKind, ) -> Result<(), Error> { Ok(()) } } -impl ObserversTuple for (Head, Tail) +impl ObserversTuple for (Head, Tail) where - Head: Observer, - Tail: ObserversTuple, - S: UsesInput, + Head: Observer, + Tail: ObserversTuple, { - fn pre_exec_all(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { + fn pre_exec_all(&mut self, state: &mut S, input: &I) -> Result<(), Error> { self.0.pre_exec(state, input)?; self.1.pre_exec_all(state, input) } @@ -169,14 +152,14 @@ where fn post_exec_all( &mut self, state: &mut S, - input: &S::Input, + input: &I, exit_kind: &ExitKind, ) -> Result<(), Error> { self.0.post_exec(state, input, exit_kind)?; self.1.post_exec_all(state, input, exit_kind) } - fn pre_exec_child_all(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { + fn pre_exec_child_all(&mut self, state: &mut S, input: &I) -> Result<(), Error> { self.0.pre_exec_child(state, input)?; self.1.pre_exec_child_all(state, input) } @@ -184,7 +167,7 @@ where fn post_exec_child_all( &mut self, state: &mut S, - input: &S::Input, + input: &I, exit_kind: &ExitKind, ) -> Result<(), Error> { self.0.post_exec_child(state, input, exit_kind)?; @@ -219,12 +202,7 @@ pub trait ObserverWithHashField { /// `DifferentialObserver::{pre,post}_observe_{first,second}` as necessary for first and second, /// respectively. #[allow(unused_variables)] -pub trait DifferentialObserver: Observer -where - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, -{ +pub trait DifferentialObserver: Observer { /// Perform an operation with the first set of observers before they are `pre_exec`'d. fn pre_observe_first(&mut self, observers: &mut OTA) -> Result<(), Error> { Ok(()) @@ -247,12 +225,7 @@ where } /// Differential observers tuple, for when you're using multiple differential observers. -pub trait DifferentialObserversTuple: ObserversTuple -where - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, -{ +pub trait DifferentialObserversTuple: ObserversTuple { /// Perform an operation with the first set of observers before they are `pre_exec`'d on all the /// differential observers in this tuple. 
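The `ObserversTuple<I, S>` blanket impls above make a `tuple_list!` of observers callable with any state/input pair the contained observers accept. A hedged smoke-test sketch follows; the unit state and unit input are only for illustration, and it relies on `TimeObserver` keeping a blanket `Observer<I, S>` impl later in this patch.

```rust
use libafl::{
    executors::ExitKind,
    observers::{ObserversTuple, TimeObserver},
    Error,
};
use libafl_bolts::tuples::tuple_list;

fn main() -> Result<(), Error> {
    // A one-element observers tuple; `()` terminates the tuple list.
    let mut observers = tuple_list!(TimeObserver::new("time"));

    let mut state = ();
    let input = ();

    // The tuple forwards to each observer's pre_exec/post_exec in order.
    observers.pre_exec_all(&mut state, &input)?;
    observers.post_exec_all(&mut state, &input, &ExitKind::Ok)?;
    Ok(())
}
```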
fn pre_observe_first_all(&mut self, observers: &mut OTA) -> Result<(), Error>; @@ -270,12 +243,7 @@ where fn post_observe_second_all(&mut self, observers: &mut OTB) -> Result<(), Error>; } -impl DifferentialObserversTuple for () -where - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, -{ +impl DifferentialObserversTuple for () { fn pre_observe_first_all(&mut self, _: &mut OTA) -> Result<(), Error> { Ok(()) } @@ -293,13 +261,10 @@ where } } -impl DifferentialObserversTuple for (Head, Tail) +impl DifferentialObserversTuple for (Head, Tail) where - Head: DifferentialObserver, - Tail: DifferentialObserversTuple, - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, + Head: DifferentialObserver, + Tail: DifferentialObserversTuple, { fn pre_observe_first_all(&mut self, observers: &mut OTA) -> Result<(), Error> { self.0.pre_observe_first(observers)?; @@ -387,19 +352,16 @@ impl TimeObserver { } } -impl Observer for TimeObserver -where - S: UsesInput, -{ +impl Observer for TimeObserver { #[cfg(feature = "std")] - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.last_runtime = None; self.start_time = Instant::now(); Ok(()) } #[cfg(not(feature = "std"))] - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.last_runtime = None; self.start_time = current_time(); Ok(()) @@ -409,7 +371,7 @@ where fn post_exec( &mut self, _state: &mut S, - _input: &S::Input, + _input: &I, _exit_kind: &ExitKind, ) -> Result<(), Error> { self.last_runtime = Some(self.start_time.elapsed()); @@ -420,7 +382,7 @@ where fn post_exec( &mut self, _state: &mut S, - _input: &S::Input, + _input: &I, _exit_kind: &ExitKind, ) -> Result<(), Error> { self.last_runtime = current_time().checked_sub(self.start_time); @@ -434,20 +396,12 @@ impl Named for TimeObserver { } } -impl DifferentialObserver for TimeObserver -where - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, -{ -} +impl DifferentialObserver for TimeObserver {} #[cfg(feature = "std")] #[cfg(test)] mod tests { - use core::ptr::addr_of_mut; - use libafl_bolts::{ ownedref::OwnedMutSlice, tuples::{tuple_list, tuple_list_type}, @@ -460,10 +414,12 @@ mod tests { #[test] fn test_observer_serde() { + let map_ptr = &raw const MAP; let obv = tuple_list!(TimeObserver::new("time"), unsafe { + let len = (*map_ptr).len(); StdMapObserver::from_ownedref( "map", - OwnedMutSlice::from_raw_parts_mut(addr_of_mut!(MAP) as *mut u32, MAP.len()), + OwnedMutSlice::from_raw_parts_mut(&raw mut MAP as *mut u32, len), ) }); let vec = postcard::to_allocvec(&obv).unwrap(); diff --git a/libafl/src/observers/profiling.rs b/libafl/src/observers/profiling.rs index c6c0e9bee9..8f73169f26 100644 --- a/libafl/src/observers/profiling.rs +++ b/libafl/src/observers/profiling.rs @@ -5,7 +5,7 @@ use hashbrown::HashMap; use libafl_bolts::{ownedref::OwnedMutPtr, Named}; use serde::{Deserialize, Serialize}; -use crate::{inputs::UsesInput, observers::Observer, state::State, Error}; +use crate::{observers::Observer, Error}; #[derive(Debug, Serialize, Deserialize)] /// The json data pub struct FunctionData { @@ -85,9 +85,12 @@ impl ProfilingObserver { where P: AsRef, { - let f = File::open(json_path)?; + let f = File::open(json_path.as_ref())?; let reader = BufReader::new(f); - let analysis_data: AnalysisData = serde_json::from_reader(reader)?; + let analysis_data: 
AnalysisData = serde_json::from_reader(reader).map_err(|err| { + let path = json_path.as_ref().to_string_lossy(); + Error::illegal_argument(format!("Failed to read from path {path}: {err:?}")) + })?; // debug /* for record in &analysis_data.data { @@ -124,14 +127,11 @@ impl Named for ProfilingObserver { } } -impl Observer for ProfilingObserver -where - S: State, -{ +impl Observer for ProfilingObserver { fn post_exec( &mut self, _state: &mut S, - _input: &::Input, + _input: &I, _exit_kind: &crate::executors::ExitKind, ) -> Result<(), Error> { // in reality, this should be done in a stage diff --git a/libafl/src/observers/stacktrace.rs b/libafl/src/observers/stacktrace.rs index dacc00f5c8..9c6b13958e 100644 --- a/libafl/src/observers/stacktrace.rs +++ b/libafl/src/observers/stacktrace.rs @@ -39,7 +39,7 @@ use regex::Regex; use serde::{Deserialize, Serialize}; use super::ObserverWithHashField; -use crate::{executors::ExitKind, inputs::UsesInput, observers::Observer, Error}; +use crate::{executors::ExitKind, observers::Observer, Error}; #[cfg(not(feature = "casr"))] /// Collects the backtrace via [`Backtrace`] and [`Debug`] @@ -188,7 +188,7 @@ impl<'a> BacktraceObserver<'a> { } } -impl<'a> ObserverWithHashField for BacktraceObserver<'a> { +impl ObserverWithHashField for BacktraceObserver<'_> { /// Gets the hash value of this observer. #[must_use] fn hash(&self) -> Option { @@ -196,16 +196,8 @@ impl<'a> ObserverWithHashField for BacktraceObserver<'a> { } } -impl<'a, S> Observer for BacktraceObserver<'a> -where - S: UsesInput, -{ - fn post_exec( - &mut self, - _state: &mut S, - _input: &S::Input, - exit_kind: &ExitKind, - ) -> Result<(), Error> { +impl Observer for BacktraceObserver<'_> { + fn post_exec(&mut self, _state: &mut S, _input: &I, exit_kind: &ExitKind) -> Result<(), Error> { if self.harness_type == HarnessType::InProcess { if *exit_kind == ExitKind::Crash { self.update_hash(collect_backtrace()); @@ -218,22 +210,15 @@ where fn post_exec_child( &mut self, - _state: &mut S, - _input: &S::Input, + state: &mut S, + input: &I, exit_kind: &ExitKind, ) -> Result<(), Error> { - if self.harness_type == HarnessType::Child { - if *exit_kind == ExitKind::Crash { - self.update_hash(collect_backtrace()); - } else { - self.clear_hash(); - } - } - Ok(()) + self.post_exec(state, input, exit_kind) } } -impl<'a> Named for BacktraceObserver<'a> { +impl Named for BacktraceObserver<'_> { fn name(&self) -> &Cow<'static, str> { &self.observer_name } @@ -267,7 +252,7 @@ pub fn get_asan_runtime_flags() -> String { flags.join(":") } -/// An observer looking at the backtrace of target command using ASAN output +/// An observer looking at the backtrace of target command using ASAN output. This observer is only compatible with a `ForkserverExecutor`. 
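The `ProfilingObserver` hunk above attaches the offending path when JSON deserialization fails instead of bubbling up a bare serde error. A generic sketch of the same pattern, hedged: the helper name `load_json` is an illustrative assumption, while `Error::illegal_argument` is taken from the hunk above.

```rust
use std::{fs::File, io::BufReader, path::Path};

use libafl::Error;
use serde::de::DeserializeOwned;

/// Deserialize a JSON file, attaching the path to any serde error (sketch).
fn load_json<T, P>(json_path: P) -> Result<T, Error>
where
    T: DeserializeOwned,
    P: AsRef<Path>,
{
    let f = File::open(json_path.as_ref())?;
    let reader = BufReader::new(f);
    serde_json::from_reader(reader).map_err(|err| {
        let path = json_path.as_ref().to_string_lossy();
        Error::illegal_argument(format!("Failed to read from path {path}: {err:?}"))
    })
}
```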
#[derive(Serialize, Deserialize, Debug, Clone)] pub struct AsanBacktraceObserver { observer_name: Cow<'static, str>, @@ -373,23 +358,7 @@ impl Default for AsanBacktraceObserver { } } -impl Observer for AsanBacktraceObserver -where - S: UsesInput, -{ - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { - Ok(()) - } - - fn post_exec( - &mut self, - _state: &mut S, - _input: &S::Input, - _exit_kind: &ExitKind, - ) -> Result<(), Error> { - Ok(()) - } -} +impl Observer for AsanBacktraceObserver {} impl Named for AsanBacktraceObserver { fn name(&self) -> &Cow<'static, str> { diff --git a/libafl/src/observers/stdio.rs b/libafl/src/observers/stdio.rs index 4ee30876c9..bab111c9d4 100644 --- a/libafl/src/observers/stdio.rs +++ b/libafl/src/observers/stdio.rs @@ -1,6 +1,11 @@ +//! Observers for `stdout` and `stderr` +//! //! The [`StdOutObserver`] and [`StdErrObserver`] observers look at the stdout of a program //! The executor must explicitly support these observers. -//! For example, they are supported on the [`crate::executors::CommandExecutor`]. +#![cfg_attr( + all(feature = "std", unix), + doc = r"For example, they are supported on the [`crate::executors::CommandExecutor`]." +)] use alloc::borrow::Cow; use std::vec::Vec; @@ -8,7 +13,7 @@ use std::vec::Vec; use libafl_bolts::Named; use serde::{Deserialize, Serialize}; -use crate::{inputs::UsesInput, observers::Observer, state::State, Error}; +use crate::{observers::Observer, Error}; /// An observer that captures stdout of a target. /// Only works for supported executors. @@ -25,7 +30,7 @@ use crate::{inputs::UsesInput, observers::Observer, state::State, Error}; /// corpus::{Corpus, InMemoryCorpus, Testcase}, /// events::{EventFirer, NopEventManager}, /// executors::{CommandExecutor, ExitKind}, -/// feedbacks::Feedback, +/// feedbacks::{Feedback, StateInitializer}, /// inputs::{BytesInput, UsesInput}, /// mutators::{MutationResult, NopMutator}, /// observers::{ObserversTuple, StdErrObserver, StdOutObserver}, @@ -51,21 +56,22 @@ use crate::{inputs::UsesInput, observers::Observer, state::State, Error}; /// stderr_observer: Handle, /// } /// -/// impl Feedback for ExportStdXObserver +/// impl StateInitializer for ExportStdXObserver {} +/// +/// +/// impl Feedback for ExportStdXObserver /// where -/// S: State +/// S: State, +/// OT: MatchNameRef /// { -/// fn is_interesting( +/// fn is_interesting( /// &mut self, /// _state: &mut S, /// _manager: &mut EM, -/// _input: &::Input, +/// _input: &I, /// observers: &OT, /// _exit_kind: &ExitKind, /// ) -> Result -/// where -/// EM: EventFirer, -/// OT: ObserversTuple, /// { /// unsafe { /// STDOUT = observers.get(&self.stdout_observer).unwrap().stdout.clone(); @@ -196,20 +202,13 @@ impl Named for StdOutObserver { } } -impl Observer for StdOutObserver -where - S: State, -{ - fn pre_exec_child( - &mut self, - _state: &mut S, - _input: &::Input, - ) -> Result<(), Error> { +impl Observer for StdOutObserver { + fn pre_exec_child(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.stdout = None; Ok(()) } - fn pre_exec(&mut self, _state: &mut S, _input: &::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.stdout = None; Ok(()) } @@ -250,20 +249,13 @@ impl Named for StdErrObserver { } } -impl Observer for StdErrObserver -where - S: State, -{ - fn pre_exec_child( - &mut self, - _state: &mut S, - _input: &::Input, - ) -> Result<(), Error> { +impl Observer for StdErrObserver { + fn pre_exec_child(&mut self, 
_state: &mut S, _input: &I) -> Result<(), Error> { self.stderr = None; Ok(()) } - fn pre_exec(&mut self, _state: &mut S, _input: &::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.stderr = None; Ok(()) } diff --git a/libafl/src/observers/value.rs b/libafl/src/observers/value.rs index d23e3856d8..47fcde05a2 100644 --- a/libafl/src/observers/value.rs +++ b/libafl/src/observers/value.rs @@ -9,32 +9,29 @@ use core::{ }; use ahash::RandomState; -use libafl_bolts::{ownedref::OwnedRef, AsIter, AsIterMut, AsSlice, AsSliceMut, Named}; -use serde::{Deserialize, Serialize}; +use libafl_bolts::{ownedref::OwnedRef, AsIter, AsIterMut, AsSlice, AsSliceMut, HasLen, Named}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use super::Observer; -use crate::{inputs::UsesInput, observers::ObserverWithHashField, Error}; +use crate::{ + observers::{MapObserver, ObserverWithHashField}, + Error, +}; /// A simple observer with a single value. /// /// The intent is that the value is something with interior mutability which the target could write to even though this /// observer has a reference to it. Use [`RefCellValueObserver`] if using a [`RefCell`] around the value. #[derive(Serialize, Deserialize, Debug)] -#[serde(bound = "T: serde::de::DeserializeOwned")] -pub struct ValueObserver<'a, T> -where - T: Debug + Serialize, -{ +#[allow(clippy::unsafe_derive_deserialize)] +pub struct ValueObserver<'a, T> { /// The name of this observer. name: Cow<'static, str>, /// The value. pub value: OwnedRef<'a, T>, } -impl<'a, T> ValueObserver<'a, T> -where - T: Debug + Serialize + serde::de::DeserializeOwned, -{ +impl<'a, T> ValueObserver<'a, T> { /// Creates a new [`ValueObserver`] with the given name. #[must_use] pub fn new(name: &'static str, value: OwnedRef<'a, T>) -> Self { @@ -70,29 +67,15 @@ where } /// This *does not* reset the value inside the observer. -impl<'a, S, T> Observer for ValueObserver<'a, T> -where - S: UsesInput, - T: Debug + Serialize + serde::de::DeserializeOwned, -{ - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { - Ok(()) - } -} +impl Observer for ValueObserver<'_, T> {} -impl<'a, T> Named for ValueObserver<'a, T> -where - T: Debug + Serialize + serde::de::DeserializeOwned, -{ +impl Named for ValueObserver<'_, T> { fn name(&self) -> &Cow<'static, str> { &self.name } } -impl<'a, T: Hash> ObserverWithHashField for ValueObserver<'a, T> -where - T: Debug + Serialize + serde::de::DeserializeOwned, -{ +impl ObserverWithHashField for ValueObserver<'_, T> { fn hash(&self) -> Option { Some(RandomState::with_seeds(1, 2, 3, 4).hash_one(self.value.as_ref())) } @@ -100,7 +83,7 @@ where /// A simple observer with a single [`RefCell`]'d value. #[derive(Serialize, Deserialize, Debug)] -#[serde(bound = "T: serde::de::DeserializeOwned + serde::Serialize")] +#[allow(clippy::unsafe_derive_deserialize)] pub struct RefCellValueObserver<'a, T> { /// The name of this observer. name: Cow<'static, str>, @@ -160,22 +143,15 @@ impl<'a, T> RefCellValueObserver<'a, T> { } /// This *does not* reset the value inside the observer. 
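With the bounds relaxed above, a `ValueObserver` can be built around any value; `Serialize`/`DeserializeOwned` are no longer required just to construct it. A small hedged sketch, where the `OwnedRef::Ref` variant is assumed from `libafl_bolts` and is not shown in this hunk:

```rust
use libafl::observers::ValueObserver;
use libafl_bolts::ownedref::OwnedRef;

fn main() {
    let counter: u32 = 42;
    // `value` stays a public OwnedRef field, as in the struct definition above.
    let obs = ValueObserver::new("counter", OwnedRef::Ref(&counter));
    assert_eq!(*obs.value.as_ref(), 42);
}
```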
-impl<'a, S, T> Observer for RefCellValueObserver<'a, T> -where - S: UsesInput, -{ - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { - Ok(()) - } -} +impl Observer for RefCellValueObserver<'_, T> {} -impl<'a, T> Named for RefCellValueObserver<'a, T> { +impl Named for RefCellValueObserver<'_, T> { fn name(&self) -> &Cow<'static, str> { &self.name } } -impl<'a, T> ObserverWithHashField for RefCellValueObserver<'a, T> +impl ObserverWithHashField for RefCellValueObserver<'_, T> where T: Hash, { @@ -239,7 +215,7 @@ pub struct RefCellValueObserverIterMut<'it, T> { v: Option>, } -impl<'it, T: 'it, A: Debug + DerefMut + Serialize> AsIterMut<'it> +impl<'it, T: 'it, A: DerefMut + Serialize> AsIterMut<'it> for RefCellValueObserver<'_, A> { type RefMut = RefMut<'it, T>; @@ -274,25 +250,20 @@ impl<'it, T: 'it> Iterator for RefCellValueObserverIterMut<'it, T> { } } -impl<'a, T: Hash, A> Hash for RefCellValueObserver<'a, A> -where - T: Debug, - A: Debug + Deref + Serialize + serde::de::DeserializeOwned, -{ +impl Hash for RefCellValueObserver<'_, A> { /// Panics if the contained value is already mutably borrowed (calls /// [`RefCell::borrow`]). #[inline] fn hash(&self, hasher: &mut H) { - (*self.get_ref()).hash(hasher); + self.get_ref().hash(hasher); } } /// Panics if the contained value is already mutably borrowed (calls /// [`RefCell::borrow`]). -impl libafl_bolts::HasLen for RefCellValueObserver<'_, A> +impl HasLen for RefCellValueObserver<'_, A> where - T: Debug, - A: Debug + Deref + Serialize + serde::de::DeserializeOwned, + A: HasLen, { /// Panics if the contained value is already mutably borrowed (calls /// [`RefCell::borrow`]). @@ -307,32 +278,21 @@ where } } -impl AsMut - for RefCellValueObserver<'_, T> -{ - fn as_mut(&mut self) -> &mut Self { - self - } -} - -impl AsRef - for RefCellValueObserver<'_, T> -{ +impl AsRef for RefCellValueObserver<'_, T> { fn as_ref(&self) -> &Self { self } } -impl crate::observers::MapObserver for RefCellValueObserver<'_, A> +impl AsMut for RefCellValueObserver<'_, T> { + fn as_mut(&mut self) -> &mut Self { + self + } +} +impl MapObserver for RefCellValueObserver<'_, A> where - T: Copy + Debug + Default + Eq + Hash + num_traits::bounds::Bounded + 'static, - A: Debug - + Default - + Deref - + DerefMut - + serde::de::DeserializeOwned - + Serialize - + 'static, + T: PartialEq + Copy + Hash + Default + DeserializeOwned + Serialize + Debug, + A: DerefMut + Hash + Serialize + DeserializeOwned + HasLen + Default, { type Entry = T; @@ -385,7 +345,7 @@ where /// Panics if the contained value is already mutably borrowed (calls /// [`RefCell::borrow`]). 
fn to_vec(&self) -> Vec { - (*self.get_ref()).to_vec() + self.get_ref().to_vec() } /// Panics if the contained value is already mutably borrowed (calls diff --git a/libafl/src/schedulers/accounting.rs b/libafl/src/schedulers/accounting.rs index a65f391b9b..13c167cd99 100644 --- a/libafl/src/schedulers/accounting.rs +++ b/libafl/src/schedulers/accounting.rs @@ -7,19 +7,18 @@ use core::{ }; use hashbrown::HashMap; -use libafl_bolts::{rands::Rand, HasLen, HasRefCnt}; +use libafl_bolts::{rands::Rand, tuples::MatchName, HasLen, HasRefCnt}; use serde::{Deserialize, Serialize}; +use super::IndexesLenTimeMinimizerScheduler; use crate::{ corpus::{Corpus, CorpusId}, - feedbacks::MapIndexesMetadata, - inputs::UsesInput, - observers::{CanTrack, ObserversTuple}, + observers::CanTrack, schedulers::{ minimizer::{IsFavoredMetadata, MinimizerScheduler, DEFAULT_SKIP_NON_FAVORED_PROB}, - LenTimeMulTestcaseScore, Scheduler, + Scheduler, }, - state::{HasCorpus, HasRand, UsesState}, + state::{HasCorpus, HasRand}, Error, HasMetadata, }; @@ -105,87 +104,69 @@ impl TopAccountingMetadata { /// A minimizer scheduler using coverage accounting #[derive(Debug)] -pub struct CoverageAccountingScheduler<'a, CS, O> -where - CS: UsesState, - CS::State: Debug, -{ +pub struct CoverageAccountingScheduler<'a, CS, O> { accounting_map: &'a [u32], skip_non_favored_prob: f64, - inner: MinimizerScheduler< - CS, - LenTimeMulTestcaseScore<::State>, - MapIndexesMetadata, - O, - >, + inner: IndexesLenTimeMinimizerScheduler, } -impl<'a, CS, O> UsesState for CoverageAccountingScheduler<'a, CS, O> +impl Scheduler<::Input, S> for CoverageAccountingScheduler<'_, CS, O> where - CS: UsesState, - CS::State: Debug, -{ - type State = CS::State; -} - -impl<'a, CS, O> Scheduler for CoverageAccountingScheduler<'a, CS, O> -where - CS: Scheduler, - Self::State: HasCorpus + HasMetadata + HasRand, - CS::State: Debug, - ::Input: HasLen, + CS: Scheduler<::Input, S>, + S: HasCorpus + HasMetadata + HasRand, + ::Input: HasLen, O: CanTrack, { - fn on_add(&mut self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> { - self.update_accounting_score(state, idx)?; - self.inner.on_add(state, idx) + fn on_add(&mut self, state: &mut S, id: CorpusId) -> Result<(), Error> { + self.update_accounting_score(state, id)?; + self.inner.on_add(state, id) } fn on_evaluation( &mut self, - state: &mut Self::State, - input: &::Input, + state: &mut S, + input: &::Input, observers: &OT, ) -> Result<(), Error> where - OT: ObserversTuple, + OT: MatchName, { self.inner.on_evaluation(state, input, observers) } - fn next(&mut self, state: &mut Self::State) -> Result { + fn next(&mut self, state: &mut S) -> Result { if state .metadata_map() .get::() - .map_or(false, |x| x.changed) + .is_some_and(|x| x.changed) { self.accounting_cull(state)?; } else { self.inner.cull(state)?; } - let mut idx = self.inner.base_mut().next(state)?; + let mut id = self.inner.base_mut().next(state)?; while { let has = !state .corpus() - .get(idx)? + .get(id)? .borrow() .has_metadata::(); has } && state.rand_mut().coinflip(self.skip_non_favored_prob) { - idx = self.inner.base_mut().next(state)?; + id = self.inner.base_mut().next(state)?; } // Don't add corpus.curret(). 
The inner scheduler will take care of it - Ok(idx) + Ok(id) } /// Set current fuzzed corpus id and `scheduled_count` fn set_current_scheduled( &mut self, - _state: &mut Self::State, - _next_idx: Option, + _state: &mut S, + _next_id: Option, ) -> Result<(), Error> { // We do nothing here, the inner scheduler will take care of it Ok(()) @@ -194,19 +175,15 @@ where impl<'a, CS, O> CoverageAccountingScheduler<'a, CS, O> where - CS: Scheduler, - CS::State: HasCorpus + HasMetadata + HasRand + Debug, - ::Input: HasLen, O: CanTrack, { /// Update the `Corpus` score #[allow(clippy::unused_self)] #[allow(clippy::cast_possible_wrap)] - pub fn update_accounting_score( - &self, - state: &mut CS::State, - idx: CorpusId, - ) -> Result<(), Error> { + pub fn update_accounting_score(&self, state: &mut S, id: CorpusId) -> Result<(), Error> + where + S: HasCorpus + HasMetadata, + { let mut indexes = vec![]; let mut new_favoreds = vec![]; { @@ -220,7 +197,7 @@ where { let top_acc = state.metadata_map().get::().unwrap(); - if let Some(old_idx) = top_acc.map.get(&idx) { + if let Some(old_id) = top_acc.map.get(&idx) { if top_acc.max_accounting[idx] > self.accounting_map[idx] { continue; } @@ -229,11 +206,11 @@ where equal_score = true; } - let mut old = state.corpus().get(*old_idx)?.borrow_mut(); + let mut old = state.corpus().get_from_all(*old_id)?.borrow_mut(); let must_remove = { let old_meta = old.metadata_map_mut().get_mut::().ok_or_else(|| { Error::key_not_found(format!( - "AccountingIndexesMetadata, needed by CoverageAccountingScheduler, not found in testcase #{old_idx}" + "AccountingIndexesMetadata, needed by CoverageAccountingScheduler, not found in testcase #{old_id}" )) })?; *old_meta.refcnt_mut() -= 1; @@ -268,7 +245,7 @@ where state .corpus() - .get(idx)? + .get(id)? .borrow_mut() .metadata_map_mut() .insert(AccountingIndexesMetadata::with_tcref( @@ -283,7 +260,7 @@ where top_acc.changed = true; for elem in new_favoreds { - top_acc.map.insert(elem, idx); + top_acc.map.insert(elem, id); } Ok(()) @@ -291,13 +268,16 @@ where /// Cull the `Corpus` #[allow(clippy::unused_self)] - pub fn accounting_cull(&self, state: &CS::State) -> Result<(), Error> { + pub fn accounting_cull(&self, state: &S) -> Result<(), Error> + where + S: HasCorpus + HasMetadata, + { let Some(top_rated) = state.metadata_map().get::() else { return Ok(()); }; - for (_key, idx) in &top_rated.map { - let mut entry = state.corpus().get(*idx)?.borrow_mut(); + for (_key, id) in &top_rated.map { + let mut entry = state.corpus().get(*id)?.borrow_mut(); if entry.scheduled_count() > 0 { continue; } @@ -312,7 +292,10 @@ where /// and has a default probability to skip non-faved Testcases of [`DEFAULT_SKIP_NON_FAVORED_PROB`]. /// /// Provide the observer responsible for determining new indexes. - pub fn new(observer: &O, state: &mut CS::State, base: CS, accounting_map: &'a [u32]) -> Self { + pub fn new(observer: &O, state: &mut S, base: CS, accounting_map: &'a [u32]) -> Self + where + S: HasMetadata, + { match state.metadata_map().get::() { Some(meta) => { if meta.max_accounting.len() != accounting_map.len() { @@ -334,13 +317,16 @@ where /// and has a non-default probability to skip non-faved Testcases using (`skip_non_favored_prob`). /// /// Provide the observer responsible for determining new indexes. 
- pub fn with_skip_prob( + pub fn with_skip_prob( observer: &O, - state: &mut CS::State, + state: &mut S, base: CS, skip_non_favored_prob: f64, accounting_map: &'a [u32], - ) -> Self { + ) -> Self + where + S: HasMetadata, + { match state.metadata_map().get::() { Some(meta) => { if meta.max_accounting.len() != accounting_map.len() { diff --git a/libafl/src/schedulers/minimizer.rs b/libafl/src/schedulers/minimizer.rs index 9d5615ba54..15bc401a63 100644 --- a/libafl/src/schedulers/minimizer.rs +++ b/libafl/src/schedulers/minimizer.rs @@ -1,21 +1,21 @@ -//! The Minimizer schedulers are a family of corpus schedulers that feed the fuzzer -//! with testcases only from a subset of the total corpus. +//! The [`MinimizerScheduler`]`s` are a family of corpus schedulers that feed the fuzzer +//! with [`Testcase`]`s` only from a subset of the total [`Corpus`]. use alloc::vec::Vec; use core::{any::type_name, cmp::Ordering, marker::PhantomData}; use hashbrown::{HashMap, HashSet}; -use libafl_bolts::{rands::Rand, serdeany::SerdeAny, AsIter, HasRefCnt}; +use libafl_bolts::{rands::Rand, serdeany::SerdeAny, tuples::MatchName, AsIter, HasRefCnt}; use serde::{Deserialize, Serialize}; +use super::HasQueueCycles; use crate::{ corpus::{Corpus, CorpusId, Testcase}, feedbacks::MapIndexesMetadata, - inputs::UsesInput, - observers::{CanTrack, ObserversTuple}, + observers::CanTrack, require_index_tracking, schedulers::{LenTimeMulTestcaseScore, RemovableScheduler, Scheduler, TestcaseScore}, - state::{HasCorpus, HasRand, UsesState}, + state::{HasCorpus, HasRand}, Error, HasMetadata, }; @@ -77,55 +77,50 @@ impl Default for TopRatedsMetadata { } /// The [`MinimizerScheduler`] employs a genetic algorithm to compute a subset of the -/// corpus that exercise all the requested features (e.g. all the coverage seen so far) -/// prioritizing [`Testcase`]`s` using [`TestcaseScore`] +/// corpus that exercise all the requested features. +/// +/// E.g., it can use all the coverage seen so far to prioritize [`Testcase`]`s` using a [`TestcaseScore`]. 
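A standalone toy sketch of the idea described in the doc comment above: per covered feature, keep the testcase with the best (lowest) score, and treat the union of those winners as the favored subset. This mirrors the `TopRatedsMetadata` bookkeeping only conceptually; the names and data shapes here are illustrative, not the scheduler's actual types.

```rust
use std::collections::HashMap;

/// (testcase id, features it covers, score where lower is better)
type Entry = (usize, Vec<usize>, f64);

/// Compute the favored subset: the best-scoring testcase per feature.
fn favored(corpus: &[Entry]) -> Vec<usize> {
    let mut top_rated: HashMap<usize, (usize, f64)> = HashMap::new();
    for (id, features, score) in corpus {
        for &feature in features {
            top_rated
                .entry(feature)
                .and_modify(|(best_id, best_score)| {
                    if *score < *best_score {
                        *best_id = *id;
                        *best_score = *score;
                    }
                })
                .or_insert((*id, *score));
        }
    }
    let mut ids: Vec<usize> = top_rated.values().map(|(id, _)| *id).collect();
    ids.sort_unstable();
    ids.dedup();
    ids
}

fn main() {
    // Testcase 1 covers the same features as testcase 0 but is cheaper,
    // so only 1 and 2 end up favored.
    let corpus = vec![
        (0, vec![1, 2], 10.0),
        (1, vec![1, 2], 3.0),
        (2, vec![3], 7.0),
    ];
    assert_eq!(favored(&corpus), vec![1, 2]);
}
```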
#[derive(Debug, Clone)] -pub struct MinimizerScheduler { +pub struct MinimizerScheduler { base: CS, pub skip_non_favored_prob: f64, remove_metadata: bool, - phantom: PhantomData<(F, M, O)>, + phantom: PhantomData<(F, M, S)>, } -impl UsesState for MinimizerScheduler +impl RemovableScheduler<::Input, S> + for MinimizerScheduler where - CS: UsesState, -{ - type State = CS::State; -} - -impl RemovableScheduler for MinimizerScheduler -where - CS: RemovableScheduler, - F: TestcaseScore<::State>, + CS: RemovableScheduler<::Input, S> + + Scheduler<::Input, S>, + F: TestcaseScore, M: for<'a> AsIter<'a, Item = usize> + SerdeAny + HasRefCnt, - ::State: HasCorpus + HasMetadata + HasRand, - O: CanTrack, + S: HasCorpus + HasMetadata + HasRand, { - /// Replaces the testcase at the given idx + /// Replaces the [`Testcase`] at the given [`CorpusId`] fn on_replace( &mut self, - state: &mut ::State, - idx: CorpusId, - testcase: &Testcase<<::State as UsesInput>::Input>, + state: &mut S, + id: CorpusId, + testcase: &Testcase<::Input>, ) -> Result<(), Error> { - self.base.on_replace(state, idx, testcase)?; - self.update_score(state, idx) + self.base.on_replace(state, id, testcase)?; + self.update_score(state, id) } /// Removes an entry from the corpus fn on_remove( &mut self, - state: &mut ::State, - idx: CorpusId, - testcase: &Option::State as UsesInput>::Input>>, + state: &mut S, + id: CorpusId, + testcase: &Option::Input>>, ) -> Result<(), Error> { - self.base.on_remove(state, idx, testcase)?; + self.base.on_remove(state, id, testcase)?; let mut entries = if let Some(meta) = state.metadata_map_mut().get_mut::() { let entries = meta .map - .extract_if(|_, other_idx| *other_idx == idx) + .extract_if(|_, other_id| *other_id == id) .map(|(entry, _)| entry) .collect::>(); entries @@ -134,8 +129,8 @@ where }; entries.sort_unstable(); // this should already be sorted, but just in case let mut map = HashMap::new(); - for i in state.corpus().ids() { - let mut old = state.corpus().get(i)?.borrow_mut(); + for current_id in state.corpus().ids() { + let mut old = state.corpus().get(current_id)?.borrow_mut(); let factor = F::compute(state, &mut *old)?; if let Some(old_map) = old.metadata_map_mut().get_mut::() { let mut e_iter = entries.iter(); @@ -153,13 +148,13 @@ where Ordering::Equal => { // if we found a better factor, prefer it map.entry(*e) - .and_modify(|(f, idx)| { + .and_modify(|(f, id)| { if *f > factor { *f = factor; - *idx = i; + *id = current_id; } }) - .or_insert((factor, i)); + .or_insert((factor, current_id)); entry = e_iter.next(); map_entry = map_iter.next(); } @@ -183,16 +178,16 @@ where }; meta.map.reserve(reserve); - for (entry, (_, new_idx)) in map_iter { - let mut new = state.corpus().get(*new_idx)?.borrow_mut(); + for (entry, (_, new_id)) in map_iter { + let mut new = state.corpus().get(*new_id)?.borrow_mut(); let new_meta = new.metadata_map_mut().get_mut::().ok_or_else(|| { Error::key_not_found(format!( - "{} needed for MinimizerScheduler not found in testcase #{new_idx}", + "{} needed for MinimizerScheduler not found in testcase #{new_id}", type_name::() )) })?; *new_meta.refcnt_mut() += 1; - meta.map.insert(*entry, *new_idx); + meta.map.insert(*entry, *new_id); } // Put back the metadata @@ -202,56 +197,55 @@ where } } -impl Scheduler for MinimizerScheduler +impl Scheduler<::Input, S> for MinimizerScheduler where - CS: Scheduler, - F: TestcaseScore, + CS: Scheduler<::Input, S>, + F: TestcaseScore, M: for<'a> AsIter<'a, Item = usize> + SerdeAny + HasRefCnt, - Self::State: HasCorpus + HasMetadata 
+ HasRand, - O: CanTrack, + S: HasCorpus + HasMetadata + HasRand, { /// Called when a [`Testcase`] is added to the corpus - fn on_add(&mut self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> { - self.base.on_add(state, idx)?; - self.update_score(state, idx) + fn on_add(&mut self, state: &mut S, id: CorpusId) -> Result<(), Error> { + self.base.on_add(state, id)?; + self.update_score(state, id) } /// An input has been evaluated fn on_evaluation( &mut self, - state: &mut Self::State, - input: &::Input, + state: &mut S, + input: &::Input, observers: &OT, ) -> Result<(), Error> where - OT: ObserversTuple, + OT: MatchName, { self.base.on_evaluation(state, input, observers) } /// Gets the next entry - fn next(&mut self, state: &mut Self::State) -> Result { + fn next(&mut self, state: &mut S) -> Result { self.cull(state)?; - let mut idx = self.base.next(state)?; + let mut id = self.base.next(state)?; while { let has = !state .corpus() - .get(idx)? + .get(id)? .borrow() .has_metadata::(); has } && state.rand_mut().coinflip(self.skip_non_favored_prob) { - idx = self.base.next(state)?; + id = self.base.next(state)?; } - Ok(idx) + Ok(id) } /// Set current fuzzed corpus id and `scheduled_count` fn set_current_scheduled( &mut self, - _state: &mut Self::State, - _next_idx: Option, + _state: &mut S, + _next_id: Option, ) -> Result<(), Error> { // We do nothing here, the inner scheduler will take care of it Ok(()) @@ -260,20 +254,16 @@ where impl MinimizerScheduler where - CS: Scheduler, - F: TestcaseScore<::State>, M: for<'a> AsIter<'a, Item = usize> + SerdeAny + HasRefCnt, - ::State: HasCorpus + HasMetadata + HasRand, - O: CanTrack, { /// Update the [`Corpus`] score using the [`MinimizerScheduler`] #[allow(clippy::unused_self)] #[allow(clippy::cast_possible_wrap)] - pub fn update_score( - &self, - state: &mut ::State, - idx: CorpusId, - ) -> Result<(), Error> { + pub fn update_score(&self, state: &mut S, id: CorpusId) -> Result<(), Error> + where + F: TestcaseScore, + S: HasCorpus + HasMetadata, + { // Create a new top rated meta if not existing if state.metadata_map().get::().is_none() { state.add_metadata(TopRatedsMetadata::new()); @@ -281,21 +271,21 @@ where let mut new_favoreds = vec![]; { - let mut entry = state.corpus().get(idx)?.borrow_mut(); + let mut entry = state.corpus().get(id)?.borrow_mut(); let factor = F::compute(state, &mut *entry)?; let meta = entry.metadata_map_mut().get_mut::().ok_or_else(|| { Error::key_not_found(format!( - "Metadata needed for MinimizerScheduler not found in testcase #{idx}" + "Metadata needed for MinimizerScheduler not found in testcase #{id}" )) })?; let top_rateds = state.metadata_map().get::().unwrap(); for elem in meta.as_iter() { - if let Some(old_idx) = top_rateds.map.get(&*elem) { - if *old_idx == idx { + if let Some(old_id) = top_rateds.map.get(&*elem) { + if *old_id == id { new_favoreds.push(*elem); // always retain current; we'll drop it later otherwise continue; } - let mut old = state.corpus().get(*old_idx)?.borrow_mut(); + let mut old = state.corpus().get(*old_id)?.borrow_mut(); if factor > F::compute(state, &mut *old)? { continue; } @@ -303,7 +293,7 @@ where let must_remove = { let old_meta = old.metadata_map_mut().get_mut::().ok_or_else(|| { Error::key_not_found(format!( - "{} needed for MinimizerScheduler not found in testcase #{old_idx}", + "{} needed for MinimizerScheduler not found in testcase #{old_id}", type_name::() )) })?; @@ -326,7 +316,7 @@ where drop( state .corpus() - .get(idx)? + .get(id)? 
.borrow_mut() .metadata_map_mut() .remove::(), @@ -340,7 +330,7 @@ where .get_mut::() .unwrap() .map - .insert(elem, idx); + .insert(elem, id); } // println!("Number of interesting corpus elements: {}", state.metadata_map_mut().get::().unwrap().get_number()); Ok(()) @@ -348,19 +338,22 @@ where /// Cull the [`Corpus`] using the [`MinimizerScheduler`] #[allow(clippy::unused_self)] - pub fn cull(&self, state: &::State) -> Result<(), Error> { + pub fn cull(&self, state: &S) -> Result<(), Error> + where + S: HasCorpus + HasMetadata, + { let Some(top_rated) = state.metadata_map().get::() else { return Ok(()); }; let mut acc = HashSet::new(); - for (key, idx) in &top_rated.map { + for (key, id) in &top_rated.map { if !acc.contains(key) { - let mut entry = state.corpus().get(*idx)?.borrow_mut(); + let mut entry = state.corpus().get(*id)?.borrow_mut(); let meta = entry.metadata_map().get::().ok_or_else(|| { Error::key_not_found(format!( - "{} needed for MinimizerScheduler not found in testcase #{idx}", + "{} needed for MinimizerScheduler not found in testcase #{id}", type_name::() )) })?; @@ -374,7 +367,19 @@ where Ok(()) } - +} +impl HasQueueCycles for MinimizerScheduler +where + CS: HasQueueCycles, +{ + fn queue_cycles(&self) -> u64 { + self.base.queue_cycles() + } +} +impl MinimizerScheduler +where + O: CanTrack, +{ /// Get a reference to the base scheduler pub fn base(&self) -> &CS { &self.base @@ -433,13 +438,9 @@ where /// A [`MinimizerScheduler`] with [`LenTimeMulTestcaseScore`] to prioritize quick and small [`Testcase`]`s`. pub type LenTimeMinimizerScheduler = - MinimizerScheduler::State>, M, O>; + MinimizerScheduler; /// A [`MinimizerScheduler`] with [`LenTimeMulTestcaseScore`] to prioritize quick and small [`Testcase`]`s` /// that exercise all the entries registered in the [`MapIndexesMetadata`]. -pub type IndexesLenTimeMinimizerScheduler = MinimizerScheduler< - CS, - LenTimeMulTestcaseScore<::State>, - MapIndexesMetadata, - O, ->; +pub type IndexesLenTimeMinimizerScheduler = + MinimizerScheduler; diff --git a/libafl/src/schedulers/mod.rs b/libafl/src/schedulers/mod.rs index 2a0b65df48..252da33b18 100644 --- a/libafl/src/schedulers/mod.rs +++ b/libafl/src/schedulers/mod.rs @@ -29,30 +29,27 @@ pub use weighted::{StdWeightedScheduler, WeightedScheduler}; pub mod tuneable; use libafl_bolts::{ rands::Rand, - tuples::{Handle, MatchNameRef}, + tuples::{Handle, MatchName, MatchNameRef}, }; pub use tuneable::*; use crate::{ corpus::{Corpus, CorpusId, HasTestcase, SchedulerTestcaseMetadata, Testcase}, - inputs::UsesInput, - observers::{MapObserver, ObserversTuple}, + observers::MapObserver, random_corpus_id, - state::{HasCorpus, HasRand, State, UsesState}, + state::{HasCorpus, HasRand}, Error, HasMetadata, }; /// The scheduler also implements `on_remove` and `on_replace` if it implements this stage. -pub trait RemovableScheduler: Scheduler -where - Self::State: HasCorpus, -{ +pub trait RemovableScheduler { /// Removed the given entry from the corpus at the given index + /// When you remove testcases, make sure that that testcase is not currently fuzzed one! 
fn on_remove( &mut self, - _state: &mut Self::State, - _idx: CorpusId, - _testcase: &Option::Input>>, + _state: &mut S, + _id: CorpusId, + _testcase: &Option>, ) -> Result<(), Error> { Ok(()) } @@ -60,21 +57,105 @@ where /// Replaced the given testcase at the given idx fn on_replace( &mut self, - _state: &mut Self::State, - _idx: CorpusId, - _prev: &Testcase<::Input>, + _state: &mut S, + _id: CorpusId, + _prev: &Testcase, ) -> Result<(), Error> { Ok(()) } } -/// Defines the common metadata operations for the AFL-style schedulers -pub trait AflScheduler: Scheduler +/// Called when a [`Testcase`] is evaluated +pub fn on_add_metadata_default( + scheduler: &mut CS, + state: &mut S, + id: CorpusId, +) -> Result<(), Error> where - Self::State: HasCorpus + HasMetadata + HasTestcase, - O: MapObserver, - C: AsRef, + CS: AflScheduler, + S: HasTestcase + HasCorpus, { + let current_id = *state.corpus().current(); + + let mut depth = match current_id { + Some(parent_idx) => state + .testcase(parent_idx)? + .metadata::()? + .depth(), + None => 0, + }; + + // TODO increase perf_score when finding new things like in AFL + // https://github.com/google/AFL/blob/master/afl-fuzz.c#L6547 + + // Attach a `SchedulerTestcaseMetadata` to the queue entry. + depth += 1; + let mut testcase = state.testcase_mut(id)?; + testcase.add_metadata(SchedulerTestcaseMetadata::with_n_fuzz_entry( + depth, + scheduler.last_hash(), + )); + testcase.set_parent_id_optional(current_id); + Ok(()) +} + +/// Called when a [`Testcase`] is evaluated +pub fn on_evaluation_metadata_default( + scheduler: &mut CS, + state: &mut S, + observers: &OT, +) -> Result<(), Error> +where + CS: AflScheduler, + CS::MapObserverRef: AsRef, + S: HasMetadata, + O: MapObserver, + OT: MatchName, +{ + let observer = observers + .get(scheduler.map_observer_handle()) + .ok_or_else(|| Error::key_not_found("MapObserver not found".to_string()))? + .as_ref(); + + let mut hash = observer.hash_simple() as usize; + + let psmeta = state.metadata_mut::()?; + + hash %= psmeta.n_fuzz().len(); + // Update the path frequency + psmeta.n_fuzz_mut()[hash] = psmeta.n_fuzz()[hash].saturating_add(1); + + scheduler.set_last_hash(hash); + + Ok(()) +} + +/// Called when choosing the next [`Testcase`] +pub fn on_next_metadata_default(state: &mut S) -> Result<(), Error> +where + S: HasCorpus + HasTestcase, +{ + let current_id = *state.corpus().current(); + + if let Some(id) = current_id { + let mut testcase = state.testcase_mut(id)?; + let tcmeta = testcase.metadata_mut::()?; + + if tcmeta.handicap() >= 4 { + tcmeta.set_handicap(tcmeta.handicap() - 4); + } else if tcmeta.handicap() > 0 { + tcmeta.set_handicap(tcmeta.handicap() - 1); + } + } + + Ok(()) +} + +/// Defines the common metadata operations for the AFL-style schedulers +pub trait AflScheduler { + /// The type of [`MapObserver`] that this scheduler will use as reference + type MapObserverRef; + /// Return the last hash fn last_hash(&self) -> usize; @@ -82,121 +163,48 @@ where fn set_last_hash(&mut self, value: usize); /// Get the observer map observer name - fn map_observer_handle(&self) -> &Handle; + fn map_observer_handle(&self) -> &Handle; +} - /// Called when a [`Testcase`] is added to the corpus - fn on_add_metadata(&self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> { - let current_idx = *state.corpus().current(); - - let mut depth = match current_idx { - Some(parent_idx) => state - .testcase(parent_idx)? - .metadata::()? 
- .depth(), - None => 0, - }; - - // TODO increase perf_score when finding new things like in AFL - // https://github.com/google/AFL/blob/master/afl-fuzz.c#L6547 - - // Attach a `SchedulerTestcaseMetadata` to the queue entry. - depth += 1; - let mut testcase = state.testcase_mut(idx)?; - testcase.add_metadata(SchedulerTestcaseMetadata::with_n_fuzz_entry( - depth, - self.last_hash(), - )); - testcase.set_parent_id_optional(current_idx); - Ok(()) - } - - /// Called when a [`Testcase`] is evaluated - fn on_evaluation_metadata( - &mut self, - state: &mut Self::State, - _input: &::Input, - observers: &OT, - ) -> Result<(), Error> - where - OT: ObserversTuple, - { - let observer = observers - .get(self.map_observer_handle()) - .ok_or_else(|| Error::key_not_found("MapObserver not found".to_string()))? - .as_ref(); - - let mut hash = observer.hash_simple() as usize; - - let psmeta = state.metadata_mut::()?; - - hash %= psmeta.n_fuzz().len(); - // Update the path frequency - psmeta.n_fuzz_mut()[hash] = psmeta.n_fuzz()[hash].saturating_add(1); - - self.set_last_hash(hash); - - Ok(()) - } - - /// Called when choosing the next [`Testcase`] - fn on_next_metadata( - &mut self, - state: &mut Self::State, - _next_idx: Option, - ) -> Result<(), Error> { - let current_idx = *state.corpus().current(); - - if let Some(idx) = current_idx { - let mut testcase = state.testcase_mut(idx)?; - let tcmeta = testcase.metadata_mut::()?; - - if tcmeta.handicap() >= 4 { - tcmeta.set_handicap(tcmeta.handicap() - 4); - } else if tcmeta.handicap() > 0 { - tcmeta.set_handicap(tcmeta.handicap() - 1); - } - } - - Ok(()) - } +/// Trait for Schedulers which track queue cycles +pub trait HasQueueCycles { + /// The amount of cycles the scheduler has completed. + fn queue_cycles(&self) -> u64; } /// The scheduler define how the fuzzer requests a testcase from the corpus. /// It has hooks to corpus add/replace/remove to allow complex scheduling algorithms to collect data. 
-pub trait Scheduler: UsesState -where - Self::State: HasCorpus, -{ +pub trait Scheduler { /// Called when a [`Testcase`] is added to the corpus - fn on_add(&mut self, _state: &mut Self::State, _idx: CorpusId) -> Result<(), Error>; + fn on_add(&mut self, _state: &mut S, _id: CorpusId) -> Result<(), Error>; // Add parent_id here if it has no inner /// An input has been evaluated fn on_evaluation( &mut self, - _state: &mut Self::State, - _input: &::Input, + _state: &mut S, + _input: &I, _observers: &OT, ) -> Result<(), Error> where - OT: ObserversTuple, + OT: MatchName, { Ok(()) } /// Gets the next entry - fn next(&mut self, state: &mut Self::State) -> Result; + fn next(&mut self, state: &mut S) -> Result; // Increment corpus.current() here if it has no inner /// Set current fuzzed corpus id and `scheduled_count` fn set_current_scheduled( &mut self, - state: &mut Self::State, - next_idx: Option, - ) -> Result<(), Error> { - *state.corpus_mut().current_mut() = next_idx; - Ok(()) - } + state: &mut S, + next_id: Option, + ) -> Result<(), Error>; + + // *state.corpus_mut().current_mut() = next_id; + // Ok(()) } /// Feed the fuzzer simply with a random testcase on request @@ -205,31 +213,24 @@ pub struct RandScheduler { phantom: PhantomData, } -impl UsesState for RandScheduler +impl Scheduler for RandScheduler where - S: State + HasTestcase, + S: HasCorpus + HasRand, { - type State = S; -} - -impl Scheduler for RandScheduler -where - S: HasCorpus + HasRand + HasTestcase + State, -{ - fn on_add(&mut self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> { + fn on_add(&mut self, state: &mut S, id: CorpusId) -> Result<(), Error> { // Set parent id - let current_idx = *state.corpus().current(); + let current_id = *state.corpus().current(); state .corpus() - .get(idx)? + .get(id)? .borrow_mut() - .set_parent_id_optional(current_idx); + .set_parent_id_optional(current_id); Ok(()) } /// Gets the next entry at random - fn next(&mut self, state: &mut Self::State) -> Result { + fn next(&mut self, state: &mut S) -> Result { if state.corpus().count() == 0 { Err(Error::empty( "No entries in corpus. This often implies the target is not properly instrumented." @@ -237,10 +238,19 @@ where )) } else { let id = random_corpus_id!(state.corpus(), state.rand_mut()); - self.set_current_scheduled(state, Some(id))?; + >::set_current_scheduled(self, state, Some(id))?; Ok(id) } } + + fn set_current_scheduled( + &mut self, + state: &mut S, + next_id: Option, + ) -> Result<(), Error> { + *state.corpus_mut().current_mut() = next_id; + Ok(()) + } } impl RandScheduler { @@ -260,5 +270,6 @@ impl Default for RandScheduler { } /// A [`StdScheduler`] uses the default scheduler in `LibAFL` to schedule [`Testcase`]s. +/// /// The current `Std` is a [`RandScheduler`], although this may change in the future, if another [`Scheduler`] delivers better results. 
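A hedged sketch of a scheduler under the new `Scheduler<I, S>` shape defined above: no `UsesState`, and `set_current_scheduled` now has to be written out because the trait no longer ships a default body. The scheduler name and its trivial policy are illustrative only.

```rust
use libafl::{
    corpus::{Corpus, CorpusId},
    schedulers::Scheduler,
    state::HasCorpus,
    Error,
};

/// Always schedules the first corpus entry (illustrative only).
#[derive(Debug, Default)]
pub struct FirstEntryScheduler;

impl<I, S> Scheduler<I, S> for FirstEntryScheduler
where
    S: HasCorpus,
{
    fn on_add(&mut self, _state: &mut S, _id: CorpusId) -> Result<(), Error> {
        Ok(())
    }

    fn next(&mut self, state: &mut S) -> Result<CorpusId, Error> {
        let id = state
            .corpus()
            .first()
            .ok_or_else(|| Error::empty("No entries in corpus".to_string()))?;
        // Fully qualified, as in the RandScheduler hunk above, since `I` is not
        // inferable from the arguments alone.
        <Self as Scheduler<I, S>>::set_current_scheduled(self, state, Some(id))?;
        Ok(id)
    }

    fn set_current_scheduled(
        &mut self,
        state: &mut S,
        next_id: Option<CorpusId>,
    ) -> Result<(), Error> {
        *state.corpus_mut().current_mut() = next_id;
        Ok(())
    }
}
```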
pub type StdScheduler = RandScheduler; diff --git a/libafl/src/schedulers/powersched.rs b/libafl/src/schedulers/powersched.rs index ff0afed54c..8da02f38c2 100644 --- a/libafl/src/schedulers/powersched.rs +++ b/libafl/src/schedulers/powersched.rs @@ -4,17 +4,19 @@ use alloc::vec::Vec; use core::{marker::PhantomData, time::Duration}; use libafl_bolts::{ - tuples::{Handle, Handled}, + tuples::{Handle, Handled, MatchName}, Named, }; use serde::{Deserialize, Serialize}; use crate::{ corpus::{Corpus, CorpusId, HasTestcase, Testcase}, - inputs::UsesInput, - observers::{MapObserver, ObserversTuple}, - schedulers::{AflScheduler, RemovableScheduler, Scheduler}, - state::{HasCorpus, State, UsesState}, + observers::MapObserver, + schedulers::{ + on_add_metadata_default, on_evaluation_metadata_default, on_next_metadata_default, + AflScheduler, HasQueueCycles, RemovableScheduler, Scheduler, + }, + state::{HasCorpus, State}, Error, HasMetadata, }; @@ -155,9 +157,104 @@ impl SchedulerMetadata { } } +/// The struct for the powerschedule algorithm +#[derive(Debug, Clone, Serialize, Deserialize, Copy)] +pub struct PowerSchedule { + base: BaseSchedule, + avoid_crash: bool, +} + +impl PowerSchedule { + #[must_use] + /// Constructor + pub fn new(base: BaseSchedule) -> Self { + Self { + base, + avoid_crash: false, + } + } + + /// Use `explore` power schedule + #[must_use] + pub fn explore() -> Self { + Self { + base: BaseSchedule::EXPLORE, + avoid_crash: false, + } + } + + /// Use `exploit` power schedule + #[must_use] + pub fn exploit() -> Self { + Self { + base: BaseSchedule::EXPLOIT, + avoid_crash: false, + } + } + + /// Use `fast` power schedule + #[must_use] + pub fn fast() -> Self { + Self { + base: BaseSchedule::FAST, + avoid_crash: false, + } + } + + /// Use `coe` power schedule + #[must_use] + pub fn coe() -> Self { + Self { + base: BaseSchedule::COE, + avoid_crash: false, + } + } + + /// Use `lin` power schedule + #[must_use] + pub fn lin() -> Self { + Self { + base: BaseSchedule::LIN, + avoid_crash: false, + } + } + + /// Use `quad` power schedule + #[must_use] + pub fn quad() -> Self { + Self { + base: BaseSchedule::QUAD, + avoid_crash: false, + } + } + + /// Getter to `avoid_crash` + #[must_use] + pub fn avoid_crash(&self) -> bool { + self.avoid_crash + } + + /// Avoid scheduling testcases that caused crashes + pub fn set_avoid_crash(&mut self) { + self.avoid_crash = true; + } + + /// Getter to the base scheduler + #[must_use] + pub fn base(&self) -> &BaseSchedule { + &self.base + } + + /// Setter to the base scheduler + pub fn set_base(&mut self, base: BaseSchedule) { + self.base = base; + } +} + /// The power schedule to use #[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq)] -pub enum PowerSchedule { +#[cfg_attr(feature = "clap", derive(clap::ValueEnum))] +pub enum BaseSchedule { /// The `explore` power schedule EXPLORE, /// The `exploit` power schedule @@ -176,32 +273,21 @@ pub enum PowerSchedule { /// Note that this corpus is merely holding the metadata necessary for the power calculation /// and here we DON'T actually calculate the power (we do it in the stage) #[derive(Clone, Debug)] -pub struct PowerQueueScheduler { +pub struct PowerQueueScheduler { + queue_cycles: u64, strat: PowerSchedule, map_observer_handle: Handle, last_hash: usize, - phantom: PhantomData<(O, S)>, + phantom: PhantomData, } -impl UsesState for PowerQueueScheduler -where - S: State, -{ - type State = S; -} - -impl RemovableScheduler for PowerQueueScheduler -where - S: State + HasTestcase + HasMetadata + 
HasCorpus, - O: MapObserver, - C: AsRef, -{ +impl RemovableScheduler for PowerQueueScheduler { /// This will *NOT* neutralize the effect of this removed testcase from the global data such as `SchedulerMetadata` fn on_remove( &mut self, - _state: &mut Self::State, - _idx: CorpusId, - _prev: &Option::Input>>, + _state: &mut S, + _id: CorpusId, + _prev: &Option>, ) -> Result<(), Error> { Ok(()) } @@ -209,20 +295,17 @@ where /// This will *NOT* neutralize the effect of this removed testcase from the global data such as `SchedulerMetadata` fn on_replace( &mut self, - _state: &mut Self::State, - _idx: CorpusId, - _prev: &Testcase<::Input>, + _state: &mut S, + _id: CorpusId, + _prev: &Testcase, ) -> Result<(), Error> { Ok(()) } } -impl AflScheduler for PowerQueueScheduler -where - S: HasCorpus + HasMetadata + HasTestcase + State, - O: MapObserver, - C: AsRef, -{ +impl AflScheduler for PowerQueueScheduler { + type MapObserverRef = C; + fn last_hash(&self) -> usize { self.last_hash } @@ -236,30 +319,31 @@ where } } -impl Scheduler for PowerQueueScheduler +impl HasQueueCycles for PowerQueueScheduler { + fn queue_cycles(&self) -> u64 { + self.queue_cycles + } +} + +impl Scheduler for PowerQueueScheduler where S: HasCorpus + HasMetadata + HasTestcase + State, O: MapObserver, C: AsRef, { /// Called when a [`Testcase`] is added to the corpus - fn on_add(&mut self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> { - self.on_add_metadata(state, idx) + fn on_add(&mut self, state: &mut S, id: CorpusId) -> Result<(), Error> { + on_add_metadata_default(self, state, id) } - fn on_evaluation( - &mut self, - state: &mut Self::State, - input: &::Input, - observers: &OT, - ) -> Result<(), Error> + fn on_evaluation(&mut self, state: &mut S, _input: &I, observers: &OT) -> Result<(), Error> where - OT: ObserversTuple, + OT: MatchName, { - self.on_evaluation_metadata(state, input, observers) + on_evaluation_metadata_default(self, state, observers) } - fn next(&mut self, state: &mut Self::State) -> Result { + fn next(&mut self, state: &mut S) -> Result { if state.corpus().count() == 0 { Err(Error::empty( "No entries in corpus. 
This often implies the target is not properly instrumented.", @@ -270,14 +354,15 @@ where if let Some(next) = state.corpus().next(*cur) { next } else { + self.queue_cycles += 1; let psmeta = state.metadata_mut::()?; - psmeta.set_queue_cycles(psmeta.queue_cycles() + 1); + psmeta.set_queue_cycles(self.queue_cycles()); state.corpus().first().unwrap() } } None => state.corpus().first().unwrap(), }; - self.set_current_scheduled(state, Some(id))?; + >::set_current_scheduled(self, state, Some(id))?; Ok(id) } @@ -286,29 +371,32 @@ where /// Set current fuzzed corpus id and `scheduled_count` fn set_current_scheduled( &mut self, - state: &mut Self::State, - next_idx: Option, + state: &mut S, + next_id: Option, ) -> Result<(), Error> { - self.on_next_metadata(state, next_idx)?; + on_next_metadata_default(state)?; - *state.corpus_mut().current_mut() = next_idx; + *state.corpus_mut().current_mut() = next_id; Ok(()) } } -impl PowerQueueScheduler +impl PowerQueueScheduler where - S: HasMetadata, O: MapObserver, C: AsRef + Named, { /// Create a new [`PowerQueueScheduler`] #[must_use] - pub fn new(state: &mut S, map_observer: &C, strat: PowerSchedule) -> Self { + pub fn new(state: &mut S, map_observer: &C, strat: PowerSchedule) -> Self + where + S: HasMetadata, + { if !state.has_metadata::() { state.add_metadata::(SchedulerMetadata::new(Some(strat))); } PowerQueueScheduler { + queue_cycles: 0, strat, map_observer_handle: map_observer.handle(), last_hash: 0, diff --git a/libafl/src/schedulers/probabilistic_sampling.rs b/libafl/src/schedulers/probabilistic_sampling.rs index 4d528e75c6..ccd6b0e6c3 100644 --- a/libafl/src/schedulers/probabilistic_sampling.rs +++ b/libafl/src/schedulers/probabilistic_sampling.rs @@ -9,20 +9,16 @@ use libafl_bolts::rands::Rand; use serde::{Deserialize, Serialize}; use crate::{ - corpus::{Corpus, CorpusId, HasTestcase, Testcase}, - inputs::UsesInput, + corpus::{Corpus, CorpusId, Testcase}, schedulers::{RemovableScheduler, Scheduler, TestcaseScore}, - state::{HasCorpus, HasRand, State, UsesState}, + state::{HasCorpus, HasRand}, Error, HasMetadata, }; /// Conduct reservoir sampling (probabilistic sampling) over all corpus elements. #[derive(Debug, Clone)] -pub struct ProbabilitySamplingScheduler -where - S: UsesInput, -{ - phantom: PhantomData<(F, S)>, +pub struct ProbabilitySamplingScheduler { + phantom: PhantomData, } /// A state metadata holding a map of probability of corpus elements. 
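`PowerSchedule` is now a struct wrapping a `BaseSchedule` plus an `avoid_crash` flag, rather than a bare enum, as shown a little earlier in this patch. A hedged usage sketch; the `libafl::schedulers::powersched` module path is assumed from the file being patched.

```rust
use libafl::schedulers::powersched::{BaseSchedule, PowerSchedule};

fn main() {
    // The old enum variants live on as BaseSchedule; PowerSchedule carries the flags.
    let mut strat = PowerSchedule::explore();
    assert_eq!(*strat.base(), BaseSchedule::EXPLORE);
    assert!(!strat.avoid_crash());

    // Switch the base schedule and opt out of scheduling crashing testcases.
    strat.set_base(BaseSchedule::FAST);
    strat.set_avoid_crash();
    assert_eq!(*strat.base(), BaseSchedule::FAST);
    assert!(strat.avoid_crash());
}
```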
@@ -57,11 +53,7 @@ impl Default for ProbabilityMetadata { } } -impl ProbabilitySamplingScheduler -where - F: TestcaseScore, - S: HasCorpus + HasMetadata + HasRand, -{ +impl ProbabilitySamplingScheduler { /// Creates a new [`struct@ProbabilitySamplingScheduler`] #[must_use] pub fn new() -> Self { @@ -73,8 +65,12 @@ where /// Calculate the score and store in `ProbabilityMetadata` #[allow(clippy::cast_precision_loss)] #[allow(clippy::unused_self)] - pub fn store_probability(&self, state: &mut S, idx: CorpusId) -> Result<(), Error> { - let prob = F::compute(state, &mut *state.corpus().get(idx)?.borrow_mut())?; + pub fn store_probability(&self, state: &mut S, id: CorpusId) -> Result<(), Error> + where + F: TestcaseScore, + S: HasCorpus + HasMetadata + HasRand, + { + let prob = F::compute(state, &mut *state.corpus().get(id)?.borrow_mut())?; debug_assert!( prob >= 0.0 && prob.is_finite(), "scheduler probability is {prob}; to work correctly it must be >= 0.0 and finite" @@ -83,28 +79,28 @@ where .metadata_map_mut() .get_mut::() .unwrap(); - meta.map.insert(idx, prob); + meta.map.insert(id, prob); meta.total_probability += prob; Ok(()) } } -impl RemovableScheduler for ProbabilitySamplingScheduler +impl RemovableScheduler<::Input, S> for ProbabilitySamplingScheduler where F: TestcaseScore, - S: HasCorpus + HasMetadata + HasRand + HasTestcase + State, + S: HasCorpus + HasMetadata + HasRand, { fn on_remove( &mut self, - state: &mut Self::State, - idx: CorpusId, - _testcase: &Option::Input>>, + state: &mut S, + id: CorpusId, + _testcase: &Option::Input>>, ) -> Result<(), Error> { let meta = state .metadata_map_mut() .get_mut::() .unwrap(); - if let Some(prob) = meta.map.remove(&idx) { + if let Some(prob) = meta.map.remove(&id) { meta.total_probability -= prob; } Ok(()) @@ -112,51 +108,44 @@ where fn on_replace( &mut self, - state: &mut Self::State, - idx: CorpusId, - _prev: &Testcase<::Input>, + state: &mut S, + id: CorpusId, + _prev: &Testcase<::Input>, ) -> Result<(), Error> { let meta = state .metadata_map_mut() .get_mut::() .unwrap(); - if let Some(prob) = meta.map.remove(&idx) { + if let Some(prob) = meta.map.remove(&id) { meta.total_probability -= prob; } - self.store_probability(state, idx) + self.store_probability(state, id) } } -impl UsesState for ProbabilitySamplingScheduler -where - S: State + HasTestcase, -{ - type State = S; -} - -impl Scheduler for ProbabilitySamplingScheduler +impl Scheduler<::Input, S> for ProbabilitySamplingScheduler where F: TestcaseScore, - S: HasCorpus + HasMetadata + HasRand + HasTestcase + State, + S: HasCorpus + HasMetadata + HasRand, { - fn on_add(&mut self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> { - let current_idx = *state.corpus().current(); + fn on_add(&mut self, state: &mut S, id: CorpusId) -> Result<(), Error> { + let current_id = *state.corpus().current(); state .corpus() - .get(idx)? + .get(id)? .borrow_mut() - .set_parent_id_optional(current_idx); + .set_parent_id_optional(current_id); if state.metadata_map().get::().is_none() { state.add_metadata(ProbabilityMetadata::new()); } - self.store_probability(state, idx) + self.store_probability(state, id) } /// Gets the next entry #[allow(clippy::cast_precision_loss)] - fn next(&mut self, state: &mut Self::State) -> Result { + fn next(&mut self, state: &mut S) -> Result { if state.corpus().count() == 0 { Err(Error::empty(String::from( "No entries in corpus. 
This often implies the target is not properly instrumented.", @@ -178,13 +167,18 @@ where Ok(ret) } } + + fn set_current_scheduled( + &mut self, + state: &mut S, + next_id: Option, + ) -> Result<(), Error> { + *state.corpus_mut().current_mut() = next_id; + Ok(()) + } } -impl Default for ProbabilitySamplingScheduler -where - F: TestcaseScore, - S: HasCorpus + HasMetadata + HasRand, -{ +impl Default for ProbabilitySamplingScheduler { fn default() -> Self { Self::new() } @@ -193,40 +187,38 @@ where #[cfg(test)] #[cfg(feature = "std")] mod tests { - use core::{borrow::BorrowMut, marker::PhantomData}; + use core::borrow::BorrowMut; use libafl_bolts::rands::StdRand; use crate::{ corpus::{Corpus, InMemoryCorpus, Testcase}, feedbacks::ConstFeedback, - inputs::{bytes::BytesInput, Input, UsesInput}, + inputs::bytes::BytesInput, schedulers::{ProbabilitySamplingScheduler, Scheduler, TestcaseScore}, state::{HasCorpus, StdState}, - Error, HasMetadata, + Error, }; const FACTOR: f64 = 1337.0; #[derive(Debug, Clone)] - pub struct UniformDistribution - where - I: Input, - { - phantom: PhantomData, - } + pub struct UniformDistribution {} - impl TestcaseScore for UniformDistribution + impl TestcaseScore for UniformDistribution where - S: HasMetadata + HasCorpus, + S: HasCorpus, { - fn compute(_state: &S, _: &mut Testcase) -> Result { + fn compute( + _state: &S, + _: &mut Testcase<::Input>, + ) -> Result { Ok(FACTOR) } } - pub type UniformProbabilitySamplingScheduler = - ProbabilitySamplingScheduler::Input>, S>; + pub type UniformProbabilitySamplingScheduler = + ProbabilitySamplingScheduler; #[test] fn test_prob_sampling() { @@ -240,7 +232,8 @@ mod tests { // the first 3 probabilities will be .76, .86, .36 let rand = StdRand::with_seed(2); - let mut scheduler = UniformProbabilitySamplingScheduler::new(); + let mut scheduler: ProbabilitySamplingScheduler<_> = + UniformProbabilitySamplingScheduler::new(); let mut feedback = ConstFeedback::new(false); let mut objective = ConstFeedback::new(false); @@ -262,10 +255,10 @@ mod tests { .unwrap(); scheduler.on_add(state.borrow_mut(), idx1).unwrap(); scheduler.on_add(state.borrow_mut(), idx2).unwrap(); - let next_idx1 = scheduler.next(&mut state).unwrap(); - let next_idx2 = scheduler.next(&mut state).unwrap(); - let next_idx3 = scheduler.next(&mut state).unwrap(); - assert_eq!(next_idx1, next_idx2); - assert_ne!(next_idx1, next_idx3); + let next_id1 = scheduler.next(&mut state).unwrap(); + let next_id2 = scheduler.next(&mut state).unwrap(); + let next_id3 = scheduler.next(&mut state).unwrap(); + assert_eq!(next_id1, next_id2); + assert_ne!(next_id1, next_id3); } } diff --git a/libafl/src/schedulers/queue.rs b/libafl/src/schedulers/queue.rs index 0608f453c1..8787eb5379 100644 --- a/libafl/src/schedulers/queue.rs +++ b/libafl/src/schedulers/queue.rs @@ -1,48 +1,41 @@ //! 
The queue corpus scheduler implements an AFL-like queue mechanism use alloc::borrow::ToOwned; -use core::marker::PhantomData; use crate::{ - corpus::{Corpus, CorpusId, HasTestcase}, - schedulers::{RemovableScheduler, Scheduler}, - state::{HasCorpus, State, UsesState}, + corpus::{Corpus, CorpusId}, + schedulers::{HasQueueCycles, RemovableScheduler, Scheduler}, + state::HasCorpus, Error, }; /// Walk the corpus in a queue-like fashion #[derive(Debug, Clone)] -pub struct QueueScheduler { - phantom: PhantomData, +pub struct QueueScheduler { + queue_cycles: u64, + runs_in_current_cycle: u64, } -impl UsesState for QueueScheduler -where - S: State, -{ - type State = S; -} +impl RemovableScheduler for QueueScheduler {} -impl RemovableScheduler for QueueScheduler where S: HasCorpus + HasTestcase + State {} - -impl Scheduler for QueueScheduler +impl Scheduler for QueueScheduler where - S: HasCorpus + HasTestcase + State, + S: HasCorpus, { - fn on_add(&mut self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> { + fn on_add(&mut self, state: &mut S, id: CorpusId) -> Result<(), Error> { // Set parent id - let current_idx = *state.corpus().current(); + let current_id = *state.corpus().current(); state .corpus() - .get(idx)? + .get(id)? .borrow_mut() - .set_parent_id_optional(current_idx); + .set_parent_id_optional(current_id); Ok(()) } /// Gets the next entry in the queue - fn next(&mut self, state: &mut Self::State) -> Result { + fn next(&mut self, state: &mut S) -> Result { if state.corpus().count() == 0 { Err(Error::empty( "No entries in corpus. This often implies the target is not properly instrumented." @@ -55,28 +48,50 @@ where .map(|id| state.corpus().next(id)) .flatten() .unwrap_or_else(|| state.corpus().first().unwrap()); - self.set_current_scheduled(state, Some(id))?; + + self.runs_in_current_cycle += 1; + // TODO deal with corpus_counts decreasing due to removals + if self.runs_in_current_cycle >= state.corpus().count() as u64 { + self.queue_cycles += 1; + } + >::set_current_scheduled(self, state, Some(id))?; Ok(id) } } + + fn set_current_scheduled( + &mut self, + state: &mut S, + next_id: Option, + ) -> Result<(), Error> { + *state.corpus_mut().current_mut() = next_id; + Ok(()) + } } -impl QueueScheduler { +impl QueueScheduler { /// Creates a new `QueueScheduler` #[must_use] pub fn new() -> Self { Self { - phantom: PhantomData, + runs_in_current_cycle: 0, + queue_cycles: 0, } } } -impl Default for QueueScheduler { +impl Default for QueueScheduler { fn default() -> Self { Self::new() } } +impl HasQueueCycles for QueueScheduler { + fn queue_cycles(&self) -> u64 { + self.queue_cycles + } +} + #[cfg(test)] #[cfg(feature = "std")] mod tests { @@ -96,7 +111,7 @@ mod tests { #[test] fn test_queuecorpus() { let rand = StdRand::with_seed(4); - let mut scheduler = QueueScheduler::new(); + let mut scheduler: QueueScheduler = QueueScheduler::new(); let mut q = OnDiskCorpus::::new(PathBuf::from("target/.test/fancy/path")).unwrap(); @@ -112,10 +127,11 @@ mod tests { let mut state = StdState::new(rand, q, objective_q, &mut feedback, &mut objective).unwrap(); - let next_idx = scheduler.next(&mut state).unwrap(); + let next_id = + >::next(&mut scheduler, &mut state).unwrap(); let filename = state .corpus() - .get(next_idx) + .get(next_id) .unwrap() .borrow() .filename() diff --git a/libafl/src/schedulers/testcase_score.rs b/libafl/src/schedulers/testcase_score.rs index 4e57549712..d406bc4233 100644 --- a/libafl/src/schedulers/testcase_score.rs +++ b/libafl/src/schedulers/testcase_score.rs @@ -1,6 
+1,5 @@ //! The `TestcaseScore` is an evaluator providing scores of corpus items. use alloc::string::{String, ToString}; -use core::marker::PhantomData; use libafl_bolts::{HasLen, HasRefCnt}; @@ -9,7 +8,7 @@ use crate::{ feedbacks::MapIndexesMetadata, schedulers::{ minimizer::{IsFavoredMetadata, TopRatedsMetadata}, - powersched::{PowerSchedule, SchedulerMetadata}, + powersched::{BaseSchedule, SchedulerMetadata}, }, state::HasCorpus, Error, HasMetadata, @@ -18,26 +17,28 @@ use crate::{ /// Compute the favor factor of a [`Testcase`]. Higher is better. pub trait TestcaseScore where - S: HasMetadata + HasCorpus, + S: HasCorpus, { /// Computes the favor factor of a [`Testcase`]. Higher is better. - fn compute(state: &S, entry: &mut Testcase) -> Result; + fn compute(state: &S, entry: &mut Testcase<::Input>) + -> Result; } /// Multiply the testcase size with the execution time. /// This favors small and quick testcases. #[derive(Debug, Clone)] -pub struct LenTimeMulTestcaseScore { - phantom: PhantomData, -} +pub struct LenTimeMulTestcaseScore {} -impl TestcaseScore for LenTimeMulTestcaseScore +impl TestcaseScore for LenTimeMulTestcaseScore where - S: HasCorpus + HasMetadata, - S::Input: HasLen, + S: HasCorpus, + ::Input: HasLen, { #[allow(clippy::cast_precision_loss, clippy::cast_lossless)] - fn compute(state: &S, entry: &mut Testcase) -> Result { + fn compute( + state: &S, + entry: &mut Testcase<::Input>, + ) -> Result { // TODO maybe enforce entry.exec_time().is_some() Ok(entry.exec_time().map_or(1, |d| d.as_millis()) as f64 * entry.load_len(state.corpus())? as f64) @@ -52,11 +53,9 @@ const HAVOC_MAX_MULT: f64 = 64.0; /// The power assigned to each corpus entry /// This result is used for power scheduling #[derive(Debug, Clone)] -pub struct CorpusPowerTestcaseScore { - phantom: PhantomData, -} +pub struct CorpusPowerTestcaseScore {} -impl TestcaseScore for CorpusPowerTestcaseScore +impl TestcaseScore for CorpusPowerTestcaseScore where S: HasCorpus + HasMetadata, { @@ -67,23 +66,26 @@ where clippy::cast_sign_loss, clippy::cast_lossless )] - fn compute(state: &S, entry: &mut Testcase) -> Result { + fn compute( + state: &S, + entry: &mut Testcase<::Input>, + ) -> Result { let psmeta = state.metadata::()?; let fuzz_mu = if let Some(strat) = psmeta.strat() { - if strat == PowerSchedule::COE { + if *strat.base() == BaseSchedule::COE { let corpus = state.corpus(); let mut n_paths = 0; let mut v = 0.0; let cur_index = state.corpus().current().unwrap(); - for idx in corpus.ids() { - let n_fuzz_entry = if cur_index == idx { + for id in corpus.ids() { + let n_fuzz_entry = if cur_index == id { entry .metadata::()? .n_fuzz_entry() } else { corpus - .get(idx)? + .get(id)? .borrow() .metadata::()? 
.n_fuzz_entry() @@ -175,14 +177,14 @@ where // COE and Fast schedule are fairly different from what are described in the original thesis, // This implementation follows the changes made in this pull request https://github.com/AFLplusplus/AFLplusplus/pull/568 if let Some(strat) = psmeta.strat() { - match strat { - PowerSchedule::EXPLORE => { + match strat.base() { + BaseSchedule::EXPLORE => { // Nothing happens in EXPLORE } - PowerSchedule::EXPLOIT => { + BaseSchedule::EXPLOIT => { factor = MAX_FACTOR; } - PowerSchedule::COE => { + BaseSchedule::COE => { if libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()])) > fuzz_mu && !favored { @@ -190,7 +192,7 @@ where factor = 0.0; } } - PowerSchedule::FAST => { + BaseSchedule::FAST => { if entry.scheduled_count() != 0 { let lg = libm::log2(f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()])); @@ -229,11 +231,11 @@ where } } } - PowerSchedule::LIN => { + BaseSchedule::LIN => { factor = (entry.scheduled_count() as f64) / f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1); } - PowerSchedule::QUAD => { + BaseSchedule::QUAD => { factor = ((entry.scheduled_count() * entry.scheduled_count()) as f64) / f64::from(psmeta.n_fuzz()[tcmeta.n_fuzz_entry()] + 1); } @@ -241,7 +243,7 @@ where } if let Some(strat) = psmeta.strat() { - if strat != PowerSchedule::EXPLORE { + if *strat.base() != BaseSchedule::EXPLORE { if factor > MAX_FACTOR { factor = MAX_FACTOR; } @@ -252,7 +254,7 @@ where // Lower bound if the strat is not COE. if let Some(strat) = psmeta.strat() { - if strat == PowerSchedule::COE && perf_score < 1.0 { + if *strat.base() == BaseSchedule::COE && perf_score < 1.0 { perf_score = 1.0; } } @@ -262,6 +264,10 @@ where perf_score = HAVOC_MAX_MULT * 100.0; } + if entry.objectives_found() > 0 && psmeta.strat().is_some_and(|s| s.avoid_crash()) { + perf_score *= 0.00; + } + Ok(perf_score) } } @@ -269,17 +275,18 @@ where /// The weight for each corpus entry /// This result is used for corpus scheduling #[derive(Debug, Clone)] -pub struct CorpusWeightTestcaseScore { - phantom: PhantomData, -} +pub struct CorpusWeightTestcaseScore {} -impl TestcaseScore for CorpusWeightTestcaseScore +impl TestcaseScore for CorpusWeightTestcaseScore where S: HasCorpus + HasMetadata, { /// Compute the `weight` used in weighted corpus entry selection algo #[allow(clippy::cast_precision_loss, clippy::cast_lossless)] - fn compute(state: &S, entry: &mut Testcase) -> Result { + fn compute( + state: &S, + entry: &mut Testcase<::Input>, + ) -> Result { let mut weight = 1.0; let psmeta = state.metadata::()?; @@ -303,13 +310,15 @@ where let q_bitmap_size = tcmeta.bitmap_size() as f64; - if let Some( - PowerSchedule::FAST | PowerSchedule::COE | PowerSchedule::LIN | PowerSchedule::QUAD, - ) = psmeta.strat() - { - let hits = psmeta.n_fuzz()[tcmeta.n_fuzz_entry()]; - if hits > 0 { - weight /= libm::log10(f64::from(hits)) + 1.0; + if let Some(ps) = psmeta.strat() { + match ps.base() { + BaseSchedule::FAST | BaseSchedule::COE | BaseSchedule::LIN | BaseSchedule::QUAD => { + let hits = psmeta.n_fuzz()[tcmeta.n_fuzz_entry()]; + if hits > 0 { + weight /= libm::log10(f64::from(hits)) + 1.0; + } + } + _ => (), } } @@ -333,6 +342,10 @@ where weight *= 2.0; } + if entry.objectives_found() > 0 && psmeta.strat().is_some_and(|s| s.avoid_crash()) { + weight *= 0.00; + } + assert!(weight.is_normal()); Ok(weight) diff --git a/libafl/src/schedulers/tuneable.rs b/libafl/src/schedulers/tuneable.rs index de4fb44332..b677e71896 100644 --- a/libafl/src/schedulers/tuneable.rs +++ 
b/libafl/src/schedulers/tuneable.rs @@ -3,16 +3,15 @@ //! chose the next corpus entry manually use alloc::borrow::ToOwned; -use core::marker::PhantomData; use libafl_bolts::impl_serdeany; use serde::{Deserialize, Serialize}; use super::RemovableScheduler; use crate::{ - corpus::{Corpus, CorpusId, HasTestcase}, + corpus::{Corpus, CorpusId}, schedulers::Scheduler, - state::{HasCorpus, State, UsesState}, + state::HasCorpus, Error, HasMetadata, }; @@ -30,33 +29,35 @@ impl_serdeany!(TuneableSchedulerMetadata); /// Walk the corpus in a queue-like fashion /// With the specific `set_next` method, we can chose the next corpus entry manually #[derive(Debug, Clone)] -pub struct TuneableScheduler { - phantom: PhantomData, -} +pub struct TuneableScheduler {} -impl TuneableScheduler -where - S: HasMetadata + HasCorpus, -{ +impl TuneableScheduler { /// Creates a new `TuneableScheduler` #[must_use] - pub fn new(state: &mut S) -> Self { + pub fn new(state: &mut S) -> Self + where + S: HasMetadata, + { if !state.has_metadata::() { state.add_metadata(TuneableSchedulerMetadata::default()); } - Self { - phantom: PhantomData, - } + Self {} } - fn metadata_mut(state: &mut S) -> &mut TuneableSchedulerMetadata { + fn metadata_mut(state: &mut S) -> &mut TuneableSchedulerMetadata + where + S: HasMetadata, + { state .metadata_map_mut() .get_mut::() .unwrap() } - fn metadata(state: &S) -> &TuneableSchedulerMetadata { + fn metadata(state: &S) -> &TuneableSchedulerMetadata + where + S: HasMetadata, + { state .metadata_map() .get::() @@ -64,23 +65,35 @@ where } /// Sets the next corpus id to be used - pub fn set_next(state: &mut S, next: CorpusId) { + pub fn set_next(state: &mut S, next: CorpusId) + where + S: HasMetadata, + { Self::metadata_mut(state).next = Some(next); } /// Gets the next set corpus id - pub fn get_next(state: &S) -> Option { + pub fn get_next(state: &S) -> Option + where + S: HasMetadata, + { Self::metadata(state).next } /// Resets this to a queue scheduler - pub fn reset(state: &mut S) { + pub fn reset(state: &mut S) + where + S: HasMetadata, + { let metadata = Self::metadata_mut(state); metadata.next = None; } /// Gets the current corpus entry id - pub fn get_current(state: &S) -> CorpusId { + pub fn get_current(state: &S) -> CorpusId + where + S: HasCorpus, + { state .corpus() .current() @@ -88,36 +101,26 @@ where } } -impl UsesState for TuneableScheduler -where - S: State, -{ - type State = S; -} +impl RemovableScheduler for TuneableScheduler {} -impl RemovableScheduler for TuneableScheduler where - S: HasCorpus + HasMetadata + HasTestcase + State -{ -} - -impl Scheduler for TuneableScheduler +impl Scheduler for TuneableScheduler where - S: HasCorpus + HasMetadata + HasTestcase + State, + S: HasCorpus + HasMetadata, { - fn on_add(&mut self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> { + fn on_add(&mut self, state: &mut S, id: CorpusId) -> Result<(), Error> { // Set parent id - let current_idx = *state.corpus().current(); + let current_id = *state.corpus().current(); state .corpus() - .get(idx)? + .get(id)? .borrow_mut() - .set_parent_id_optional(current_idx); + .set_parent_id_optional(current_id); Ok(()) } /// Gets the next entry in the queue - fn next(&mut self, state: &mut Self::State) -> Result { + fn next(&mut self, state: &mut S) -> Result { if state.corpus().count() == 0 { return Err(Error::empty( "No entries in corpus. This often implies the target is not properly instrumented." 
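Because the `TuneableScheduler` helpers above now only require the state traits they actually touch (`HasMetadata`, or `HasCorpus` for `get_current`) instead of the old `UsesState` plumbing, steering the queue comes down to a couple of associated-function calls. A minimal hedged sketch, assuming the scheduler was already created with `TuneableScheduler::new(state)` so its metadata exists, and using the import paths from this file's own `use` list; the helper name `pin_then_release` is made up for illustration:

use libafl::{corpus::CorpusId, schedulers::TuneableScheduler, HasMetadata};

/// Illustration only: force the scheduler to serve `id` on its next `next()` call,
/// then fall back to plain queue order afterwards.
fn pin_then_release<S: HasMetadata>(state: &mut S, id: CorpusId) {
    TuneableScheduler::set_next(state, id);
    debug_assert_eq!(TuneableScheduler::get_next(state), Some(id));
    // ... run one round of fuzzing here ...
    TuneableScheduler::reset(state); // back to queue-like behaviour
}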
@@ -132,7 +135,15 @@ where } else { state.corpus().first().unwrap() }; - self.set_current_scheduled(state, Some(id))?; + >::set_current_scheduled(self, state, Some(id))?; Ok(id) } + fn set_current_scheduled( + &mut self, + state: &mut S, + next_id: Option, + ) -> Result<(), Error> { + *state.corpus_mut().current_mut() = next_id; + Ok(()) + } } diff --git a/libafl/src/schedulers/weighted.rs b/libafl/src/schedulers/weighted.rs index 7aeeda6bc8..6ee0162018 100644 --- a/libafl/src/schedulers/weighted.rs +++ b/libafl/src/schedulers/weighted.rs @@ -1,3 +1,5 @@ +//! An AFL++-style scheduler with a weighted queue. +//! //! The queue corpus scheduler with weighted queue item selection [from AFL++](https://github.com/AFLplusplus/AFLplusplus/blob/1d4f1e48797c064ee71441ba555b29fc3f467983/src/afl-fuzz-queue.c#L32). //! This queue corpus scheduler needs calibration stage. @@ -6,22 +8,23 @@ use core::marker::PhantomData; use hashbrown::HashMap; use libafl_bolts::{ rands::Rand, - tuples::{Handle, Handled}, + tuples::{Handle, Handled, MatchName}, Named, }; use serde::{Deserialize, Serialize}; +use super::powersched::PowerSchedule; use crate::{ corpus::{Corpus, CorpusId, HasTestcase, Testcase}, - inputs::UsesInput, - observers::{MapObserver, ObserversTuple}, + observers::MapObserver, random_corpus_id, schedulers::{ - powersched::{PowerSchedule, SchedulerMetadata}, + on_add_metadata_default, on_evaluation_metadata_default, on_next_metadata_default, + powersched::{BaseSchedule, SchedulerMetadata}, testcase_score::{CorpusWeightTestcaseScore, TestcaseScore}, - AflScheduler, RemovableScheduler, Scheduler, + AflScheduler, HasQueueCycles, RemovableScheduler, Scheduler, }, - state::{HasCorpus, HasRand, State, UsesState}, + state::{HasCorpus, HasRand}, Error, HasMetadata, }; @@ -95,32 +98,36 @@ libafl_bolts::impl_serdeany!(WeightedScheduleMetadata); /// A corpus scheduler using power schedules with weighted queue item selection algo. #[derive(Clone, Debug)] -pub struct WeightedScheduler { +pub struct WeightedScheduler { table_invalidated: bool, strat: Option, map_observer_handle: Handle, last_hash: usize, - phantom: PhantomData<(F, O, S)>, + queue_cycles: u64, + phantom: PhantomData<(F, O)>, /// Cycle `PowerSchedule` on completion of every queue cycle. 
cycle_schedules: bool, } -impl WeightedScheduler +impl WeightedScheduler where - F: TestcaseScore, - O: MapObserver, - S: HasCorpus + HasMetadata + HasRand, - C: AsRef + Named, + C: Named, { /// Create a new [`WeightedScheduler`] without any power schedule #[must_use] - pub fn new(state: &mut S, map_observer: &C) -> Self { + pub fn new(state: &mut S, map_observer: &C) -> Self + where + S: HasMetadata, + { Self::with_schedule(state, map_observer, None) } /// Create a new [`WeightedScheduler`] #[must_use] - pub fn with_schedule(state: &mut S, map_observer: &C, strat: Option) -> Self { + pub fn with_schedule(state: &mut S, map_observer: &C, strat: Option) -> Self + where + S: HasMetadata, + { let _ = state.metadata_or_insert_with(|| SchedulerMetadata::new(strat)); let _ = state.metadata_or_insert_with(WeightedScheduleMetadata::new); @@ -128,6 +135,7 @@ where strat, map_observer_handle: map_observer.handle(), last_hash: 0, + queue_cycles: 0, table_invalidated: true, cycle_schedules: false, phantom: PhantomData, @@ -154,7 +162,11 @@ where clippy::cast_precision_loss, clippy::cast_lossless )] - pub fn create_alias_table(&self, state: &mut S) -> Result<(), Error> { + pub fn create_alias_table(&self, state: &mut S) -> Result<(), Error> + where + F: TestcaseScore, + S: HasCorpus + HasMetadata, + { let n = state.corpus().count(); let mut alias_table: HashMap = HashMap::default(); @@ -232,44 +244,33 @@ where } /// Cycles the strategy of the scheduler; tries to mimic AFL++'s cycling formula - fn cycle_schedule(&mut self, metadata: &mut SchedulerMetadata) -> Result { - let next_strat = match metadata.strat().ok_or(Error::illegal_argument( + fn cycle_schedule(&mut self, metadata: &mut SchedulerMetadata) -> Result<(), Error> { + let mut ps = metadata.strat().ok_or(Error::illegal_argument( "No strategy specified when initializing scheduler; cannot cycle!", - ))? { - PowerSchedule::EXPLORE => PowerSchedule::EXPLOIT, - PowerSchedule::COE => PowerSchedule::LIN, - PowerSchedule::LIN => PowerSchedule::QUAD, - PowerSchedule::FAST => PowerSchedule::COE, - PowerSchedule::QUAD => PowerSchedule::FAST, - PowerSchedule::EXPLOIT => PowerSchedule::EXPLORE, + ))?; + let new_base = match ps.base() { + BaseSchedule::EXPLORE => BaseSchedule::EXPLOIT, + BaseSchedule::COE => BaseSchedule::LIN, + BaseSchedule::LIN => BaseSchedule::QUAD, + BaseSchedule::FAST => BaseSchedule::COE, + BaseSchedule::QUAD => BaseSchedule::FAST, + BaseSchedule::EXPLOIT => BaseSchedule::EXPLORE, }; - metadata.set_strat(Some(next_strat)); + ps.set_base(new_base); + metadata.set_strat(Some(ps)); // We need to recalculate the scores of testcases. 
self.table_invalidated = true; - Ok(next_strat) + Ok(()) } } -impl UsesState for WeightedScheduler -where - S: State, -{ - type State = S; -} - -impl RemovableScheduler for WeightedScheduler -where - F: TestcaseScore, - O: MapObserver, - S: HasCorpus + HasMetadata + HasRand + HasTestcase + State, - C: AsRef + Named, -{ +impl RemovableScheduler for WeightedScheduler { /// This will *NOT* neutralize the effect of this removed testcase from the global data such as `SchedulerMetadata` fn on_remove( &mut self, - _state: &mut Self::State, - _idx: CorpusId, - _prev: &Option::Input>>, + _state: &mut S, + _id: CorpusId, + _prev: &Option>, ) -> Result<(), Error> { self.table_invalidated = true; Ok(()) @@ -278,22 +279,18 @@ where /// This will *NOT* neutralize the effect of this removed testcase from the global data such as `SchedulerMetadata` fn on_replace( &mut self, - _state: &mut Self::State, - _idx: CorpusId, - _prev: &Testcase<::Input>, + _state: &mut S, + _id: CorpusId, + _prev: &Testcase, ) -> Result<(), Error> { self.table_invalidated = true; Ok(()) } } -impl AflScheduler for WeightedScheduler -where - F: TestcaseScore, - O: MapObserver, - S: HasCorpus + HasMetadata + HasTestcase + HasRand + State, - C: AsRef + Named, -{ +impl AflScheduler for WeightedScheduler { + type MapObserverRef = C; + fn last_hash(&self) -> usize { self.last_hash } @@ -307,30 +304,36 @@ where } } -impl Scheduler for WeightedScheduler +impl HasQueueCycles for WeightedScheduler { + fn queue_cycles(&self) -> u64 { + self.queue_cycles + } +} + +impl Scheduler<::Input, S> for WeightedScheduler where + C: AsRef + Named, F: TestcaseScore, O: MapObserver, - S: HasCorpus + HasMetadata + HasRand + HasTestcase + State, - C: AsRef + Named, + S: HasCorpus + HasMetadata + HasRand + HasTestcase, { /// Called when a [`Testcase`] is added to the corpus - fn on_add(&mut self, state: &mut S, idx: CorpusId) -> Result<(), Error> { - self.on_add_metadata(state, idx)?; + fn on_add(&mut self, state: &mut S, id: CorpusId) -> Result<(), Error> { + on_add_metadata_default(self, state, id)?; self.table_invalidated = true; Ok(()) } fn on_evaluation( &mut self, - state: &mut Self::State, - input: &::Input, + state: &mut S, + _input: &::Input, observers: &OT, ) -> Result<(), Error> where - OT: ObserversTuple, + OT: MatchName, { - self.on_evaluation_metadata(state, input, observers) + on_evaluation_metadata_default(self, state, observers) } #[allow(clippy::similar_names, clippy::cast_precision_loss)] @@ -369,8 +372,9 @@ where // Update depth if runs_in_current_cycle >= corpus_counts { + self.queue_cycles += 1; let psmeta = state.metadata_mut::()?; - psmeta.set_queue_cycles(psmeta.queue_cycles() + 1); + psmeta.set_queue_cycles(self.queue_cycles()); if self.cycle_schedules { self.cycle_schedule(psmeta)?; } @@ -384,15 +388,15 @@ where /// Set current fuzzed corpus id and `scheduled_count` fn set_current_scheduled( &mut self, - state: &mut Self::State, - next_idx: Option, + state: &mut S, + next_id: Option, ) -> Result<(), Error> { - self.on_next_metadata(state, next_idx)?; + on_next_metadata_default(state)?; - *state.corpus_mut().current_mut() = next_idx; + *state.corpus_mut().current_mut() = next_id; Ok(()) } } /// The standard corpus weight, same as in `AFL++` -pub type StdWeightedScheduler = WeightedScheduler, O, S>; +pub type StdWeightedScheduler = WeightedScheduler; diff --git a/libafl/src/stages/afl_stats.rs b/libafl/src/stages/afl_stats.rs new file mode 100644 index 0000000000..7f37241fc3 --- /dev/null +++ b/libafl/src/stages/afl_stats.rs @@ 
-0,0 +1,804 @@ +//! Stage to compute and report AFL++ stats +use alloc::{string::String, vec::Vec}; +use core::{marker::PhantomData, time::Duration}; +use std::{ + borrow::Cow, + fmt::Display, + fs::{File, OpenOptions}, + io::{BufRead, BufReader, Write}, + path::{Path, PathBuf}, + process, +}; + +#[cfg(unix)] +use libafl_bolts::os::peak_rss_mb_child_processes; +use libafl_bolts::{ + core_affinity::CoreId, + current_time, + tuples::{Handle, Handled, MatchNameRef}, + Named, +}; +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "track_hit_feedbacks")] +use crate::feedbacks::{CRASH_FEEDBACK_NAME, TIMEOUT_FEEDBACK_NAME}; +use crate::{ + corpus::{Corpus, HasCurrentCorpusId, SchedulerTestcaseMetadata, Testcase}, + events::EventFirer, + executors::HasObservers, + mutators::Tokens, + observers::MapObserver, + schedulers::{minimizer::IsFavoredMetadata, HasQueueCycles}, + stages::{calibrate::UnstableEntriesMetadata, Stage}, + state::{HasCorpus, HasExecutions, HasImported, HasStartTime, Stoppable, UsesState}, + std::string::ToString, + Error, HasMetadata, HasNamedMetadata, HasScheduler, +}; +/// AFL++'s default stats update interval +pub const AFL_FUZZER_STATS_UPDATE_INTERVAL_SECS: u64 = 60; + +/// `CalibrationTime` - Use in conjunction with `TimeTrackingFeedback` +#[derive(Debug, Serialize, Deserialize)] +pub struct CalibrationTime(pub Duration); +impl From for CalibrationTime { + fn from(value: Duration) -> Self { + Self(value) + } +} + +libafl_bolts::impl_serdeany!(CalibrationTime); + +/// `SyncTime` - Use in conjunction with `TimeTrackingFeedback` +#[derive(Debug, Serialize, Deserialize)] +pub struct SyncTime(pub Duration); +impl From for SyncTime { + fn from(value: Duration) -> Self { + Self(value) + } +} + +libafl_bolts::impl_serdeany!(SyncTime); + +/// `FuzzTime` - Use in conjunction with `TimeTrackingFeedback` +#[derive(Debug, Serialize, Deserialize)] +pub struct FuzzTime(pub Duration); +impl From for FuzzTime { + fn from(value: Duration) -> Self { + Self(value) + } +} + +libafl_bolts::impl_serdeany!(FuzzTime); + +/// The [`AflStatsStage`] is a Stage that calculates and writes +/// AFL++'s `fuzzer_stats` and `plot_data` information. +#[derive(Debug, Clone)] +pub struct AflStatsStage { + map_observer_handle: Handle, + stats_file_path: PathBuf, + plot_file_path: Option, + start_time: u64, + // the number of testcases that have been fuzzed + has_fuzzed_size: usize, + // the number of "favored" testcases + is_favored_size: usize, + // the last time that we report all stats + last_report_time: Duration, + // the interval at which we report all stats + stats_report_interval: Duration, + pid: u32, + slowest_exec: Duration, + max_depth: u64, + cycles_done: u64, + saved_crashes: u64, + saved_hangs: u64, + last_find: Duration, + last_hang: Duration, + last_crash: Duration, + exec_timeout: u64, + execs_at_last_objective: u64, + cycles_wo_finds: u64, + /// banner text (e.g., the target name) + afl_banner: Cow<'static, str>, + /// the version of libafl-fuzz used + afl_version: Cow<'static, str>, + /// default, persistent, qemu, unicorn, non-instrumented + target_mode: Cow<'static, str>, + /// full command line used for the fuzzing session + command_line: Cow<'static, str>, + /// Amount of tokens provided by the user. Used to determine autotokens count. 
+ dict_count: usize, + /// autotokens are enabled + autotokens_enabled: bool, + /// The core we are bound to + core_id: CoreId, + phantom_data: PhantomData<(O, E, EM, Z)>, +} + +/// AFL++'s `fuzzer_stats` +#[derive(Debug, Clone)] +pub struct AFLFuzzerStats<'a> { + /// unix time indicating the start time of afl-fuzz + start_time: u64, + /// unix time corresponding to the last interval + last_update: u64, + /// run time in seconds to the last update of this file + run_time: u64, + /// process id of the fuzzer process + fuzzer_pid: u32, + /// queue cycles completed so far + cycles_done: u64, + /// number of queue cycles without any new paths found + cycles_wo_find: u64, + /// longest time in seconds no new path was found + time_wo_finds: u64, + /// Time spent fuzzing + fuzz_time: u64, + /// Time spent calibrating inputs + calibration_time: u64, + /// Time spent syncing with foreign fuzzers + /// NOTE: Syncing between our own instances is not counted. + sync_time: u64, + /// TODO + trim_time: u64, + /// number of fuzzer executions attempted (what does attempted mean here?) + execs_done: u64, + /// overall number of execs per second + execs_per_sec: u64, + /// TODO + execs_ps_last_min: u64, + /// total number of entries in the queue + corpus_count: usize, + /// number of queue entries that are favored + corpus_favored: usize, + /// number of entries discovered through local fuzzing + corpus_found: usize, + /// number of entries imported from other instances + corpus_imported: usize, + /// number of levels in the generated data set + max_depth: u64, + /// currently processed entry number + cur_item: usize, + /// number of favored entries still waiting to be fuzzed + pending_favs: usize, + /// number of all entries waiting to be fuzzed + pending_total: usize, + /// number of test cases showing variable behavior + corpus_variable: u64, + /// percentage of bitmap bytes that behave consistently + stability: f64, + /// percentage of edge coverage found in the map so far, + bitmap_cvg: f64, + /// number of unique crashes recorded + saved_crashes: u64, + /// number of unique hangs encountered + saved_hangs: u64, + /// seconds since the last find was found + last_find: Duration, + /// seconds since the last crash was found + last_crash: Duration, + /// seconds since the last hang was found + last_hang: Duration, + /// execs since the last crash was found + execs_since_crash: u64, + /// the -t command line value + exec_timeout: u64, + /// real time of the slowest execution in ms + slowest_exec_ms: u128, + /// max rss usage reached during fuzzing in MB + peak_rss_mb: i64, + /// TODO + cpu_affinity: usize, + /// how many edges have been found + edges_found: u64, + /// Size of our edges map + total_edges: u64, + /// how many edges are non-deterministic + var_byte_count: usize, + /// TODO: + havoc_expansion: usize, + /// Amount of automatic dict entries found + auto_dict_entries: usize, + /// TODO: + testcache_size: usize, + /// TODO: + testcache_count: usize, + /// TODO: + testcache_evict: usize, + /// banner text (e.g., the target name) + afl_banner: &'a Cow<'static, str>, + /// the version of AFL++ used + afl_version: &'a Cow<'static, str>, + /// default, persistent, qemu, unicorn, non-instrumented + target_mode: &'a Cow<'static, str>, + /// full command line used for the fuzzing session + command_line: &'a str, +} +/// AFL++'s `plot_data` +#[derive(Debug, Clone)] +pub struct AFLPlotData<'a> { + relative_time: &'a u64, + cycles_done: &'a u64, + cur_item: &'a usize, + corpus_count: &'a usize, + 
pending_total: &'a usize, + pending_favs: &'a usize, + /// Note: renamed `map_size` -> `total_edges` for consistency with `fuzzer_stats` + total_edges: &'a u64, + saved_crashes: &'a u64, + saved_hangs: &'a u64, + max_depth: &'a u64, + execs_per_sec: &'a u64, + /// Note: renamed `total_execs` -> `execs_done` for consistency with `fuzzer_stats` + execs_done: &'a u64, + edges_found: &'a u64, +} + +impl UsesState for AflStatsStage +where + E: UsesState, + EM: EventFirer, + Z: UsesState, +{ + type State = E::State; +} + +impl Stage for AflStatsStage +where + E: UsesState + HasObservers, + EM: EventFirer, + Z: UsesState + HasScheduler, + E::State: HasImported + + HasCorpus + + HasMetadata + + HasStartTime + + HasExecutions + + HasNamedMetadata + + Stoppable, + E::Observers: MatchNameRef, + O: MapObserver, + C: AsRef + Named, + ::Scheduler: HasQueueCycles, + <::State as HasCorpus>::Corpus: Corpus, +{ + #[allow(clippy::too_many_lines)] + fn perform( + &mut self, + fuzzer: &mut Z, + executor: &mut E, + state: &mut E::State, + _manager: &mut EM, + ) -> Result<(), Error> { + let Some(corpus_idx) = state.current_corpus_id()? else { + return Err(Error::illegal_state( + "state is not currently processing a corpus index", + )); + }; + let testcase = state.corpus().get(corpus_idx)?.borrow(); + // NOTE: scheduled_count represents the amount of fuzz runs a + // testcase has had. Since this stage is kept at the very end of stage list, + // the entry would have been fuzzed already (and should contain IsFavoredMetadata) but would have a scheduled count of zero + // since the scheduled count is incremented after all stages have been run. + if testcase.scheduled_count() == 0 { + // New testcase! + self.cycles_wo_finds = 0; + self.update_last_find(); + #[cfg(feature = "track_hit_feedbacks")] + { + self.maybe_update_last_crash(&testcase, state); + self.maybe_update_last_hang(&testcase, state); + } + self.update_has_fuzzed_size(); + self.maybe_update_is_favored_size(&testcase); + } + self.maybe_update_slowest_exec(&testcase); + self.maybe_update_max_depth(&testcase); + + // See if we actually need to run the stage, if not, avoid dynamic value computation. + if !self.check_interval() { + return Ok(()); + } + + let corpus_size = state.corpus().count(); + let total_executions = *state.executions(); + + let scheduler = fuzzer.scheduler(); + let queue_cycles = scheduler.queue_cycles(); + self.maybe_update_cycles(queue_cycles); + self.maybe_update_cycles_wo_finds(queue_cycles); + + let observers = executor.observers(); + let map_observer = observers + .get(&self.map_observer_handle) + .ok_or_else(|| Error::key_not_found("invariant: MapObserver not found".to_string()))? + .as_ref(); + let filled_entries_in_map = map_observer.count_bytes(); + let map_size = map_observer.usable_count(); + // Since we do not calibrate when using `QueueScheduler`; we cannot calculate unstable entries. + let unstable_entries_in_map = state + .metadata_map() + .get::() + .map_or(0, |m| m.unstable_entries().len()); + + let auto_dict_entries = if self.autotokens_enabled { + state + .metadata::()? 
+ .len() + .saturating_sub(self.dict_count) + } else { + 0 + }; + #[allow(clippy::similar_names)] + let stats = AFLFuzzerStats { + start_time: self.start_time, + last_update: self.last_report_time.as_secs(), + run_time: self.last_report_time.as_secs() - self.start_time, + fuzzer_pid: self.pid, + cycles_done: queue_cycles, + cycles_wo_find: self.cycles_wo_finds, + fuzz_time: state + .metadata::() + .map_or(Duration::from_secs(0), |d| d.0) + .as_secs(), + calibration_time: state + .metadata::() + .map_or(Duration::from_secs(0), |d| d.0) + .as_secs(), + sync_time: state + .metadata::() + .map_or(Duration::from_secs(0), |d| d.0) + .as_secs(), + trim_time: 0, // TODO + execs_done: total_executions, + execs_per_sec: *state.executions(), // TODO + execs_ps_last_min: *state.executions(), // TODO + max_depth: self.max_depth, + corpus_count: corpus_size, + corpus_favored: corpus_size - self.is_favored_size, + corpus_found: corpus_size - state.imported(), + corpus_imported: *state.imported(), + cur_item: corpus_idx.into(), + pending_total: corpus_size - self.has_fuzzed_size, + pending_favs: 0, // TODO + time_wo_finds: (current_time() - self.last_find).as_secs(), + corpus_variable: 0, + stability: self.calculate_stability(unstable_entries_in_map, filled_entries_in_map), + #[allow(clippy::cast_precision_loss)] + bitmap_cvg: (filled_entries_in_map as f64 / map_size as f64) * 100.0, + saved_crashes: self.saved_crashes, + saved_hangs: self.saved_hangs, + last_find: self.last_find, + last_hang: self.last_hang, + last_crash: self.last_crash, + execs_since_crash: total_executions - self.execs_at_last_objective, + exec_timeout: self.exec_timeout, + slowest_exec_ms: self.slowest_exec.as_millis(), + #[cfg(unix)] + peak_rss_mb: peak_rss_mb_child_processes()?, + #[cfg(not(unix))] + peak_rss_mb: 0, // TODO for Windows + cpu_affinity: self.core_id.0, + total_edges: map_size as u64, + edges_found: filled_entries_in_map, + var_byte_count: unstable_entries_in_map, + havoc_expansion: 0, // TODO + auto_dict_entries, + testcache_size: 0, + testcache_count: 0, + testcache_evict: 0, + afl_banner: &self.afl_banner, + afl_version: &self.afl_version, + target_mode: &self.target_mode, + command_line: &self.command_line, + }; + let plot_data = AFLPlotData { + corpus_count: &stats.corpus_count, + cur_item: &stats.cur_item, + cycles_done: &stats.cycles_done, + edges_found: &stats.edges_found, + total_edges: &stats.total_edges, + execs_per_sec: &stats.execs_per_sec, + pending_total: &stats.pending_total, + pending_favs: &stats.pending_favs, + max_depth: &stats.max_depth, + relative_time: &stats.run_time, + saved_hangs: &stats.saved_hangs, + saved_crashes: &stats.saved_crashes, + execs_done: &stats.execs_done, + }; + self.write_fuzzer_stats(&stats)?; + if self.plot_file_path.is_some() { + self.write_plot_data(&plot_data)?; + } + Ok(()) + } + + fn should_restart(&mut self, _state: &mut Self::State) -> Result { + Ok(true) + } + + fn clear_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { + Ok(()) + } +} + +impl AflStatsStage +where + E: UsesState + HasObservers, + EM: EventFirer, + Z: UsesState, + E::State: HasImported + HasCorpus + HasMetadata + HasExecutions, + C: AsRef + Named, + O: MapObserver, +{ + /// Builder for `AflStatsStage` + #[must_use] + pub fn builder() -> AflStatsStageBuilder { + AflStatsStageBuilder::new() + } + + fn write_fuzzer_stats(&self, stats: &AFLFuzzerStats) -> Result<(), Error> { + let tmp_file = self + .stats_file_path + .parent() + .expect("fuzzer_stats file must have a parent!") + 
.join(".fuzzer_stats_tmp"); + std::fs::write(&tmp_file, stats.to_string())?; + _ = std::fs::copy(&tmp_file, &self.stats_file_path)?; + std::fs::remove_file(tmp_file)?; + Ok(()) + } + + fn write_plot_data(&self, plot_data: &AFLPlotData) -> Result<(), Error> { + let mut file = OpenOptions::new().append(true).open( + self.plot_file_path + .as_ref() + .expect("invariant; should never occur"), + )?; + writeln!(file, "{plot_data}")?; + Ok(()) + } + + fn maybe_update_is_favored_size(&mut self, testcase: &Testcase) { + if testcase.has_metadata::() { + self.is_favored_size += 1; + } + } + + fn maybe_update_slowest_exec(&mut self, testcase: &Testcase) { + if let Some(exec_time) = testcase.exec_time() { + if exec_time > &self.slowest_exec { + self.slowest_exec = *exec_time; + } + } + } + + fn update_has_fuzzed_size(&mut self) { + self.has_fuzzed_size += 1; + } + + fn maybe_update_max_depth(&mut self, testcase: &Testcase) { + if let Ok(metadata) = testcase.metadata::() { + if metadata.depth() > self.max_depth { + self.max_depth = metadata.depth(); + } + } + } + + fn update_last_find(&mut self) { + self.last_find = current_time(); + } + + #[cfg(feature = "track_hit_feedbacks")] + fn maybe_update_last_crash(&mut self, testcase: &Testcase, state: &E::State) { + #[cfg(feature = "track_hit_feedbacks")] + if testcase + .hit_objectives() + .contains(&Cow::Borrowed(CRASH_FEEDBACK_NAME)) + { + self.last_crash = current_time(); + self.execs_at_last_objective = *state.executions(); + } + } + + #[cfg(feature = "track_hit_feedbacks")] + fn maybe_update_last_hang(&mut self, testcase: &Testcase, state: &E::State) { + if testcase + .hit_objectives() + .contains(&Cow::Borrowed(TIMEOUT_FEEDBACK_NAME)) + { + self.last_hang = current_time(); + self.execs_at_last_objective = *state.executions(); + } + } + + fn check_interval(&mut self) -> bool { + let cur = current_time(); + if cur.checked_sub(self.last_report_time).unwrap_or_default() > self.stats_report_interval { + self.last_report_time = cur; + return true; + } + false + } + fn maybe_update_cycles(&mut self, queue_cycles: u64) { + if queue_cycles > self.cycles_done { + self.cycles_done += 1; + } + } + + fn maybe_update_cycles_wo_finds(&mut self, queue_cycles: u64) { + if queue_cycles > self.cycles_done && self.last_find < current_time() { + self.cycles_wo_finds += 1; + } + } + + #[allow(clippy::cast_precision_loss)] + #[allow(clippy::unused_self)] + fn calculate_stability(&self, unstable_entries: usize, filled_entries: u64) -> f64 { + ((filled_entries as f64 - unstable_entries as f64) / filled_entries as f64) * 100.0 + } +} + +impl Display for AFLPlotData<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{},", self.relative_time)?; + write!(f, "{},", self.cycles_done)?; + write!(f, "{},", self.cur_item)?; + write!(f, "{},", self.corpus_count)?; + write!(f, "{},", self.pending_total)?; + write!(f, "{},", self.pending_favs)?; + write!(f, "{},", self.total_edges)?; + write!(f, "{},", self.saved_crashes)?; + write!(f, "{},", self.saved_hangs)?; + write!(f, "{},", self.max_depth)?; + write!(f, "{},", self.execs_per_sec)?; + write!(f, "{},", self.execs_done)?; + write!(f, "{}", self.edges_found)?; + Ok(()) + } +} +impl AFLPlotData<'_> { + fn get_header() -> String { + "# relative_time, cycles_done, cur_item, corpus_count, pending_total, pending_favs, total_edges, saved_crashes, saved_hangs, max_depth, execs_per_sec, execs_done, edges_found".to_string() + } +} +impl Display for AFLFuzzerStats<'_> { + fn fmt(&self, f: &mut 
std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, "start_time : {}", &self.start_time)?; + writeln!(f, "start_time : {}", &self.start_time)?; + writeln!(f, "last_update : {}", &self.last_update)?; + writeln!(f, "run_time : {}", &self.run_time)?; + writeln!(f, "fuzzer_pid : {}", &self.fuzzer_pid)?; + writeln!(f, "cycles_done : {}", &self.cycles_done)?; + writeln!(f, "cycles_wo_find : {}", &self.cycles_wo_find)?; + writeln!(f, "time_wo_finds : {}", &self.time_wo_finds)?; + writeln!(f, "fuzz_time : {}", &self.fuzz_time)?; + writeln!(f, "calibration_time : {}", &self.calibration_time)?; + writeln!(f, "sync_time : {}", &self.sync_time)?; + writeln!(f, "trim_time : {}", &self.trim_time)?; + writeln!(f, "execs_done : {}", &self.execs_done)?; + writeln!(f, "execs_per_sec : {}", &self.execs_per_sec)?; + writeln!(f, "execs_ps_last_min : {}", &self.execs_ps_last_min)?; + writeln!(f, "corpus_count : {}", &self.corpus_count)?; + writeln!(f, "corpus_favored : {}", &self.corpus_favored)?; + writeln!(f, "corpus_found : {}", &self.corpus_found)?; + writeln!(f, "corpus_imported : {}", &self.corpus_imported)?; + writeln!(f, "max_depth : {}", &self.max_depth)?; + writeln!(f, "cur_item : {}", &self.cur_item)?; + writeln!(f, "pending_favs : {}", &self.pending_favs)?; + writeln!(f, "pending_total : {}", &self.pending_total)?; + writeln!(f, "corpus_variable : {}", &self.corpus_variable)?; + writeln!(f, "stability : {:.2}%", &self.stability)?; + writeln!(f, "bitmap_cvg : {:.2}%", &self.bitmap_cvg)?; + writeln!(f, "saved_crashes : {}", &self.saved_crashes)?; + writeln!(f, "saved_hangs : {}", &self.saved_hangs)?; + writeln!(f, "last_find : {}", &self.last_find.as_secs())?; + writeln!(f, "last_crash : {}", &self.last_crash.as_secs())?; + writeln!(f, "last_hang : {}", &self.last_hang.as_secs())?; + writeln!(f, "execs_since_crash : {}", &self.execs_since_crash)?; + writeln!(f, "exec_timeout : {}", &self.exec_timeout)?; + writeln!(f, "slowest_exec_ms : {}", &self.slowest_exec_ms)?; + writeln!(f, "peak_rss_mb : {}", &self.peak_rss_mb)?; + writeln!(f, "cpu_affinity : {}", &self.cpu_affinity)?; + writeln!(f, "edges_found : {}", &self.edges_found)?; + writeln!(f, "total_edges : {}", &self.total_edges)?; + writeln!(f, "var_byte_count : {}", &self.var_byte_count)?; + writeln!(f, "havoc_expansion : {}", &self.havoc_expansion)?; + writeln!(f, "auto_dict_entries : {}", &self.auto_dict_entries)?; + writeln!(f, "testcache_size : {}", &self.testcache_size)?; + writeln!(f, "testcache_count : {}", &self.testcache_count)?; + writeln!(f, "testcache_evict : {}", &self.testcache_evict)?; + writeln!(f, "afl_banner : {}", self.afl_banner)?; + writeln!(f, "afl_version : {}", self.afl_version)?; + writeln!(f, "target_mode : {}", self.target_mode)?; + writeln!(f, "command_line : {}", self.command_line)?; + Ok(()) + } +} +/// Get the command used to invoke the fuzzer +#[must_use] +pub fn get_run_cmdline() -> Cow<'static, str> { + let args: Vec = std::env::args().collect(); + Cow::Owned(args.join(" ")) +} + +/// The Builder for `AflStatsStage` +#[derive(Debug)] +pub struct AflStatsStageBuilder { + stats_file_path: Option, + plot_file_path: Option, + core_id: Option, + map_observer_handle: Option>, + uses_autotokens: bool, + report_interval: Duration, + dict_count: usize, + exec_timeout: u64, + banner: String, + version: String, + target_mode: String, + phantom_data: PhantomData<(O, E, EM, Z)>, +} + +impl AflStatsStageBuilder +where + E: UsesState + HasObservers, + EM: EventFirer, + Z: UsesState, + E::State: HasImported + HasCorpus + 
HasMetadata + HasExecutions, + C: AsRef + Named, + O: MapObserver, +{ + fn new() -> Self { + Self { + report_interval: Duration::from_secs(AFL_FUZZER_STATS_UPDATE_INTERVAL_SECS), + stats_file_path: None, + plot_file_path: None, + core_id: None, + map_observer_handle: None, + uses_autotokens: false, + dict_count: 0, + exec_timeout: 0, + banner: String::default(), + version: String::default(), + target_mode: String::default(), + phantom_data: PhantomData, + } + } + + /// The file path to which we will write the fuzzer stats + #[must_use] + pub fn stats_file(mut self, path: PathBuf) -> Self { + self.stats_file_path = Some(path); + self + } + /// The file path to which we will write the plot data + #[must_use] + pub fn plot_file(mut self, path: PathBuf) -> Self { + self.plot_file_path = Some(path); + self + } + /// The core we are bound to + #[must_use] + pub fn core_id(mut self, core_id: CoreId) -> Self { + self.core_id = Some(core_id); + self + } + /// The interval with which we report stats + #[must_use] + pub fn report_interval(mut self, interval: Duration) -> Self { + self.report_interval = interval; + self + } + /// Our `MapObserver` + #[must_use] + pub fn map_observer(mut self, map_observer: &C) -> Self { + self.map_observer_handle = Some(map_observer.handle()); + self + } + /// If we use autotokens provided by the target + #[must_use] + pub fn uses_autotokens(mut self, uses: bool) -> Self { + self.uses_autotokens = uses; + self + } + /// The tokens utilized by the fuzzer + #[must_use] + pub fn tokens(mut self, tokens: &Tokens) -> Self { + self.dict_count = tokens.len(); + self + } + /// AFL++ Banner (typically the target) + #[must_use] + pub fn banner(mut self, banner: String) -> Self { + self.banner = banner; + self + } + /// Version of the fuzzer + #[must_use] + pub fn version(mut self, version: String) -> Self { + self.version = version; + self + } + /// The "timeout" value used in `TimeoutFeedback` + #[must_use] + pub fn exec_timeout(mut self, timeout: u64) -> Self { + self.exec_timeout = timeout; + self + } + /// Used in the UI (optional) + /// default, persistent, qemu, unicorn, non-instrumented etc + #[must_use] + pub fn target_mode(mut self, target_mode: String) -> Self { + self.target_mode = target_mode; + self + } + + fn create_plot_data_file(path: &Path) -> Result<(), Error> { + if path.exists() { + // check if it contains any data + let file = File::open(path)?; + if BufReader::new(file).lines().next().is_none() { + std::fs::write(path, AFLPlotData::get_header())?; + } + } else { + std::fs::write(path, AFLPlotData::get_header())?; + } + Ok(()) + } + + fn create_fuzzer_stats_file(path: &Path) -> Result<(), Error> { + if !path.exists() { + _ = OpenOptions::new().append(true).create(true).open(path)?; + } + Ok(()) + } + /// Build [`AflStatsStage`] + /// Will error if: + /// Cannot create the stats file + /// Cannot create the plot file (if provided) + /// No `MapObserver` supplied to the builder + /// No `stats_file_path` provieded + pub fn build(self) -> Result, Error> { + if self.stats_file_path.is_none() { + return Err(Error::illegal_argument("Must set `stats_file_path`")); + } + let stats_file_path = self.stats_file_path.unwrap(); + if self.map_observer_handle.is_none() { + return Err(Error::illegal_argument("Must set `map_observer`")); + } + if let Some(ref plot_file) = self.plot_file_path { + Self::create_plot_data_file(plot_file)?; + } + Self::create_fuzzer_stats_file(&stats_file_path)?; + Ok(AflStatsStage { + stats_file_path, + plot_file_path: self.plot_file_path, + 
map_observer_handle: self.map_observer_handle.unwrap(), + start_time: current_time().as_secs(), + stats_report_interval: self.report_interval, + has_fuzzed_size: 0, + is_favored_size: 0, + cycles_done: 0, + cycles_wo_finds: 0, + execs_at_last_objective: 0, + last_crash: current_time(), + last_find: current_time(), + last_hang: current_time(), + max_depth: 0, + saved_hangs: 0, + saved_crashes: 0, + slowest_exec: Duration::from_secs(0), + last_report_time: current_time(), + pid: process::id(), + exec_timeout: self.exec_timeout, + target_mode: Cow::Owned(self.target_mode), + afl_banner: Cow::Owned(self.banner), + afl_version: Cow::Owned(self.version), + command_line: get_run_cmdline(), + dict_count: self.dict_count, + core_id: self.core_id.unwrap_or(CoreId(0)), + autotokens_enabled: self.uses_autotokens, + phantom_data: PhantomData, + }) + } +} diff --git a/libafl/src/stages/calibrate.rs b/libafl/src/stages/calibrate.rs index 6b19043532..c138758e16 100644 --- a/libafl/src/stages/calibrate.rs +++ b/libafl/src/stages/calibrate.rs @@ -1,6 +1,10 @@ //! The calibration stage. The fuzzer measures the average exec time and the bitmap size. -use alloc::{borrow::Cow, vec::Vec}; +use alloc::{ + borrow::{Cow, ToOwned}, + string::ToString, + vec::Vec, +}; use core::{fmt::Debug, marker::PhantomData, time::Duration}; use hashbrown::HashSet; @@ -14,10 +18,11 @@ use crate::{ executors::{Executor, ExitKind, HasObservers}, feedbacks::{map::MapFeedbackMetadata, HasObserverHandle}, fuzzer::Evaluator, + inputs::UsesInput, monitors::{AggregatorOps, UserStats, UserStatsValue}, observers::{MapObserver, ObserversTuple}, schedulers::powersched::SchedulerMetadata, - stages::{ExecutionCountRestartHelper, Stage}, + stages::{RetryCountRestartHelper, Stage}, state::{HasCorpus, HasCurrentTestcase, HasExecutions, UsesState}, Error, HasMetadata, HasNamedMetadata, }; @@ -75,7 +80,6 @@ pub struct CalibrationStage { stage_max: usize, /// If we should track stability track_stability: bool, - restart_helper: ExecutionCountRestartHelper, phantom: PhantomData<(E, O, OT)>, } @@ -95,10 +99,12 @@ where EM: EventFirer, O: MapObserver, C: AsRef, - for<'de> ::Entry: Serialize + Deserialize<'de> + 'static, - OT: ObserversTuple, - Self::State: HasCorpus + HasMetadata + HasNamedMetadata + HasExecutions, + for<'de> ::Entry: + Serialize + Deserialize<'de> + 'static + Default + Debug + Bounded, + OT: ObserversTuple, + E::State: HasCorpus + HasMetadata + HasNamedMetadata + HasExecutions + HasCurrentTestcase, Z: Evaluator, + <::State as HasCorpus>::Corpus: Corpus, //delete me { #[inline] #[allow( @@ -125,8 +131,6 @@ where let mut iter = self.stage_max; // If we restarted after a timeout or crash, do less iterations. 
- iter -= usize::try_from(self.restart_helper.execs_since_progress_start(state)?)?; - let input = state.current_input_cloned()?; // Run once to get the initial calibration map @@ -206,16 +210,16 @@ where .observers_mut() .post_exec_all(state, &input, &exit_kind)?; - if self.track_stability { + if self.track_stability && exit_kind != ExitKind::Timeout { let map = &executor.observers()[&self.map_observer_handle] .as_ref() .to_vec(); - let history_map = &mut state + let map_state = state .named_metadata_map_mut() .get_mut::>(&self.map_name) - .unwrap() - .history_map; + .unwrap(); + let history_map = &mut map_state.history_map; if history_map.len() < map_first_len { history_map.resize(map_first_len, O::Entry::default()); @@ -227,6 +231,10 @@ where .enumerate() { if *first != *cur && *history != O::Entry::max_value() { + // If we just hit a history map entry that was not covered before, but is now flagged as flaky, + // we need to make sure the `num_covered_map_indexes` is kept in sync. + map_state.num_covered_map_indexes += + usize::from(*history == O::Entry::default()); *history = O::Entry::max_value(); unstable_entries.push(idx); }; @@ -259,9 +267,15 @@ where let observers = executor.observers(); let map = observers[&self.map_observer_handle].as_ref(); - let mut bitmap_size = map.count_bytes(); - assert!(bitmap_size != 0); - bitmap_size = bitmap_size.max(1); // just don't make it 0 because we take log2 of it later. + let bitmap_size = map.count_bytes(); + + if bitmap_size < 1 { + return Err(Error::invalid_corpus( + "This testcase does not trigger any edges. Check your instrumentation!" + .to_string(), + )); + } + let psmeta = state .metadata_map_mut() .get_mut::() @@ -307,8 +321,6 @@ where data.set_handicap(handicap); } - *state.executions_mut() += u64::try_from(i).unwrap(); - // Send the stability event to the broker if unstable_found { if let Some(meta) = state.metadata_map().get::() { @@ -317,15 +329,19 @@ where map_first_filled_count, 0, "The map's filled count must never be 0" ); + // In theory `map_first_filled_count - unstable_entries` could be negative. + // Because `map_first_filled_count` is the filled count of just one single run. + // While the `unstable_entries` is the number of all the unstable entries across multiple runs. + // If the target is very unstable (~100%) then this would hit more edges than `map_first_filled_count`. + // But even in that case, we don't allow negative stability and just show 0% here. + let stable_count: u64 = + map_first_filled_count.saturating_sub(unstable_entries) as u64; mgr.fire( state, Event::UpdateUserStats { name: Cow::from("stability"), value: UserStats::new( - UserStatsValue::Ratio( - (map_first_filled_count - unstable_entries) as u64, - map_first_filled_count as u64, - ), + UserStatsValue::Ratio(stable_count, map_first_filled_count as u64), AggregatorOps::Avg, ), phantom: PhantomData, @@ -352,14 +368,18 @@ where Ok(()) } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - // TODO: Make sure this is the correct way / there may be a better way? - self.restart_helper.restart_progress_should_run(state) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + // Calibration stage disallow restarts + // If a testcase that causes crash/timeout in the queue, we need to remove it from the queue immediately. 
+ RetryCountRestartHelper::no_retry(state, &self.name) + + // todo + // remove this guy from corpus queue } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { // TODO: Make sure this is the correct way / there may be a better way? - self.restart_helper.clear_restart_progress(state) + RetryCountRestartHelper::clear_progress(state, &self.name) } } @@ -368,7 +388,7 @@ where O: MapObserver, for<'it> O: AsIter<'it, Item = O::Entry>, C: AsRef, - OT: ObserversTuple<::State>, + OT: ObserversTuple<::Input, ::State>, E: UsesState, { /// Create a new [`CalibrationStage`]. @@ -377,14 +397,16 @@ where where F: HasObserverHandle + Named, { + let map_name = map_feedback.name().clone(); Self { map_observer_handle: map_feedback.observer_handle().clone(), - map_name: map_feedback.name().clone(), + map_name: map_name.clone(), stage_max: CAL_STAGE_START, track_stability: true, - restart_helper: ExecutionCountRestartHelper::default(), phantom: PhantomData, - name: Cow::Borrowed(CALIBRATION_STAGE_NAME), + name: Cow::Owned( + CALIBRATION_STAGE_NAME.to_owned() + ":" + map_name.into_owned().as_str(), + ), } } @@ -394,15 +416,9 @@ where where F: HasObserverHandle + Named, { - Self { - map_observer_handle: map_feedback.observer_handle().clone(), - map_name: map_feedback.name().clone(), - stage_max: CAL_STAGE_START, - track_stability: false, - restart_helper: ExecutionCountRestartHelper::default(), - phantom: PhantomData, - name: Cow::Borrowed(CALIBRATION_STAGE_NAME), - } + let mut ret = Self::new(map_feedback); + ret.track_stability = false; + ret } } diff --git a/libafl/src/stages/colorization.rs b/libafl/src/stages/colorization.rs index a3f8844a69..a5ba2b9bc7 100644 --- a/libafl/src/stages/colorization.rs +++ b/libafl/src/stages/colorization.rs @@ -1,5 +1,9 @@ //! 
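// Illustrative sketch of the naming scheme used in the constructor above: the stage name is
// now a static prefix plus the map feedback's name, so two calibration stages over different
// maps keep separate named metadata. Standalone simplification of the `Cow` handling.
use std::borrow::Cow;

const CALIBRATION_STAGE_NAME: &str = "calibration";

fn calibration_stage_name(map_name: &str) -> Cow<'static, str> {
    Cow::Owned(format!("{CALIBRATION_STAGE_NAME}:{map_name}"))
}
// calibration_stage_name("shared_mem") yields "calibration:shared_mem"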
The colorization stage from `colorization()` in afl++ -use alloc::{borrow::Cow, collections::binary_heap::BinaryHeap, vec::Vec}; +use alloc::{ + borrow::{Cow, ToOwned}, + collections::binary_heap::BinaryHeap, + vec::Vec, +}; use core::{cmp::Ordering, fmt::Debug, marker::PhantomData, ops::Range}; use libafl_bolts::{ @@ -10,12 +14,14 @@ use libafl_bolts::{ use serde::{Deserialize, Serialize}; use crate::{ + corpus::Corpus, events::EventFirer, executors::{Executor, HasObservers}, - inputs::HasMutatorBytes, + inputs::{HasMutatorBytes, UsesInput}, mutators::mutations::buffer_copy, + nonzero, observers::{MapObserver, ObserversTuple}, - stages::{RetryRestartHelper, Stage}, + stages::{RetryCountRestartHelper, Stage}, state::{HasCorpus, HasCurrentTestcase, HasRand, UsesState}, Error, HasMetadata, HasNamedMetadata, }; @@ -83,11 +89,13 @@ impl Stage for ColorizationStage where EM: UsesState + EventFirer, E: HasObservers + Executor, - Self::State: HasCorpus + HasMetadata + HasRand + HasNamedMetadata, + E::State: HasCorpus + HasMetadata + HasRand + HasNamedMetadata, + E::Observers: ObserversTuple<::Input, ::State>, E::Input: HasMutatorBytes, O: MapObserver, C: AsRef + Named, Z: UsesState, + <::State as HasCorpus>::Corpus: Corpus, //delete me { #[inline] #[allow(clippy::let_and_return)] @@ -104,14 +112,15 @@ where Ok(()) } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - // TODO this stage needs a proper resume - RetryRestartHelper::restart_progress_should_run(state, self, 3) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + // This is a deterministic stage + // Once it failed, then don't retry, + // It will just fail again + RetryCountRestartHelper::no_retry(state, &self.name) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - // TODO this stage needs a proper resume - RetryRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } } @@ -160,9 +169,11 @@ where O: MapObserver, C: AsRef + Named, E: HasObservers + Executor, - ::State: HasCorpus + HasMetadata + HasRand, + E::Observers: ObserversTuple<::Input, ::State>, + ::State: HasCorpus + HasMetadata + HasRand, E::Input: HasMutatorBytes, Z: UsesState::State>, + <::State as HasCorpus>::Corpus: Corpus, //delete me { #[inline] #[allow(clippy::let_and_return)] @@ -179,20 +190,11 @@ where // This is the buffer we'll randomly mutate during type_replace let mut changed = input.clone(); - // input will be consumed so clone it - let consumed_input = input.clone(); - // First, run orig_input once and get the original hash // Idea: No need to do this every time - let orig_hash = Self::get_raw_map_hash_run( - fuzzer, - executor, - state, - manager, - consumed_input, - observer_handle, - )?; + let orig_hash = + Self::get_raw_map_hash_run(fuzzer, executor, state, manager, &input, observer_handle)?; let changed_bytes = changed.bytes_mut(); let input_len = changed_bytes.len(); @@ -230,13 +232,12 @@ where ); } - let consumed_input = input.clone(); let changed_hash = Self::get_raw_map_hash_run( fuzzer, executor, state, manager, - consumed_input, + &input, observer_handle, )?; @@ -309,9 +310,10 @@ where #[must_use] /// Creates a new [`ColorizationStage`] pub fn new(map_observer: &C) -> Self { + let obs_name = map_observer.name().clone().into_owned(); Self { map_observer_handle: map_observer.handle(), - name: Cow::Borrowed(COLORIZATION_STAGE_NAME), 
+ name: Cow::Owned(COLORIZATION_STAGE_NAME.to_owned() + ":" + obs_name.as_str()), phantom: PhantomData, } } @@ -322,12 +324,12 @@ where executor: &mut E, state: &mut ::State, manager: &mut EM, - input: E::Input, + input: &E::Input, observer_handle: &Handle, ) -> Result { - executor.observers_mut().pre_exec_all(state, &input)?; + executor.observers_mut().pre_exec_all(state, input)?; - let exit_kind = executor.run_target(fuzzer, state, manager, &input)?; + let exit_kind = executor.run_target(fuzzer, state, manager, input)?; let observers = executor.observers(); let observer = observers[observer_handle].as_ref(); @@ -336,7 +338,7 @@ where executor .observers_mut() - .post_exec_all(state, &input, &exit_kind)?; + .post_exec_all(state, input, &exit_kind)?; // let observers = executor.observers(); // fuzzer.process_execution(state, manager, input, observers, &exit_kind, true)?; @@ -352,11 +354,11 @@ where let c = match bytes[idx] { 0x41..=0x46 => { // 'A' + 1 + rand('F' - 'A') - 0x41 + 1 + state.rand_mut().below(5) as u8 + 0x41 + 1 + state.rand_mut().below(nonzero!(5)) as u8 } 0x61..=0x66 => { // 'a' + 1 + rand('f' - 'a') - 0x61 + 1 + state.rand_mut().below(5) as u8 + 0x61 + 1 + state.rand_mut().below(nonzero!(5)) as u8 } 0x30 => { // '0' -> '1' @@ -368,35 +370,35 @@ where } 0x32..=0x39 => { // '2' + 1 + rand('9' - '2') - 0x32 + 1 + state.rand_mut().below(7) as u8 + 0x32 + 1 + state.rand_mut().below(nonzero!(7)) as u8 } 0x47..=0x5a => { // 'G' + 1 + rand('Z' - 'G') - 0x47 + 1 + state.rand_mut().below(19) as u8 + 0x47 + 1 + state.rand_mut().below(nonzero!(19)) as u8 } 0x67..=0x7a => { // 'g' + 1 + rand('z' - 'g') - 0x67 + 1 + state.rand_mut().below(19) as u8 + 0x67 + 1 + state.rand_mut().below(nonzero!(19)) as u8 } 0x21..=0x2a => { // '!' + 1 + rand('*' - '!'); - 0x21 + 1 + state.rand_mut().below(9) as u8 + 0x21 + 1 + state.rand_mut().below(nonzero!(9)) as u8 } 0x2c..=0x2e => { // ',' + 1 + rand('.' - ',') - 0x2c + 1 + state.rand_mut().below(2) as u8 + 0x2c + 1 + state.rand_mut().below(nonzero!(2)) as u8 } 0x3a..=0x40 => { // ':' + 1 + rand('@' - ':') - 0x3a + 1 + state.rand_mut().below(6) as u8 + 0x3a + 1 + state.rand_mut().below(nonzero!(6)) as u8 } 0x5b..=0x60 => { // '[' + 1 + rand('`' - '[') - 0x5b + 1 + state.rand_mut().below(5) as u8 + 0x5b + 1 + state.rand_mut().below(nonzero!(5)) as u8 } 0x7b..=0x7e => { // '{' + 1 + rand('~' - '{') - 0x7b + 1 + state.rand_mut().below(3) as u8 + 0x7b + 1 + state.rand_mut().below(nonzero!(3)) as u8 } 0x2b => { // '+' -> '/' diff --git a/libafl/src/stages/concolic.rs b/libafl/src/stages/concolic.rs index 515b615870..ccb8a4a36f 100644 --- a/libafl/src/stages/concolic.rs +++ b/libafl/src/stages/concolic.rs @@ -1,8 +1,7 @@ //! This module contains the `concolic` stages, which can trace a target using symbolic execution //! and use the results for fuzzer input and mutations. //! 
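// Illustrative sketch of why `below()` now takes `nonzero!(…)` bounds in the type_replace
// code above: encoding the bound as `NonZeroUsize` rules out a modulo-by-zero at compile
// time. `next_random` is a hypothetical raw RNG value standing in for LibAFL's `Rand`.
use std::num::NonZeroUsize;

fn below(next_random: u64, bound: NonZeroUsize) -> usize {
    (next_random % bound.get() as u64) as usize
}
// below(rng_value, NonZeroUsize::new(5).unwrap()) is always in 0..5 and can never divide by zero.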
- -use alloc::borrow::Cow; +use alloc::borrow::{Cow, ToOwned}; #[cfg(feature = "concolic_mutation")] use alloc::{string::ToString, vec::Vec}; #[cfg(feature = "concolic_mutation")] @@ -18,9 +17,10 @@ use crate::monitors::PerfFeature; #[cfg(all(feature = "introspection", feature = "concolic_mutation"))] use crate::state::HasClientPerfMonitor; use crate::{ + corpus::Corpus, executors::{Executor, HasObservers}, - observers::concolic::ConcolicObserver, - stages::{RetryRestartHelper, Stage, TracingStage}, + observers::{concolic::ConcolicObserver, ObserversTuple}, + stages::{RetryCountRestartHelper, Stage, TracingStage}, state::{HasCorpus, HasCurrentTestcase, HasExecutions, UsesState}, Error, HasMetadata, HasNamedMetadata, }; @@ -29,7 +29,6 @@ use crate::{ inputs::HasMutatorBytes, mark_feature_time, observers::concolic::{ConcolicMetadata, SymExpr, SymExprRef}, - stages::ExecutionCountRestartHelper, start_timer, state::State, Evaluator, @@ -38,6 +37,7 @@ use crate::{ /// Wraps a [`TracingStage`] to add concolic observing. #[derive(Clone, Debug)] pub struct ConcolicTracingStage<'a, EM, TE, Z> { + name: Cow<'static, str>, inner: TracingStage, observer_handle: Handle>, } @@ -49,10 +49,12 @@ where type State = TE::State; } +/// The name for concolic tracer +pub const CONCOLIC_TRACING_STAGE_NAME: &str = "concolictracing"; + impl Named for ConcolicTracingStage<'_, EM, TE, Z> { fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("ConcolicTracingStage"); - &NAME + &self.name } } @@ -61,8 +63,10 @@ where E: UsesState, EM: UsesState, TE: Executor + HasObservers, - Self::State: HasExecutions + HasCorpus + HasNamedMetadata, + TE::Observers: ObserversTuple::State>, + TE::State: HasExecutions + HasCorpus + HasNamedMetadata + HasCurrentTestcase, Z: UsesState, + <::State as HasCorpus>::Corpus: Corpus, //delete me { #[inline] fn perform( @@ -83,12 +87,15 @@ where Ok(()) } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - RetryRestartHelper::restart_progress_should_run(state, self, 3) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + // This is a deterministic stage + // Once it failed, then don't retry, + // It will just fail again + RetryCountRestartHelper::no_retry(state, &self.name) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - RetryRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } } @@ -99,9 +106,13 @@ impl<'a, EM, TE, Z> ConcolicTracingStage<'a, EM, TE, Z> { inner: TracingStage, observer_handle: Handle>, ) -> Self { + let observer_name = observer_handle.name().clone(); Self { inner, observer_handle, + name: Cow::Owned( + CONCOLIC_TRACING_STAGE_NAME.to_owned() + ":" + observer_name.into_owned().as_str(), + ), } } } @@ -351,10 +362,9 @@ fn generate_mutations(iter: impl Iterator) -> Vec< /// A mutational stage that uses Z3 to solve concolic constraints attached to the [`crate::corpus::Testcase`] by the [`ConcolicTracingStage`]. 
#[cfg(feature = "concolic_mutation")] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub struct SimpleConcolicMutationalStage { - /// The helper keeps track of progress for timeouting/restarting targets - restart_helper: ExecutionCountRestartHelper, + name: Cow<'static, str>, phantom: PhantomData, } @@ -366,6 +376,21 @@ where type State = Z::State; } +#[cfg(feature = "concolic_mutation")] +/// The unique id for this stage +static mut SIMPLE_CONCOLIC_MUTATIONAL_ID: usize = 0; + +#[cfg(feature = "concolic_mutation")] +/// The name for concolic mutation stage +pub const SIMPLE_CONCOLIC_MUTATIONAL_NAME: &str = "concolicmutation"; + +#[cfg(feature = "concolic_mutation")] +impl Named for SimpleConcolicMutationalStage { + fn name(&self) -> &Cow<'static, str> { + &self.name + } +} + #[cfg(feature = "concolic_mutation")] impl Stage for SimpleConcolicMutationalStage where @@ -373,7 +398,9 @@ where EM: UsesState, Z: Evaluator, Z::Input: HasMutatorBytes, - Self::State: State + HasExecutions + HasCorpus + HasMetadata, + Z::State: + State + HasExecutions + HasCorpus + HasMetadata + HasNamedMetadata + HasCurrentTestcase, + <::State as HasCorpus>::Corpus: Corpus, //delete me { #[inline] fn perform( @@ -396,11 +423,8 @@ where mutations }); - let post_restart_skip_cnt = - usize::try_from(self.restart_helper.execs_since_progress_start(state)?)?; - if let Some(mutations) = mutations { - for mutation in mutations.into_iter().skip(post_restart_skip_cnt) { + for mutation in mutations { let mut input_copy = state.current_input_cloned()?; for (index, new_byte) in mutation { input_copy.bytes_mut()[index] = new_byte; @@ -413,21 +437,34 @@ where } #[inline] - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - self.restart_helper.restart_progress_should_run(state) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + // This is a deterministic stage + // Once it failed, then don't retry, + // It will just fail again + RetryCountRestartHelper::no_retry(state, &self.name) } #[inline] - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - self.restart_helper.clear_restart_progress(state) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } } #[cfg(feature = "concolic_mutation")] -impl Default for SimpleConcolicMutationalStage { - fn default() -> Self { +impl SimpleConcolicMutationalStage { + #[must_use] + /// Construct this stage + pub fn new() -> Self { + // unsafe but impossible that you create two threads both instantiating this instance + let stage_id = unsafe { + let ret = SIMPLE_CONCOLIC_MUTATIONAL_ID; + SIMPLE_CONCOLIC_MUTATIONAL_ID += 1; + ret + }; Self { - restart_helper: ExecutionCountRestartHelper::default(), + name: Cow::Owned( + SIMPLE_CONCOLIC_MUTATIONAL_NAME.to_owned() + ":" + stage_id.to_string().as_str(), + ), phantom: PhantomData, } } diff --git a/libafl/src/stages/dump.rs b/libafl/src/stages/dump.rs index 7976226257..ecfc2c2a10 100644 --- a/libafl/src/stages/dump.rs +++ b/libafl/src/stages/dump.rs @@ -9,7 +9,6 @@ use serde::{Deserialize, Serialize}; use crate::{ corpus::{Corpus, CorpusId}, - inputs::UsesInput, stages::Stage, state::{HasCorpus, HasRand, HasSolutions, UsesState}, Error, HasMetadata, @@ -46,11 +45,13 @@ where impl Stage for DumpToDiskStage where - CB: FnMut(&::Input, &Self::State) -> Vec, + CB: FnMut(&Self::Input, &Self::State) -> Vec, EM: UsesState, E: UsesState, Z: UsesState, - Self::State: HasCorpus + 
HasSolutions + HasRand + HasMetadata, + EM::State: HasCorpus + HasSolutions + HasRand + HasMetadata, + <::State as HasCorpus>::Corpus: Corpus, //delete me + <::State as HasSolutions>::Solutions: Corpus, //delete me { #[inline] fn perform( @@ -60,68 +61,17 @@ where state: &mut Self::State, _manager: &mut EM, ) -> Result<(), Error> { - let (mut corpus_idx, mut solutions_idx) = - if let Some(meta) = state.metadata_map().get::() { - ( - meta.last_corpus.and_then(|x| state.corpus().next(x)), - meta.last_solution.and_then(|x| state.solutions().next(x)), - ) - } else { - (state.corpus().first(), state.solutions().first()) - }; - - while let Some(i) = corpus_idx { - let mut testcase = state.corpus().get(i)?.borrow_mut(); - state.corpus().load_input_into(&mut testcase)?; - let bytes = (self.to_bytes)(testcase.input().as_ref().unwrap(), state); - - let fname = self.corpus_dir.join(format!( - "id_{i}_{}", - testcase - .filename() - .as_ref() - .map_or_else(|| "unnamed", String::as_str) - )); - let mut f = File::create(fname)?; - drop(f.write_all(&bytes)); - - corpus_idx = state.corpus().next(i); - } - - while let Some(i) = solutions_idx { - let mut testcase = state.solutions().get(i)?.borrow_mut(); - state.solutions().load_input_into(&mut testcase)?; - let bytes = (self.to_bytes)(testcase.input().as_ref().unwrap(), state); - - let fname = self.solutions_dir.join(format!( - "id_{i}_{}", - testcase - .filename() - .as_ref() - .map_or_else(|| "unnamed", String::as_str) - )); - let mut f = File::create(fname)?; - drop(f.write_all(&bytes)); - - solutions_idx = state.solutions().next(i); - } - - state.add_metadata(DumpToDiskMetadata { - last_corpus: state.corpus().last(), - last_solution: state.solutions().last(), - }); - - Ok(()) + self.dump_state_to_disk(state) } #[inline] - fn restart_progress_should_run(&mut self, _state: &mut Self::State) -> Result { + fn should_restart(&mut self, _state: &mut Self::State) -> Result { // Not executing the target, so restart safety is not needed Ok(true) } #[inline] - fn clear_restart_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { + fn clear_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { // Not executing the target, so restart safety is not needed Ok(()) } @@ -131,7 +81,9 @@ impl DumpToDiskStage where EM: UsesState, Z: UsesState, - ::State: HasCorpus + HasSolutions + HasRand + HasMetadata, + ::State: HasCorpus + HasSolutions + HasRand + HasMetadata, + <::State as HasCorpus>::Corpus: Corpus, + <::State as HasSolutions>::Solutions: Corpus, { /// Create a new [`DumpToDiskStage`] pub fn new(to_bytes: CB, corpus_dir: A, solutions_dir: B) -> Result @@ -164,4 +116,66 @@ where phantom: PhantomData, }) } + + #[inline] + fn dump_state_to_disk(&mut self, state: &mut ::State) -> Result<(), Error> + where + CB: FnMut( + &<<::State as HasCorpus>::Corpus as Corpus>::Input, + &::State, + ) -> Vec, + { + let (mut corpus_id, mut solutions_id) = + if let Some(meta) = state.metadata_map().get::() { + ( + meta.last_corpus.and_then(|x| state.corpus().next(x)), + meta.last_solution.and_then(|x| state.solutions().next(x)), + ) + } else { + (state.corpus().first(), state.solutions().first()) + }; + + while let Some(i) = corpus_id { + let mut testcase = state.corpus().get(i)?.borrow_mut(); + state.corpus().load_input_into(&mut testcase)?; + let bytes = (self.to_bytes)(testcase.input().as_ref().unwrap(), state); + + let fname = self.corpus_dir.join(format!( + "id_{i}_{}", + testcase + .filename() + .as_ref() + .map_or_else(|| "unnamed", String::as_str) 
+ )); + let mut f = File::create(fname)?; + drop(f.write_all(&bytes)); + + corpus_id = state.corpus().next(i); + } + + while let Some(current_id) = solutions_id { + let mut testcase = state.solutions().get(current_id)?.borrow_mut(); + state.solutions().load_input_into(&mut testcase)?; + let bytes = (self.to_bytes)(testcase.input().as_ref().unwrap(), state); + + let fname = self.solutions_dir.join(format!( + "id_{current_id}_{}", + testcase + .filename() + .as_ref() + .map_or_else(|| "unnamed", String::as_str) + )); + let mut f = File::create(fname)?; + drop(f.write_all(&bytes)); + + solutions_id = state.solutions().next(current_id); + } + + state.add_metadata(DumpToDiskMetadata { + last_corpus: state.corpus().last(), + last_solution: state.solutions().last(), + }); + + Ok(()) + } } diff --git a/libafl/src/stages/generalization.rs b/libafl/src/stages/generalization.rs index d1bcadbd80..6b2d54d897 100644 --- a/libafl/src/stages/generalization.rs +++ b/libafl/src/stages/generalization.rs @@ -1,6 +1,9 @@ //! The tracing stage can trace the target and enrich a [`crate::corpus::Testcase`] with metadata, for example for `CmpLog`. -use alloc::{borrow::Cow, vec::Vec}; +use alloc::{ + borrow::{Cow, ToOwned}, + vec::Vec, +}; use core::{fmt::Debug, marker::PhantomData}; use libafl_bolts::{ @@ -16,7 +19,7 @@ use crate::{ mark_feature_time, observers::{CanTrack, MapObserver, ObserversTuple}, require_novelties_tracking, - stages::{RetryRestartHelper, Stage}, + stages::{RetryCountRestartHelper, Stage}, start_timer, state::{HasCorpus, HasExecutions, UsesState}, Error, HasMetadata, HasNamedMetadata, @@ -40,9 +43,13 @@ fn find_next_char(list: &[Option], mut idx: usize, ch: u8) -> usize { idx } +/// The name for generalization stage +pub static GENERALIZATION_STAGE_NAME: &str = "generalization"; + /// A stage that runs a tracer executor #[derive(Clone, Debug)] pub struct GeneralizationStage { + name: Cow<'static, str>, map_observer_handle: Handle, #[allow(clippy::type_complexity)] phantom: PhantomData<(EM, O, OT, Z)>, @@ -50,8 +57,7 @@ pub struct GeneralizationStage { impl Named for GeneralizationStage { fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("GeneralizationStage"); - &NAME + &self.name } } @@ -67,9 +73,11 @@ where O: MapObserver, C: CanTrack + AsRef + Named, E: Executor + HasObservers, - Self::State: + E::Observers: ObserversTuple::State>, + EM::State: UsesInput + HasExecutions + HasMetadata + HasCorpus + HasNamedMetadata, EM: UsesState, + <::State as HasCorpus>::Corpus: Corpus, //delete me Z: UsesState, { #[inline] @@ -81,7 +89,7 @@ where state: &mut Self::State, manager: &mut EM, ) -> Result<(), Error> { - let Some(corpus_idx) = state.current_corpus_id()? else { + let Some(corpus_id) = state.current_corpus_id()? 
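// Illustrative sketch of the `dump_state_to_disk` loop above, reduced to plain std types:
// walk all entries newer than the last dump, write each as `id_<n>_<name>`, and remember
// where we stopped. `Entry` and the slice-based corpus are simplified stand-ins for the
// real corpus and testcase types.
use std::fs::File;
use std::io::Write;
use std::path::Path;

struct Entry {
    bytes: Vec<u8>,
    filename: Option<String>,
}

fn dump_new_entries(
    dir: &Path,
    corpus: &[Entry],
    last_dumped: Option<usize>,
) -> std::io::Result<Option<usize>> {
    let start = last_dumped.map_or(0, |i| i + 1);
    for (i, entry) in corpus.iter().enumerate().skip(start) {
        let name = entry.filename.as_deref().unwrap_or("unnamed");
        let mut f = File::create(dir.join(format!("id_{i}_{name}")))?;
        f.write_all(&entry.bytes)?;
    }
    Ok(corpus.len().checked_sub(1)) // index of the newest entry, to resume from next time
}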
else { return Err(Error::illegal_state( "state is not currently processing a corpus index", )); @@ -91,7 +99,7 @@ where start_timer!(state); { let corpus = state.corpus(); - let mut testcase = corpus.get(corpus_idx)?.borrow_mut(); + let mut testcase = corpus.get(corpus_id)?.borrow_mut(); if testcase.scheduled_count() > 0 { return Ok(()); } @@ -99,14 +107,19 @@ where corpus.load_input_into(&mut testcase)?; } mark_feature_time!(state, PerfFeature::GetInputFromCorpus); - let mut entry = state.corpus().get(corpus_idx)?.borrow_mut(); + let mut entry = state.corpus().get(corpus_id)?.borrow_mut(); let input = entry.input_mut().as_mut().unwrap(); let payload: Vec<_> = input.bytes().iter().map(|&x| Some(x)).collect(); + + if payload.len() > MAX_GENERALIZED_LEN { + return Ok(()); + } + let original = input.clone(); let meta = entry.metadata_map().get::().ok_or_else(|| { Error::key_not_found(format!( - "MapNoveltiesMetadata needed for GeneralizationStage not found in testcase #{corpus_idx} (check the arguments of MapFeedback::new(...))" + "MapNoveltiesMetadata needed for GeneralizationStage not found in testcase #{corpus_id} (check the arguments of MapFeedback::new(...))" )) })?; if meta.as_slice().is_empty() { @@ -303,32 +316,30 @@ where b'"', )?; - if payload.len() <= MAX_GENERALIZED_LEN { - // Save the modified input in the corpus - { - let meta = GeneralizedInputMetadata::generalized_from_options(&payload); + // Save the modified input in the corpus + { + let meta = GeneralizedInputMetadata::generalized_from_options(&payload); - assert!(meta.generalized().first() == Some(&GeneralizedItem::Gap)); - assert!(meta.generalized().last() == Some(&GeneralizedItem::Gap)); + assert!(meta.generalized().first() == Some(&GeneralizedItem::Gap)); + assert!(meta.generalized().last() == Some(&GeneralizedItem::Gap)); - let mut entry = state.corpus().get(corpus_idx)?.borrow_mut(); - entry.metadata_map_mut().insert(meta); - } + let mut entry = state.corpus().get(corpus_id)?.borrow_mut(); + entry.metadata_map_mut().insert(meta); } Ok(()) } #[inline] - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { + fn should_restart(&mut self, state: &mut Self::State) -> Result { // TODO: We need to be able to resume better if something crashes or times out - RetryRestartHelper::restart_progress_should_run(state, self, 3) + RetryCountRestartHelper::should_restart(state, &self.name, 3) } #[inline] - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { // TODO: We need to be able to resume better if something crashes or times out - RetryRestartHelper::clear_restart_progress(state, self) + RetryCountRestartHelper::clear_progress(state, &self.name) } } @@ -337,15 +348,19 @@ where EM: UsesState, O: MapObserver, C: CanTrack + AsRef + Named, - OT: ObserversTuple<::State>, ::State: UsesInput + HasExecutions + HasMetadata + HasCorpus, + OT: ObserversTuple::State>, { /// Create a new [`GeneralizationStage`]. 
#[must_use] pub fn new(map_observer: &C) -> Self { require_novelties_tracking!("GeneralizationStage", C); + let name = map_observer.name().clone(); Self { + name: Cow::Owned( + GENERALIZATION_STAGE_NAME.to_owned() + ":" + name.into_owned().as_str(), + ), map_observer_handle: map_observer.handle(), phantom: PhantomData, } @@ -361,8 +376,9 @@ where input: &BytesInput, ) -> Result where - E: Executor + HasObservers::State>, - Z: UsesState::State>, + E: Executor::State> + HasObservers, + E::Observers: ObserversTuple::State>, + Z: UsesState, { start_timer!(state); executor.observers_mut().pre_exec_all(state, input)?; @@ -372,8 +388,6 @@ where let exit_kind = executor.run_target(fuzzer, state, manager, input)?; mark_feature_time!(state, PerfFeature::TargetExecution); - *state.executions_mut() += 1; - start_timer!(state); executor .observers_mut() @@ -405,8 +419,8 @@ where split_char: u8, ) -> Result<(), Error> where - E: Executor + HasObservers::State>, - Z: UsesState::State>, + E: Executor::State> + HasObservers, + Z: UsesState, { let mut start = 0; while start < payload.len() { @@ -444,8 +458,8 @@ where closing_char: u8, ) -> Result<(), Error> where - E: Executor + HasObservers::State>, - Z: UsesState::State>, + E: Executor::State> + HasObservers, + Z: UsesState, { let mut index = 0; while index < payload.len() { diff --git a/libafl/src/stages/generation.rs b/libafl/src/stages/generation.rs index 46f7200c10..a2fb61916f 100644 --- a/libafl/src/stages/generation.rs +++ b/libafl/src/stages/generation.rs @@ -1,3 +1,5 @@ +//! The [`GenStage`] generates a single input and evaluates it. +//! //! A [`Stage`] that generates a single input via a //! [`crate::generators::Generator`] and evaluates it using the fuzzer, possibly //! adding it to the corpus. @@ -54,11 +56,13 @@ where Ok(()) } - fn restart_progress_should_run(&mut self, _state: &mut Self::State) -> Result { + fn should_restart(&mut self, _state: &mut Self::State) -> Result { + // It's a random generation stage + // so you can restart for whatever times you want Ok(true) } - fn clear_restart_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { + fn clear_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { Ok(()) } } diff --git a/libafl/src/stages/logics.rs b/libafl/src/stages/logics.rs index cb1ff665f5..c05b818774 100644 --- a/libafl/src/stages/logics.rs +++ b/libafl/src/stages/logics.rs @@ -3,17 +3,17 @@ use core::marker::PhantomData; use crate::{ - stages::{HasCurrentStage, HasNestedStageStatus, Stage, StageId, StagesTuple}, + stages::{HasCurrentStageId, HasNestedStageStatus, Stage, StageId, StagesTuple}, state::UsesState, Error, }; /// Progress for nested stages. This merely enters/exits the inner stage's scope. 
#[derive(Debug)] -pub struct NestedStageRestartHelper; +pub struct NestedStageRetryCountRestartHelper; -impl NestedStageRestartHelper { - fn restart_progress_should_run(state: &mut S, _stage: &ST) -> Result +impl NestedStageRetryCountRestartHelper { + fn should_restart(state: &mut S, _stage: &ST) -> Result where S: HasNestedStageStatus, { @@ -21,7 +21,7 @@ impl NestedStageRestartHelper { Ok(true) } - fn clear_restart_progress(state: &mut S, _stage: &ST) -> Result<(), Error> + fn clear_progress(state: &mut S, _stage: &ST) -> Result<(), Error> where S: HasNestedStageStatus, { @@ -61,7 +61,7 @@ where state: &mut Self::State, manager: &mut EM, ) -> Result<(), Error> { - while state.current_stage_idx()?.is_some() + while state.current_stage_id()?.is_some() || (self.closure)(fuzzer, executor, state, manager)? { self.stages.perform_all(fuzzer, executor, state, manager)?; @@ -70,12 +70,12 @@ where Ok(()) } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - NestedStageRestartHelper::restart_progress_should_run(state, self) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + NestedStageRetryCountRestartHelper::should_restart(state, self) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - NestedStageRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + NestedStageRetryCountRestartHelper::clear_progress(state, self) } } @@ -126,7 +126,7 @@ where state: &mut Self::State, manager: &mut EM, ) -> Result<(), Error> { - if state.current_stage_idx()?.is_some() || (self.closure)(fuzzer, executor, state, manager)? + if state.current_stage_id()?.is_some() || (self.closure)(fuzzer, executor, state, manager)? 
{ self.if_stages .perform_all(fuzzer, executor, state, manager)?; @@ -134,12 +134,12 @@ where Ok(()) } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - NestedStageRestartHelper::restart_progress_should_run(state, self) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + NestedStageRetryCountRestartHelper::should_restart(state, self) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - NestedStageRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + NestedStageRetryCountRestartHelper::clear_progress(state, self) } } @@ -192,21 +192,21 @@ where state: &mut Self::State, manager: &mut EM, ) -> Result<(), Error> { - let current = state.current_stage_idx()?; + let current = state.current_stage_id()?; let fresh = current.is_none(); let closure_return = fresh && (self.closure)(fuzzer, executor, state, manager)?; if current == Some(StageId(0)) || closure_return { if fresh { - state.set_current_stage_idx(StageId(0))?; + state.set_current_stage_id(StageId(0))?; } state.enter_inner_stage()?; self.if_stages .perform_all(fuzzer, executor, state, manager)?; } else { if fresh { - state.set_current_stage_idx(StageId(1))?; + state.set_current_stage_id(StageId(1))?; } state.enter_inner_stage()?; self.else_stages @@ -214,17 +214,17 @@ where } state.exit_inner_stage()?; - state.clear_stage()?; + state.clear_stage_id()?; Ok(()) } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - NestedStageRestartHelper::restart_progress_should_run(state, self) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + NestedStageRetryCountRestartHelper::should_restart(state, self) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - NestedStageRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + NestedStageRetryCountRestartHelper::clear_progress(state, self) } } @@ -280,12 +280,12 @@ where } } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - NestedStageRestartHelper::restart_progress_should_run(state, self) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + NestedStageRetryCountRestartHelper::should_restart(state, self) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - NestedStageRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + NestedStageRetryCountRestartHelper::clear_progress(state, self) } } diff --git a/libafl/src/stages/mod.rs b/libafl/src/stages/mod.rs index be65359111..5dcad0aaca 100644 --- a/libafl/src/stages/mod.rs +++ b/libafl/src/stages/mod.rs @@ -4,9 +4,16 @@ A well-known [`Stage`], for example, is the mutational stage, running multiple [ Other stages may enrich [`crate::corpus::Testcase`]s with metadata. 
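// Illustrative sketch of the resumable if/else dispatch above: the chosen branch is recorded
// as a stage id before it runs, so that after a crash or restart the same branch is
// re-entered without re-evaluating the condition. Branches and state are simplified
// stand-ins for the real stage tuples.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct StageId(usize);

struct NestedStatus {
    current: Option<StageId>,
}

fn perform_if_else(
    status: &mut NestedStatus,
    mut condition: impl FnMut() -> bool,
    mut if_branch: impl FnMut(),
    mut else_branch: impl FnMut(),
) {
    let fresh = status.current.is_none();
    let take_if = if fresh { condition() } else { status.current == Some(StageId(0)) };
    if take_if {
        if fresh { status.current = Some(StageId(0)); }
        if_branch();
    } else {
        if fresh { status.current = Some(StageId(1)); }
        else_branch();
    }
    status.current = None; // done with this stage; clear for the next round
}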
*/ -use alloc::{borrow::Cow, boxed::Box, vec::Vec}; +use alloc::{ + borrow::{Cow, ToOwned}, + boxed::Box, + string::ToString, + vec::Vec, +}; use core::{fmt, marker::PhantomData}; +#[cfg(feature = "std")] +pub use afl_stats::{AflStatsStage, CalibrationTime, FuzzTime, SyncTime}; pub use calibrate::CalibrationStage; pub use colorization::*; #[cfg(all(feature = "std", unix))] @@ -26,9 +33,11 @@ pub use logics::*; pub use mutational::{MutationalStage, StdMutationalStage}; pub use power::{PowerMutationalStage, StdPowerMutationalStage}; use serde::{Deserialize, Serialize}; -pub use stats::AflStatsStage; +pub use stats::StatsStage; #[cfg(feature = "std")] pub use sync::*; +#[cfg(feature = "std")] +pub use time_tracker::TimeTrackingStageWrapper; pub use tmin::{ MapEqualityFactory, MapEqualityFeedback, StdTMinMutationalStage, TMinMutationalStage, }; @@ -37,16 +46,18 @@ pub use tuneable::*; use tuple_list::NonEmptyTuple; #[cfg(feature = "unicode")] pub use unicode::*; +#[cfg(feature = "std")] +pub use verify_timeouts::{TimeoutsToVerify, VerifyTimeoutsStage}; use crate::{ corpus::{CorpusId, HasCurrentCorpusId}, - events::{EventFirer, EventRestarter, HasEventManagerId, ProgressReporter}, + events::{EventFirer, EventProcessor, EventRestarter, HasEventManagerId, ProgressReporter}, executors::{Executor, HasObservers}, inputs::UsesInput, observers::ObserversTuple, schedulers::Scheduler, stages::push::PushStage, - state::{HasCorpus, HasExecutions, HasLastReportTime, HasRand, State, UsesState}, + state::{HasCorpus, HasExecutions, HasLastReportTime, HasRand, State, Stoppable, UsesState}, Error, EvaluatorObservers, ExecutesInput, ExecutionProcessor, HasMetadata, HasNamedMetadata, HasScheduler, }; @@ -56,6 +67,8 @@ pub mod mutational; pub mod push; pub mod tmin; +#[cfg(feature = "std")] +pub mod afl_stats; pub mod calibrate; pub mod colorization; #[cfg(all(feature = "std", unix))] @@ -63,17 +76,20 @@ pub mod concolic; #[cfg(feature = "std")] pub mod dump; pub mod generalization; -/// The [`generation::GenStage`] generates a single input and evaluates it. pub mod generation; pub mod logics; pub mod power; pub mod stats; #[cfg(feature = "std")] pub mod sync; +#[cfg(feature = "std")] +pub mod time_tracker; pub mod tracing; pub mod tuneable; #[cfg(feature = "unicode")] pub mod unicode; +#[cfg(feature = "std")] +pub mod verify_timeouts; /// A stage is one step in the fuzzing process. /// Multiple stages will be scheduled one by one for each input. @@ -86,18 +102,19 @@ where /// This method will be called before every call to [`Stage::perform`]. /// Initialize the restart tracking for this stage, _if it is not yet initialized_. /// On restart, this will be called again. - /// As long as [`Stage::clear_restart_progress`], all subsequent calls happen on restart. + /// As long as [`Stage::clear_progress`], all subsequent calls happen on restart. /// Returns `true`, if the stage's [`Stage::perform`] method should run, else `false`. - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result; + fn should_restart(&mut self, state: &mut Self::State) -> Result; /// Clear the current status tracking of the associated stage - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error>; + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error>; /// Run the stage. /// - /// Before a call to perform, [`Stage::restart_progress_should_run`] will be (must be!) called. 
- /// After returning (so non-target crash or timeout in a restarting case), [`Stage::clear_restart_progress`] gets called. + /// Before a call to perform, [`Stage::should_restart`] will be (must be!) called. + /// After returning (so non-target crash or timeout in a restarting case), [`Stage::clear_progress`] gets called. /// A call to [`Stage::perform_restartable`] will do these things implicitly. + /// DON'T call this function directly except from `preform_restartable` !! fn perform( &mut self, fuzzer: &mut Z, @@ -106,7 +123,7 @@ where manager: &mut EM, ) -> Result<(), Error>; - /// Run the stage, calling [`Stage::restart_progress_should_run`] and [`Stage::clear_restart_progress`] appropriately + /// Run the stage, calling [`Stage::should_restart`] and [`Stage::clear_progress`] appropriately fn perform_restartable( &mut self, fuzzer: &mut Z, @@ -114,10 +131,10 @@ where state: &mut Self::State, manager: &mut EM, ) -> Result<(), Error> { - if self.restart_progress_should_run(state)? { + if self.should_restart(state)? { self.perform(fuzzer, executor, state, manager)?; } - self.clear_restart_progress(state) + self.clear_progress(state) } } @@ -127,9 +144,9 @@ where E: UsesState, EM: UsesState, Z: UsesState, - S: UsesInput + HasCurrentStage, + S: UsesInput + HasCurrentStageId, { - /// Performs all `Stages` in this tuple + /// Performs all `Stages` in this tuple. fn perform_all( &mut self, fuzzer: &mut Z, @@ -144,7 +161,7 @@ where E: UsesState, EM: UsesState, Z: UsesState, - S: UsesInput + HasCurrentStage, + S: UsesInput + HasCurrentStageId, { fn perform_all( &mut self, @@ -153,7 +170,7 @@ where stage: &mut S, _: &mut EM, ) -> Result<(), Error> { - if stage.current_stage_idx()?.is_some() { + if stage.current_stage_id()?.is_some() { Err(Error::illegal_state( "Got to the end of the tuple without completing resume.", )) @@ -168,10 +185,13 @@ where Head: Stage, Tail: StagesTuple + HasConstLen, E: UsesState, - EM: UsesState, + EM: UsesState + EventProcessor, Z: UsesState, - Head::State: HasCurrentStage, + Head::State: HasCurrentStageId, { + /// Performs all stages in the tuple, + /// Checks after every stage if state wants to stop + /// and returns an [`Error::ShuttingDown`] if so fn perform_all( &mut self, fuzzer: &mut Z, @@ -179,32 +199,41 @@ where state: &mut Head::State, manager: &mut EM, ) -> Result<(), Error> { - match state.current_stage_idx()? { + match state.current_stage_id()? 
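// Illustrative sketch of the `should_restart` / `perform` / `clear_progress` contract
// documented above, with LibAFL's generics stripped away: `perform_restartable` is the
// intended entry point and always clears progress, even when the work was skipped.
trait RestartableStage {
    fn should_restart(&mut self) -> bool;
    fn perform(&mut self);
    fn clear_progress(&mut self);

    fn perform_restartable(&mut self) {
        if self.should_restart() {
            self.perform();
        }
        self.clear_progress();
    }
}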
{ Some(idx) if idx < StageId(Self::LEN) => { // do nothing; we are resuming } Some(idx) if idx == StageId(Self::LEN) => { // perform the stage, but don't set it + + #[allow(clippy::similar_names)] let stage = &mut self.0; stage.perform_restartable(fuzzer, executor, state, manager)?; - state.clear_stage()?; + state.clear_stage_id()?; } Some(idx) if idx > StageId(Self::LEN) => { unreachable!("We should clear the stage index before we get here..."); } // this is None, but the match can't deduce that _ => { - state.set_current_stage_idx(StageId(Self::LEN))?; + state.set_current_stage_id(StageId(Self::LEN))?; + #[allow(clippy::similar_names)] let stage = &mut self.0; stage.perform_restartable(fuzzer, executor, state, manager)?; - state.clear_stage()?; + state.clear_stage_id()?; } } + if state.stop_requested() { + state.discard_stop_request(); + manager.on_shutdown()?; + return Err(Error::shutting_down()); + } + // Execute the remaining stages self.1.perform_all(fuzzer, executor, state, manager) } @@ -220,7 +249,7 @@ where E: UsesState, EM: UsesState, Z: UsesState, - Head::State: HasCurrentStage, + Head::State: HasCurrentStageId, { fn into_vec_reversed( self, @@ -267,10 +296,13 @@ impl StagesTuple for Vec>> where E: UsesState, - EM: UsesState, + EM: UsesState + EventProcessor, Z: UsesState, - S: UsesInput + HasCurrentStage + State, + S: UsesInput + HasCurrentStageId + State, { + /// Performs all stages in the `Vec` + /// Checks after every stage if state wants to stop + /// and returns an [`Error::ShuttingDown`] if so fn perform_all( &mut self, fuzzer: &mut Z, @@ -278,14 +310,25 @@ where state: &mut S, manager: &mut EM, ) -> Result<(), Error> { - self.iter_mut() - .try_for_each(|x| x.perform_restartable(fuzzer, executor, state, manager)) + self.iter_mut().try_for_each(|x| { + if state.stop_requested() { + state.discard_stop_request(); + manager.on_shutdown()?; + return Err(Error::shutting_down()); + } + x.perform_restartable(fuzzer, executor, state, manager) + }) } } +static mut CLOSURE_STAGE_ID: usize = 0; +/// The name for closure stage +pub static CLOSURE_STAGE_NAME: &str = "closure"; + /// A [`Stage`] that will call a closure #[derive(Debug)] pub struct ClosureStage { + name: Cow<'static, str>, closure: CB, phantom: PhantomData<(E, EM, Z)>, } @@ -299,8 +342,7 @@ where impl Named for ClosureStage { fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed(""); - &NAME + &self.name } } @@ -323,14 +365,15 @@ where } #[inline] - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - // Make sure we don't get stuck crashing on a single closure - RetryRestartHelper::restart_progress_should_run(state, self, 3) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + // There's no restart safety in the content of the closure. 
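// Illustrative sketch of the cooperative shutdown check added between stages above: before
// running the next stage, the runner looks at a stop flag, acknowledges ("discards") it,
// and bails out with a shutting-down error. Types are plain stand-ins for the real state
// and event manager.
struct FuzzState {
    stop_requested: bool,
}

fn run_stages(state: &mut FuzzState, stages: &[fn(&mut FuzzState)]) -> Result<(), &'static str> {
    for stage in stages {
        if state.stop_requested {
            state.stop_requested = false; // discard the request so a later restart is clean
            return Err("shutting down");
        }
        stage(state);
    }
    Ok(())
}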
+ // don't restart + RetryCountRestartHelper::no_retry(state, &self.name) } #[inline] - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - RetryRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } } @@ -339,28 +382,25 @@ impl ClosureStage { /// Create a new [`ClosureStage`] #[must_use] pub fn new(closure: CB) -> Self { + // unsafe but impossible that you create two threads both instantiating this instance + let stage_id = unsafe { + let ret = CLOSURE_STAGE_ID; + CLOSURE_STAGE_ID += 1; + ret + }; Self { + name: Cow::Owned(CLOSURE_STAGE_NAME.to_owned() + ":" + stage_id.to_string().as_ref()), closure, phantom: PhantomData, } } } -impl From for ClosureStage -where - CB: FnMut(&mut Z, &mut E, &mut ::State, &mut EM) -> Result<(), Error>, - E: UsesState, -{ - #[must_use] - fn from(closure: CB) -> Self { - Self::new(closure) - } -} - /// Allows us to use a [`push::PushStage`] as a normal [`Stage`] #[allow(clippy::type_complexity)] #[derive(Debug)] pub struct PushStageAdapter { + name: Cow<'static, str>, push_stage: PS, phantom: PhantomData<(CS, EM, OT, Z)>, } @@ -370,53 +410,78 @@ impl PushStageAdapter { /// to be used as a normal [`Stage`] #[must_use] pub fn new(push_stage: PS) -> Self { + // unsafe but impossible that you create two threads both instantiating this instance + let stage_id = unsafe { + let ret = PUSH_STAGE_ADAPTER_ID; + PUSH_STAGE_ADAPTER_ID += 1; + ret + }; Self { + name: Cow::Owned( + PUSH_STAGE_ADAPTER_NAME.to_owned() + ":" + stage_id.to_string().as_str(), + ), push_stage, phantom: PhantomData, } } } +/// The unique counter for this stage +static mut PUSH_STAGE_ADAPTER_ID: usize = 0; +/// The name for push stage adapter +pub static PUSH_STAGE_ADAPTER_NAME: &str = "pushstageadapter"; impl UsesState for PushStageAdapter where - CS: UsesState, + Z: UsesState, { - type State = CS::State; + type State = Z::State; +} + +impl Named for PushStageAdapter { + #[must_use] + fn name(&self) -> &Cow<'static, str> { + &self.name + } } impl Stage for PushStageAdapter where - CS: Scheduler, - Self::State: - HasExecutions + HasMetadata + HasRand + HasCorpus + HasLastReportTime + HasCurrentCorpusId, - E: Executor + HasObservers, + CS: Scheduler, + Self::State: HasExecutions + + HasRand + + HasCorpus + + HasLastReportTime + + HasCurrentCorpusId + + HasNamedMetadata + + HasMetadata, + E: Executor::State> + HasObservers, EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, - OT: ObserversTuple, + OT: ObserversTuple, PS: PushStage, - Z: ExecutesInput - + ExecutionProcessor - + EvaluatorObservers + Z: ExecutesInput + + ExecutionProcessor + + EvaluatorObservers + HasScheduler, { fn perform( &mut self, fuzzer: &mut Z, executor: &mut E, - state: &mut CS::State, + state: &mut Z::State, event_mgr: &mut EM, ) -> Result<(), Error> { let push_stage = &mut self.push_stage; - let Some(corpus_idx) = state.current_corpus_id()? else { + let Some(corpus_id) = state.current_corpus_id()? 
else { return Err(Error::illegal_state( "state is not currently processing a corpus index", )); }; - push_stage.set_current_corpus_id(corpus_idx); + push_stage.set_current_corpus_id(corpus_id); push_stage.init(fuzzer, state, event_mgr, &mut *executor.observers_mut())?; @@ -446,48 +511,51 @@ where } #[inline] - fn restart_progress_should_run(&mut self, _state: &mut Self::State) -> Result { + fn should_restart(&mut self, state: &mut Self::State) -> Result { // TODO: Proper restart handling - call post_exec at the right time, etc... - Ok(true) + RetryCountRestartHelper::no_retry(state, &self.name) } #[inline] - fn clear_restart_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { - Ok(()) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } } /// Progress which permits a fixed amount of resumes per round of fuzzing. If this amount is ever /// exceeded, the input will no longer be executed by this stage. #[derive(Clone, Deserialize, Serialize, Debug)] -pub struct RetryRestartHelper { +pub struct RetryCountRestartHelper { tries_remaining: Option, skipped: HashSet, } -impl_serdeany!(RetryRestartHelper); +impl_serdeany!(RetryCountRestartHelper); + +impl RetryCountRestartHelper { + /// Don't allow restart + pub fn no_retry(state: &mut S, name: &str) -> Result + where + S: HasNamedMetadata + HasCurrentCorpusId, + { + Self::should_restart(state, name, 1) + } -impl RetryRestartHelper { /// Initializes (or counts down in) the progress helper, giving it the amount of max retries /// /// Returns `true` if the stage should run - pub fn restart_progress_should_run( - state: &mut S, - stage: &ST, - max_retries: usize, - ) -> Result + pub fn should_restart(state: &mut S, name: &str, max_retries: usize) -> Result where S: HasNamedMetadata + HasCurrentCorpusId, - ST: Named, { - let corpus_idx = state.current_corpus_id()?.ok_or_else(|| { + let corpus_id = state.current_corpus_id()?.ok_or_else(|| { Error::illegal_state( - "No current_corpus_id set in State, but called RetryRestartHelper::should_skip", + "No current_corpus_id set in State, but called RetryCountRestartHelper::should_skip", ) })?; let initial_tries_remaining = max_retries + 1; - let metadata = state.named_metadata_or_insert_with(stage.name(), || Self { + let metadata = state.named_metadata_or_insert_with(name, || Self { tries_remaining: Some(initial_tries_remaining), skipped: HashSet::new(), }); @@ -504,9 +572,9 @@ impl RetryRestartHelper { metadata.tries_remaining = Some(tries_remaining); Ok(if tries_remaining == 0 { - metadata.skipped.insert(corpus_idx); + metadata.skipped.insert(corpus_id); false - } else if metadata.skipped.contains(&corpus_idx) { + } else if metadata.skipped.contains(&corpus_id) { // skip this testcase, we already retried it often enough... false } else { @@ -515,14 +583,11 @@ impl RetryRestartHelper { } /// Clears the progress - pub fn clear_restart_progress(state: &mut S, stage: &ST) -> Result<(), Error> + pub fn clear_progress(state: &mut S, name: &str) -> Result<(), Error> where S: HasNamedMetadata, - ST: Named, { - state - .named_metadata_mut::(stage.name())? 
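// Illustrative sketch of `RetryCountRestartHelper` above, with the named-metadata plumbing
// replaced by a plain struct: every call before `clear` consumes one try; once the tries for
// a testcase are exhausted it is remembered and skipped from then on. `no_retry` is simply
// the `max_retries == 1` case used by the deterministic stages.
use std::collections::HashSet;

#[derive(Default)]
struct RetryCounter {
    tries_remaining: Option<usize>,
    skipped: HashSet<usize>, // corpus ids we gave up on
}

impl RetryCounter {
    fn should_restart(&mut self, corpus_id: usize, max_retries: usize) -> bool {
        let remaining = self.tries_remaining.unwrap_or(max_retries + 1);
        let remaining = remaining
            .checked_sub(1)
            .expect("called should_restart after retries were already exhausted");
        self.tries_remaining = Some(remaining);
        if remaining == 0 {
            self.skipped.insert(corpus_id);
            false
        } else {
            !self.skipped.contains(&corpus_id)
        }
    }

    fn clear(&mut self) {
        self.tries_remaining = None; // a clean finish resets the countdown
    }
}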
- .tries_remaining = None; + state.named_metadata_mut::(name)?.tries_remaining = None; Ok(()) } } @@ -539,15 +604,15 @@ impl fmt::Display for StageId { } /// Trait for types which track the current stage -pub trait HasCurrentStage { +pub trait HasCurrentStageId { /// Set the current stage; we have started processing this stage - fn set_current_stage_idx(&mut self, idx: StageId) -> Result<(), Error>; + fn set_current_stage_id(&mut self, id: StageId) -> Result<(), Error>; /// Clear the current stage; we are done processing this stage - fn clear_stage(&mut self) -> Result<(), Error>; + fn clear_stage_id(&mut self) -> Result<(), Error>; /// Fetch the current stage -- typically used after a state recovery or transfer - fn current_stage_idx(&self) -> Result, Error>; + fn current_stage_id(&self) -> Result, Error>; /// Notify of a reset from which we may recover fn on_restart(&mut self) -> Result<(), Error> { @@ -557,7 +622,7 @@ pub trait HasCurrentStage { /// Trait for types which track nested stages. Stages which themselves contain stage tuples should /// ensure that they constrain the state with this trait accordingly. -pub trait HasNestedStageStatus: HasCurrentStage { +pub trait HasNestedStageStatus: HasCurrentStageId { /// Enter a stage scope, potentially resuming to an inner stage status. Returns Ok(true) if /// resumed. fn enter_inner_stage(&mut self) -> Result<(), Error>; @@ -597,15 +662,15 @@ impl ExecutionCountRestartHelper { } /// The execs done since start of this [`Stage`]/helper - pub fn execs_since_progress_start(&mut self, state: &mut S) -> Result + pub fn execs_since_progress_start(&mut self, state: &mut S, name: &str) -> Result where - S: HasMetadata + HasExecutions, + S: HasNamedMetadata + HasExecutions, { let started_at_execs = if let Some(started_at_execs) = self.started_at_execs { started_at_execs } else { state - .metadata::() + .named_metadata::(name) .map(|x| { self.started_at_execs = Some(x.started_at_execs); x.started_at_execs @@ -620,32 +685,33 @@ impl ExecutionCountRestartHelper { } /// Initialize progress for the stage this wrapper wraps. - pub fn restart_progress_should_run(&mut self, state: &mut S) -> Result + pub fn should_restart(&mut self, state: &mut S, name: &str) -> Result where - S: HasMetadata + HasExecutions, + S: HasNamedMetadata + HasExecutions, { let executions = *state.executions(); - let metadata = state.metadata_or_insert_with(|| ExecutionCountRestartHelperMetadata { - started_at_execs: executions, - }); + let metadata = + state.named_metadata_or_insert_with(name, || ExecutionCountRestartHelperMetadata { + started_at_execs: executions, + }); self.started_at_execs = Some(metadata.started_at_execs); Ok(true) } /// Clear progress for the stage this wrapper wraps. 
- pub fn clear_restart_progress(&mut self, state: &mut S) -> Result<(), Error> + pub fn clear_progress(&mut self, state: &mut S, name: &str) -> Result<(), Error> where - S: HasMetadata, + S: HasNamedMetadata, { self.started_at_execs = None; - let _metadata = state.remove_metadata::(); - debug_assert!(_metadata.is_some(), "Called clear_restart_progress, but restart_progress_should_run was not called before (or did mutational stages get nested?)"); + let _metadata = state.remove_named_metadata::(name); + debug_assert!(_metadata.is_some(), "Called clear_progress, but should_restart was not called before (or did mutational stages get nested?)"); Ok(()) } } #[cfg(test)] -pub mod test { +mod test { use alloc::borrow::Cow; use core::marker::PhantomData; @@ -655,16 +721,18 @@ pub mod test { use crate::{ corpus::{Corpus, HasCurrentCorpusId, Testcase}, inputs::NopInput, - stages::{RetryRestartHelper, Stage}, - state::{test::test_std_state, HasCorpus, State, UsesState}, + stages::{RetryCountRestartHelper, Stage}, + state::{HasCorpus, State, StdState, UsesState}, HasMetadata, }; + /// A stage that succeeds to resume #[derive(Debug)] pub struct ResumeSucceededStage { phantom: PhantomData, } + /// A progress state for testing #[derive(Serialize, Deserialize, Debug)] pub struct TestProgress { count: usize, @@ -674,7 +742,7 @@ pub mod test { impl TestProgress { #[allow(clippy::unnecessary_wraps)] - fn restart_progress_should_run(state: &mut S, _stage: &ST) -> Result + fn should_restart(state: &mut S, _stage: &ST) -> Result where S: HasMetadata, { @@ -690,7 +758,7 @@ pub mod test { Ok(true) } - fn clear_restart_progress(state: &mut S, _stage: &ST) -> Result<(), Error> + fn clear_progress(state: &mut S, _stage: &ST) -> Result<(), Error> where S: HasMetadata, { @@ -727,22 +795,23 @@ pub mod test { Ok(()) } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - TestProgress::restart_progress_should_run(state, self) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + TestProgress::should_restart(state, self) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - TestProgress::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + TestProgress::clear_progress(state, self) } } + /// Test to test retries in stages #[test] fn test_tries_progress() -> Result<(), Error> { // # Safety // No concurrency per testcase #[cfg(any(not(feature = "serdeany_autoreg"), miri))] unsafe { - RetryRestartHelper::register(); + RetryCountRestartHelper::register(); } struct StageWithOneTry; @@ -754,53 +823,68 @@ pub mod test { } } - let mut state = test_std_state(); + #[allow(clippy::similar_names)] + let mut state = StdState::nop()?; let stage = StageWithOneTry; - let corpus_idx = state.corpus_mut().add(Testcase::new(NopInput {}))?; + let corpus_id = state.corpus_mut().add(Testcase::new(NopInput {}))?; - state.set_corpus_idx(corpus_idx)?; + state.set_corpus_id(corpus_id)?; for _ in 0..10 { // used normally, no retries means we never skip - assert!(RetryRestartHelper::restart_progress_should_run( - &mut state, &stage, 1 + assert!(RetryCountRestartHelper::should_restart( + &mut state, + stage.name(), + 1 )?); - RetryRestartHelper::clear_restart_progress(&mut state, &stage)?; + RetryCountRestartHelper::clear_progress(&mut state, stage.name())?; } for _ in 0..10 { // used normally, only one retry means we never skip - assert!(RetryRestartHelper::restart_progress_should_run( - &mut state, &stage, 
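// Illustrative sketch of `ExecutionCountRestartHelper` above, now keyed per stage name: on
// (re)entry the stage remembers the global execution counter, and after a crash-restart it
// can compute how many executions it already did and skip that much work. Standalone
// simplification of the named-metadata storage.
use std::collections::HashMap;

#[derive(Default)]
struct ExecProgress {
    started_at_execs: HashMap<String, u64>, // stage name -> execs when the stage started
}

impl ExecProgress {
    fn on_stage_start(&mut self, name: &str, total_execs: u64) {
        self.started_at_execs.entry(name.to_string()).or_insert(total_execs);
    }

    fn execs_since_start(&self, name: &str, total_execs: u64) -> u64 {
        total_execs - self.started_at_execs.get(name).copied().unwrap_or(total_execs)
    }

    fn clear(&mut self, name: &str) {
        self.started_at_execs.remove(name);
    }
}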
2 + assert!(RetryCountRestartHelper::should_restart( + &mut state, + stage.name(), + 2 )?); - assert!(RetryRestartHelper::restart_progress_should_run( - &mut state, &stage, 2 + assert!(RetryCountRestartHelper::should_restart( + &mut state, + stage.name(), + 2 )?); - RetryRestartHelper::clear_restart_progress(&mut state, &stage)?; + RetryCountRestartHelper::clear_progress(&mut state, stage.name())?; } - assert!(RetryRestartHelper::restart_progress_should_run( - &mut state, &stage, 2 + assert!(RetryCountRestartHelper::should_restart( + &mut state, + stage.name(), + 2 )?); // task failed, let's resume // we still have one more try! - assert!(RetryRestartHelper::restart_progress_should_run( - &mut state, &stage, 2 + assert!(RetryCountRestartHelper::should_restart( + &mut state, + stage.name(), + 2 )?); // task failed, let's resume // out of retries, so now we skip - assert!(!RetryRestartHelper::restart_progress_should_run( - &mut state, &stage, 2 + assert!(!RetryCountRestartHelper::should_restart( + &mut state, + stage.name(), + 2 )?); - RetryRestartHelper::clear_restart_progress(&mut state, &stage)?; + RetryCountRestartHelper::clear_progress(&mut state, stage.name())?; // we previously exhausted this testcase's retries, so we skip - assert!(!RetryRestartHelper::restart_progress_should_run( - &mut state, &stage, 2 + assert!(!RetryCountRestartHelper::should_restart( + &mut state, + stage.name(), + 2 )?); - RetryRestartHelper::clear_restart_progress(&mut state, &stage)?; + RetryCountRestartHelper::clear_progress(&mut state, stage.name())?; Ok(()) } diff --git a/libafl/src/stages/mutational.rs b/libafl/src/stages/mutational.rs index 3d45b7c287..a4e7c85ea6 100644 --- a/libafl/src/stages/mutational.rs +++ b/libafl/src/stages/mutational.rs @@ -1,8 +1,11 @@ //| The [`MutationalStage`] is the default stage used during fuzzing. //! For the current input, it will perform a range of random mutations, and then run them in the executor. -use alloc::borrow::Cow; -use core::marker::PhantomData; +use alloc::{ + borrow::{Cow, ToOwned}, + string::ToString, +}; +use core::{marker::PhantomData, num::NonZeroUsize}; use libafl_bolts::{rands::Rand, Named}; @@ -12,7 +15,8 @@ use crate::{ inputs::Input, mark_feature_time, mutators::{MultiMutator, MutationResult, Mutator}, - stages::{ExecutionCountRestartHelper, RetryRestartHelper, Stage}, + nonzero, + stages::{RetryCountRestartHelper, Stage}, start_timer, state::{HasCorpus, HasCurrentTestcase, HasExecutions, HasRand, UsesState}, Error, HasMetadata, HasNamedMetadata, @@ -27,7 +31,7 @@ use crate::{monitors::PerfFeature, state::HasClientPerfMonitor}; pub trait MutatedTransformPost: Sized { /// Perform any post-execution steps necessary for the transformed input (e.g., updating metadata) #[inline] - fn post_exec(self, state: &mut S, new_corpus_idx: Option) -> Result<(), Error> { + fn post_exec(self, state: &mut S, new_corpus_id: Option) -> Result<(), Error> { Ok(()) } } @@ -57,7 +61,8 @@ where impl MutatedTransform for I where I: Input + Clone, - S: HasCorpus, + S: HasCorpus, + S::Corpus: Corpus, { type Post = (); @@ -86,8 +91,9 @@ where M: Mutator, EM: UsesState, Z: Evaluator, - Self::State: HasCorpus, + Self::State: HasCorpus + HasCurrentTestcase, I: MutatedTransform + Clone, + <::State as HasCorpus>::Corpus: Corpus, { /// The mutator registered for this stage fn mutator(&self) -> &M; @@ -98,9 +104,6 @@ where /// Gets the number of iterations this mutator should run for. 
fn iterations(&self, state: &mut Self::State) -> Result; - /// Gets the number of executions this mutator already did since it got first called in this fuzz round. - fn execs_since_progress_start(&mut self, state: &mut Self::State) -> Result; - /// Runs this (mutational) stage for the given testcase #[allow(clippy::cast_possible_wrap)] // more than i32 stages on 32 bit system - highly unlikely... fn perform_mutational( @@ -142,12 +145,12 @@ where // Time is measured directly the `evaluate_input` function let (untransformed, post) = input.try_transform_into(state)?; - let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, untransformed)?; - if corpus_idx.is_some() { unsafe {MUTATION_STAGE_SUCCESS += 1;}} + let (_, corpus_id) = fuzzer.evaluate_input(state, executor, manager, untransformed)?; + if corpus_id.is_some() { unsafe {MUTATION_STAGE_SUCCESS += 1;}} // count successful mutations start_timer!(state); - self.mutator_mut().post_exec(state, corpus_idx)?; - post.post_exec(state, corpus_idx)?; + self.mutator_mut().post_exec(state, corpus_id)?; + post.post_exec(state, corpus_id)?; mark_feature_time!(state, PerfFeature::MutatePostExec); } @@ -157,17 +160,17 @@ where /// Default value, how many iterations each stage gets, as an upper bound. /// It may randomly continue earlier. -pub static DEFAULT_MUTATIONAL_MAX_ITERATIONS: usize = 128; +pub const DEFAULT_MUTATIONAL_MAX_ITERATIONS: usize = 128; /// The default mutational stage #[derive(Clone, Debug)] pub struct StdMutationalStage { + /// The name + name: Cow<'static, str>, /// The mutator(s) to use mutator: M, /// The maximum amount of iterations we should do each round - max_iterations: usize, - /// The progress helper for this mutational stage - restart_helper: ExecutionCountRestartHelper, + max_iterations: NonZeroUsize, #[allow(clippy::type_complexity)] phantom: PhantomData<(E, EM, I, Z)>, } @@ -178,8 +181,9 @@ where EM: UsesState, M: Mutator, Z: Evaluator, - Self::State: HasCorpus + HasRand + HasExecutions + HasMetadata, + Z::State: HasCorpus + HasRand + HasExecutions + HasMetadata + HasNamedMetadata, I: MutatedTransform + Clone, + <::State as HasCorpus>::Corpus: Corpus, //delete me { /// The mutator, added to this stage #[inline] @@ -197,12 +201,13 @@ where fn iterations(&self, state: &mut Self::State) -> Result { Ok(1 + state.rand_mut().below(self.max_iterations)) } - - fn execs_since_progress_start(&mut self, state: &mut Self::State) -> Result { - self.restart_helper.execs_since_progress_start(state) - } } +/// The unique id for mutational stage +static mut MUTATIONAL_STAGE_ID: usize = 0; +/// The name for mutational stage +pub static MUTATIONAL_STAGE_NAME: &str = "mutational"; + impl UsesState for StdMutationalStage where Z: UsesState, @@ -210,14 +215,21 @@ where type State = Z::State; } +impl Named for StdMutationalStage { + fn name(&self) -> &Cow<'static, str> { + &self.name + } +} + impl Stage for StdMutationalStage where E: UsesState, EM: UsesState, M: Mutator, Z: Evaluator, - Self::State: HasCorpus + HasRand + HasMetadata + HasExecutions, + Z::State: HasCorpus + HasRand + HasMetadata + HasExecutions + HasNamedMetadata, I: MutatedTransform + Clone, + <::State as HasCorpus>::Corpus: Corpus, //delete me { #[inline] #[allow(clippy::let_and_return)] @@ -236,14 +248,12 @@ where ret } - fn restart_progress_should_run(&mut self, _state: &mut Self::State) -> Result { - Ok(true) - // self.restart_helper.restart_progress_should_run(state) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + 
RetryCountRestartHelper::should_restart(state, &self.name, 3) } - fn clear_restart_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { - Ok(()) - // self.restart_helper.clear_restart_progress(state) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } } @@ -257,11 +267,13 @@ where { /// Creates a new default mutational stage pub fn new(mutator: M) -> Self { - Self::transforming_with_max_iterations(mutator, DEFAULT_MUTATIONAL_MAX_ITERATIONS) + // Safe to unwrap: DEFAULT_MUTATIONAL_MAX_ITERATIONS is never 0. + Self::transforming_with_max_iterations(mutator, nonzero!(DEFAULT_MUTATIONAL_MAX_ITERATIONS)) } /// Creates a new mutational stage with the given max iterations - pub fn with_max_iterations(mutator: M, max_iterations: usize) -> Self { + #[inline] + pub fn with_max_iterations(mutator: M, max_iterations: NonZeroUsize) -> Self { Self::transforming_with_max_iterations(mutator, max_iterations) } } @@ -276,15 +288,27 @@ where { /// Creates a new transforming mutational stage with the default max iterations pub fn transforming(mutator: M) -> Self { - Self::transforming_with_max_iterations(mutator, DEFAULT_MUTATIONAL_MAX_ITERATIONS) + // Safe to unwrap: DEFAULT_MUTATIONAL_MAX_ITERATIONS is never 0. + Self::transforming_with_max_iterations(mutator, nonzero!(DEFAULT_MUTATIONAL_MAX_ITERATIONS)) } /// Creates a new transforming mutational stage with the given max iterations - pub fn transforming_with_max_iterations(mutator: M, max_iterations: usize) -> Self { + /// + /// # Errors + /// Will return [`Error::IllegalArgument`] for `max_iterations` of 0. + #[inline] + pub fn transforming_with_max_iterations(mutator: M, max_iterations: NonZeroUsize) -> Self { + let stage_id = unsafe { + let ret = MUTATIONAL_STAGE_ID; + MUTATIONAL_STAGE_ID += 1; + ret + }; + let name = + Cow::Owned(MUTATIONAL_STAGE_NAME.to_owned() + ":" + stage_id.to_string().as_str()); Self { + name, mutator, max_iterations, - restart_helper: ExecutionCountRestartHelper::default(), phantom: PhantomData, } } @@ -293,11 +317,17 @@ where /// A mutational stage that operates on multiple inputs, as returned by [`MultiMutator::multi_mutate`]. 
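The max_iterations field above changes from usize to NonZeroUsize, so the "must not be 0" invariant is enforced by the type instead of at runtime; the nonzero! macro covers the constant default. A minimal standalone sketch of what this means for callers (plain Rust only; MyStage is a stand-in, not the real StdMutationalStage):

use core::num::NonZeroUsize;

struct MyStage {
    max_iterations: NonZeroUsize,
}

impl MyStage {
    // Mirrors `with_max_iterations`: the caller proves the bound is non-zero.
    fn with_max_iterations(max_iterations: NonZeroUsize) -> Self {
        Self { max_iterations }
    }
}

fn main() {
    // For a compile-time constant the check can happen at const-eval time,
    // which is roughly what the `nonzero!` macro used in the diff provides.
    const MAX: NonZeroUsize = match NonZeroUsize::new(128) {
        Some(n) => n,
        None => panic!("max_iterations must not be 0"),
    };
    let stage = MyStage::with_max_iterations(MAX);
    assert_eq!(stage.max_iterations.get(), 128);

    // For a runtime value the zero case has to be handled explicitly.
    let from_cli = NonZeroUsize::new(32).expect("iterations must be > 0");
    let _ = MyStage::with_max_iterations(from_cli);
}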
#[derive(Clone, Debug)] pub struct MultiMutationalStage { + name: Cow<'static, str>, mutator: M, #[allow(clippy::type_complexity)] phantom: PhantomData<(E, EM, I, Z)>, } +/// The unique id for multi mutational stage +static mut MULTI_MUTATIONAL_STAGE_ID: usize = 0; +/// The name for multi mutational stage +pub static MULTI_MUTATIONAL_STAGE_NAME: &str = "multimutational"; + impl UsesState for MultiMutationalStage where Z: UsesState, @@ -307,8 +337,7 @@ where impl Named for MultiMutationalStage { fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("MultiMutational"); - &NAME + &self.name } } @@ -318,19 +347,19 @@ where EM: UsesState, M: MultiMutator, Z: Evaluator, - Self::State: HasCorpus + HasRand + HasNamedMetadata, + Z::State: HasCorpus + HasRand + HasNamedMetadata + HasCurrentTestcase, I: MutatedTransform + Clone, + <::State as HasCorpus>::Corpus: Corpus, //delete me { #[inline] - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - // TODO: add proper crash/timeout handling - // For now, Make sure we don't get stuck crashing on a single testcase - RetryRestartHelper::restart_progress_should_run(state, self, 3) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + // Make sure we don't get stuck crashing on a single testcase + RetryCountRestartHelper::should_restart(state, &self.name, 3) } #[inline] - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - RetryRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } #[inline] @@ -354,9 +383,9 @@ where for new_input in generated { // Time is measured directly the `evaluate_input` function let (untransformed, post) = new_input.try_transform_into(state)?; - let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, untransformed)?; - self.mutator.multi_post_exec(state, corpus_idx)?; - post.post_exec(state, corpus_idx)?; + let (_, corpus_id) = fuzzer.evaluate_input(state, executor, manager, untransformed)?; + self.mutator.multi_post_exec(state, corpus_id)?; + post.post_exec(state, corpus_id)?; } // println!("Found {}", found); @@ -377,7 +406,16 @@ where impl MultiMutationalStage { /// Creates a new transforming mutational stage pub fn transforming(mutator: M) -> Self { + // unsafe but impossible that you create two threads both instantiating this instance + let stage_id = unsafe { + let ret = MULTI_MUTATIONAL_STAGE_ID; + MULTI_MUTATIONAL_STAGE_ID += 1; + ret + }; Self { + name: Cow::Owned( + MULTI_MUTATIONAL_STAGE_NAME.to_owned() + ":" + stage_id.to_string().as_str(), + ), mutator, phantom: PhantomData, } diff --git a/libafl/src/stages/power.rs b/libafl/src/stages/power.rs index 41c3abea40..83083a692e 100644 --- a/libafl/src/stages/power.rs +++ b/libafl/src/stages/power.rs @@ -1,19 +1,27 @@ //! The power schedules. This stage should be invoked after the calibration stage. 
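MultiMutationalStage (and the other stages touched below) now derive a per-instance Named value from a global counter instead of returning one fixed string, so two instances of the same stage type keep separate named metadata such as retry counts. A standalone illustration of the naming scheme, using an AtomicUsize where the diff uses an unsafe static mut counter:

use std::borrow::Cow;
use std::sync::atomic::{AtomicUsize, Ordering};

static MUTATIONAL_STAGE_ID: AtomicUsize = AtomicUsize::new(0);
const MUTATIONAL_STAGE_NAME: &str = "mutational";

fn next_mutational_stage_name() -> Cow<'static, str> {
    // Each constructed stage takes the next id: "mutational:0", "mutational:1", ...
    let stage_id = MUTATIONAL_STAGE_ID.fetch_add(1, Ordering::Relaxed);
    Cow::Owned(format!("{MUTATIONAL_STAGE_NAME}:{stage_id}"))
}

fn main() {
    // Distinct names mean distinct named-metadata keys (e.g. separate retry counters).
    assert_eq!(next_mutational_stage_name(), "mutational:0");
    assert_eq!(next_mutational_stage_name(), "mutational:1");
}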
-use alloc::borrow::Cow; +use alloc::{ + borrow::{Cow, ToOwned}, + string::ToString, +}; use core::{fmt::Debug, marker::PhantomData}; use libafl_bolts::Named; use crate::{ + corpus::Corpus, executors::{Executor, HasObservers}, fuzzer::Evaluator, + inputs::Input, mutators::Mutator, schedulers::{testcase_score::CorpusPowerTestcaseScore, TestcaseScore}, - stages::{mutational::MutatedTransform, ExecutionCountRestartHelper, MutationalStage, Stage}, + stages::{mutational::MutatedTransform, MutationalStage, RetryCountRestartHelper, Stage}, state::{HasCorpus, HasCurrentTestcase, HasExecutions, HasRand, UsesState}, - Error, HasMetadata, + Error, HasMetadata, HasNamedMetadata, }; + +/// The unique id for this stage +static mut POWER_MUTATIONAL_STAGE_ID: usize = 0; /// Default name for `PowerMutationalStage`; derived from AFL++ pub const POWER_MUTATIONAL_STAGE_NAME: &str = "power"; /// The mutational stage using power schedules @@ -22,8 +30,6 @@ pub struct PowerMutationalStage { name: Cow<'static, str>, /// The mutators we use mutator: M, - /// Helper for restarts - restart_helper: ExecutionCountRestartHelper, #[allow(clippy::type_complexity)] phantom: PhantomData<(E, F, EM, I, Z)>, } @@ -46,10 +52,13 @@ where E: Executor + HasObservers, EM: UsesState, F: TestcaseScore, + I: Input, M: Mutator, - Self::State: HasCorpus + HasMetadata + HasRand + HasExecutions, + E::State: + HasCorpus + HasMetadata + HasRand + HasExecutions + HasNamedMetadata + HasCurrentTestcase, Z: Evaluator, I: MutatedTransform + Clone, + <::State as HasCorpus>::Corpus: Corpus, //delete me { /// The mutator, added to this stage #[inline] @@ -72,10 +81,6 @@ where Ok(score) } - - fn execs_since_progress_start(&mut self, state: &mut Self::State) -> Result { - self.restart_helper.execs_since_progress_start(state) - } } impl Stage for PowerMutationalStage @@ -84,9 +89,11 @@ where EM: UsesState, F: TestcaseScore, M: Mutator, - Self::State: HasCorpus + HasMetadata + HasRand + HasExecutions, + E::State: + HasCorpus + HasMetadata + HasRand + HasExecutions + HasNamedMetadata + HasCurrentTestcase, Z: Evaluator, - I: MutatedTransform + Clone, + I: MutatedTransform + Clone + Input, + <::State as HasCorpus>::Corpus: Corpus, //delete me { #[inline] #[allow(clippy::let_and_return)] @@ -101,37 +108,44 @@ where ret } - fn restart_progress_should_run(&mut self, _state: &mut Self::State) -> Result { - Ok(true) - // self.restart_helper.restart_progress_should_run(state) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + // Make sure we don't get stuck crashing on a single testcase + RetryCountRestartHelper::should_restart(state, &self.name, 3) } - fn clear_restart_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { - Ok(()) - // self.restart_helper.clear_restart_progress(state) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } } -impl PowerMutationalStage +impl PowerMutationalStage where E: Executor + HasObservers, EM: UsesState::State>, F: TestcaseScore<::State>, - M: Mutator::State>, + I: Input, + M: Mutator::State>, ::State: HasCorpus + HasMetadata + HasRand, Z: Evaluator::State>, { /// Creates a new [`PowerMutationalStage`] pub fn new(mutator: M) -> Self { + // unsafe but impossible that you create two threads both instantiating this instance + let stage_id = unsafe { + let ret = POWER_MUTATIONAL_STAGE_ID; + POWER_MUTATIONAL_STAGE_ID += 1; + ret + }; Self { - name: Cow::Borrowed(POWER_MUTATIONAL_STAGE_NAME), + name: 
Cow::Owned( + POWER_MUTATIONAL_STAGE_NAME.to_owned() + ":" + stage_id.to_string().as_str(), + ), mutator, phantom: PhantomData, - restart_helper: ExecutionCountRestartHelper::default(), } } } /// The standard powerscheduling stage pub type StdPowerMutationalStage = - PowerMutationalStage::State>, EM, I, M, Z>; + PowerMutationalStage; diff --git a/libafl/src/stages/push/mod.rs b/libafl/src/stages/push/mod.rs index 3db9a18c8d..a8304962e1 100644 --- a/libafl/src/stages/push/mod.rs +++ b/libafl/src/stages/push/mod.rs @@ -1,3 +1,5 @@ +//! [`PushStage`]`s` return inputs instead of calling an executor +//! //! While normal stages call the executor over and over again, push stages turn this concept upside down: //! A push stage instead returns an iterator that generates a new result for each time it gets called. //! With the new testcase, you will have to take care about testcase execution, manually. @@ -34,16 +36,14 @@ const STATS_TIMEOUT_DEFAULT: Duration = Duration::from_secs(15); #[derive(Clone, Debug)] pub struct PushStageSharedState where - CS: Scheduler, - EM: EventFirer + EventRestarter + HasEventManagerId, - OT: ObserversTuple, - CS::State: HasRand + HasCorpus, - Z: ExecutionProcessor - + EvaluatorObservers - + HasScheduler, + CS: Scheduler, + EM: EventFirer + EventRestarter + HasEventManagerId, + OT: ObserversTuple, + Z::State: HasRand + HasCorpus, + Z: ExecutionProcessor + EvaluatorObservers + HasScheduler, { /// The [`crate::state::State`] - pub state: CS::State, + pub state: Z::State, /// The [`crate::fuzzer::Fuzzer`] instance pub fuzzer: Z, /// The [`crate::events::EventManager`] @@ -55,17 +55,15 @@ where impl PushStageSharedState where - CS: Scheduler, - EM: EventFirer + EventRestarter + HasEventManagerId, - OT: ObserversTuple, - CS::State: HasRand + HasCorpus, - Z: ExecutionProcessor - + EvaluatorObservers - + HasScheduler, + CS: Scheduler, + EM: EventFirer + EventRestarter + HasEventManagerId, + OT: ObserversTuple, + Z::State: HasRand + HasCorpus, + Z: ExecutionProcessor + EvaluatorObservers + HasScheduler, { /// Create a new `PushStageSharedState` that can be used by all [`PushStage`]s #[must_use] - pub fn new(fuzzer: Z, state: CS::State, observers: OT, event_mgr: EM) -> Self { + pub fn new(fuzzer: Z, state: Z::State, observers: OT, event_mgr: EM) -> Self { Self { state, fuzzer, @@ -80,13 +78,11 @@ where #[derive(Clone, Debug)] pub struct PushStageHelper where - CS: Scheduler, - EM: EventFirer + EventRestarter + HasEventManagerId, - OT: ObserversTuple, - CS::State: HasRand + HasCorpus, - Z: ExecutionProcessor - + EvaluatorObservers - + HasScheduler, + CS: Scheduler, + EM: EventFirer + EventRestarter + HasEventManagerId, + OT: ObserversTuple, + Z::State: HasRand + HasCorpus, + Z: ExecutionProcessor + EvaluatorObservers + HasScheduler, { /// If this stage has already been initalized. /// This gets reset to `false` after one iteration of the stage is done. 
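The should_restart/clear_progress pair used above (RetryCountRestartHelper with a budget of 3, exercised by the test at the top of this section) amounts to a per-stage retry budget: a stage that keeps crashing on the same testcase is retried a bounded number of times and then skipped. A simplified standalone model of that logic; unlike the real helper it is keyed only by stage name, is not persisted in the fuzzer state, and does not remember testcases whose budget was already exhausted:

use std::collections::HashMap;

#[derive(Default)]
struct RetryCounts {
    tries: HashMap<String, usize>,
}

impl RetryCounts {
    // Returns `true` while the stage should (re-)run, `false` once the budget is spent.
    fn should_restart(&mut self, stage_name: &str, max_retries: usize) -> bool {
        let tries = self.tries.entry(stage_name.to_string()).or_insert(0);
        *tries += 1;
        *tries <= max_retries
    }

    // Called once the stage completed cleanly; forgets the counter.
    fn clear_progress(&mut self, stage_name: &str) {
        self.tries.remove(stage_name);
    }
}

fn main() {
    let mut counts = RetryCounts::default();
    assert!(counts.should_restart("mutational:0", 2)); // first try
    assert!(counts.should_restart("mutational:0", 2)); // resumed after a crash
    assert!(!counts.should_restart("mutational:0", 2)); // budget spent, skip
    counts.clear_progress("mutational:0");
    assert!(counts.should_restart("mutational:0", 2)); // next round runs again
}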
@@ -101,7 +97,7 @@ where pub current_corpus_id: Option, /// The input we just ran - pub current_input: Option<::Input>, // Todo: Get rid of copy + pub current_input: Option<::Input>, // Todo: Get rid of copy #[allow(clippy::type_complexity)] phantom: PhantomData<(CS, EM, OT, Z)>, @@ -110,13 +106,11 @@ where impl PushStageHelper where - CS: Scheduler, - EM: EventFirer + EventRestarter + HasEventManagerId, - OT: ObserversTuple, - CS::State: HasRand + HasCorpus, - Z: ExecutionProcessor - + EvaluatorObservers - + HasScheduler, + CS: Scheduler, + EM: EventFirer + EventRestarter + HasEventManagerId, + OT: ObserversTuple, + Z::State: HasRand + HasCorpus, + Z: ExecutionProcessor + EvaluatorObservers + HasScheduler, { /// Create a new [`PushStageHelper`] #[must_use] @@ -179,13 +173,11 @@ where /// After it has finished once, we will call it agan for the next fuzzer round. pub trait PushStage: Iterator where - CS: Scheduler, - CS::State: HasRand + HasExecutions + HasMetadata + HasCorpus + HasLastReportTime, - EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, - OT: ObserversTuple, - Z: ExecutionProcessor - + EvaluatorObservers - + HasScheduler, + CS: Scheduler, + Z::State: HasRand + HasExecutions + HasMetadata + HasCorpus + HasLastReportTime, + EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, + OT: ObserversTuple, + Z: ExecutionProcessor + EvaluatorObservers + HasScheduler, { /// Gets the [`PushStageHelper`] fn push_stage_helper(&self) -> &PushStageHelper; @@ -193,8 +185,8 @@ where fn push_stage_helper_mut(&mut self) -> &mut PushStageHelper; /// Set the current corpus index this stage works on - fn set_current_corpus_id(&mut self, corpus_idx: CorpusId) { - self.push_stage_helper_mut().current_corpus_id = Some(corpus_idx); + fn set_current_corpus_id(&mut self, corpus_id: CorpusId) { + self.push_stage_helper_mut().current_corpus_id = Some(corpus_id); } /// Called by `next_std` when this stage is being initialized. @@ -204,7 +196,7 @@ where fn init( &mut self, _fuzzer: &mut Z, - _state: &mut CS::State, + _state: &mut Z::State, _event_mgr: &mut EM, _observers: &mut OT, ) -> Result<(), Error> { @@ -217,20 +209,20 @@ where fn pre_exec( &mut self, _fuzzer: &mut Z, - _state: &mut CS::State, + _state: &mut Z::State, _event_mgr: &mut EM, _observers: &mut OT, - ) -> Option::Input, Error>>; + ) -> Option::Input, Error>>; /// Called after the execution of a testcase finished. 
#[inline] fn post_exec( &mut self, _fuzzer: &mut Z, - _state: &mut CS::State, + _state: &mut Z::State, _event_mgr: &mut EM, _observers: &mut OT, - _input: ::Input, + _input: ::Input, _exit_kind: ExitKind, ) -> Result<(), Error> { Ok(()) @@ -241,7 +233,7 @@ where fn deinit( &mut self, _fuzzer: &mut Z, - _state: &mut CS::State, + _state: &mut Z::State, _event_mgr: &mut EM, _observers: &mut OT, ) -> Result<(), Error> { @@ -249,7 +241,7 @@ where } /// This is the default implementation for `next` for this stage - fn next_std(&mut self) -> Option::Input, Error>> { + fn next_std(&mut self) -> Option::Input, Error>> { let mut shared_state = { let shared_state_ref = &mut (*self.push_stage_helper_mut().shared_state).borrow_mut(); shared_state_ref.take().unwrap() diff --git a/libafl/src/stages/push/mutational.rs b/libafl/src/stages/push/mutational.rs index 4bfcb4511e..73d73703de 100644 --- a/libafl/src/stages/push/mutational.rs +++ b/libafl/src/stages/push/mutational.rs @@ -8,6 +8,7 @@ use core::{ }; use libafl_bolts::rands::Rand; +use serde::Serialize; use super::{PushStage, PushStageHelper, PushStageSharedState}; use crate::{ @@ -17,18 +18,21 @@ use crate::{ inputs::UsesInput, mark_feature_time, mutators::Mutator, + nonzero, observers::ObserversTuple, schedulers::Scheduler, start_timer, - state::{HasCorpus, HasExecutions, HasLastReportTime, HasRand}, + state::{HasCorpus, HasExecutions, HasLastReportTime, HasRand, UsesState}, Error, EvaluatorObservers, ExecutionProcessor, HasMetadata, HasScheduler, }; #[cfg(feature = "introspection")] use crate::{monitors::PerfFeature, state::HasClientPerfMonitor}; /// The default maximum number of mutations to perform per input. -pub static DEFAULT_MUTATIONAL_MAX_ITERATIONS: usize = 128; +pub const DEFAULT_MUTATIONAL_MAX_ITERATIONS: usize = 128; + /// A Mutational push stage is the stage in a fuzzing run that mutates inputs. +/// /// Mutational push stages will usually have a range of mutations that are /// being applied to the input one by one, between executions. /// The push version, in contrast to the normal stage, will return each testcase, instead of executing it. 
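As the module doc above says, a push stage hands each input back to the caller instead of executing it itself. A standalone sketch of that control flow, with a toy iterator standing in for a real PushStage and a stub in place of the executor:

struct FakePushStage {
    remaining: usize,
}

impl Iterator for FakePushStage {
    type Item = Vec<u8>;

    fn next(&mut self) -> Option<Self::Item> {
        if self.remaining == 0 {
            return None;
        }
        self.remaining -= 1;
        // A real push stage would mutate the current testcase here.
        Some(vec![0x41; self.remaining + 1])
    }
}

// Stand-in for running the target; returns whether the run was "interesting".
fn run_target(input: &[u8]) -> bool {
    input.len() > 2
}

fn main() {
    let stage = FakePushStage { remaining: 3 };
    for input in stage {
        // The caller, not the stage, executes each testcase and would then feed
        // the observation back (cf. `post_exec` in the trait above).
        let interesting = run_target(&input);
        println!("ran {} bytes, interesting: {interesting}", input.len());
    }
}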
@@ -40,14 +44,12 @@ pub static DEFAULT_MUTATIONAL_MAX_ITERATIONS: usize = 128; #[derive(Clone, Debug)] pub struct StdMutationalPushStage where - CS: Scheduler, - EM: EventFirer + EventRestarter + HasEventManagerId, - M: Mutator, - OT: ObserversTuple, - CS::State: HasRand + HasCorpus + Clone + Debug, - Z: ExecutionProcessor - + EvaluatorObservers - + HasScheduler, + CS: Scheduler, + EM: EventFirer + EventRestarter + HasEventManagerId, + M: Mutator, + OT: ObserversTuple + Serialize, + Z::State: HasRand + HasCorpus + Clone + Debug, + Z: ExecutionProcessor + EvaluatorObservers + HasScheduler, { current_corpus_id: Option, testcases_to_do: usize, @@ -60,19 +62,19 @@ where impl StdMutationalPushStage where - CS: Scheduler, - EM: EventFirer + EventRestarter + HasEventManagerId, - M: Mutator, - OT: ObserversTuple, - CS::State: HasCorpus + HasRand + Clone + Debug, - Z: ExecutionProcessor - + EvaluatorObservers - + HasScheduler, + CS: Scheduler, + EM: EventFirer + EventRestarter + HasEventManagerId, + M: Mutator, + OT: ObserversTuple + Serialize, + Z::State: HasCorpus + HasRand + Clone + Debug, + Z: ExecutionProcessor + EvaluatorObservers + HasScheduler, { /// Gets the number of iterations as a random number #[allow(clippy::unused_self, clippy::unnecessary_wraps)] // TODO: we should put this function into a trait later - fn iterations(&self, state: &mut CS::State, _corpus_idx: CorpusId) -> Result { - Ok(1 + state.rand_mut().below(DEFAULT_MUTATIONAL_MAX_ITERATIONS)) + fn iterations(&self, state: &mut Z::State, _corpus_id: CorpusId) -> Result { + Ok(1 + state + .rand_mut() + .below(nonzero!(DEFAULT_MUTATIONAL_MAX_ITERATIONS))) } /// Sets the current corpus index @@ -83,15 +85,13 @@ where impl PushStage for StdMutationalPushStage where - CS: Scheduler, - EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, - M: Mutator, - OT: ObserversTuple, - CS::State: - HasCorpus + HasRand + HasExecutions + HasLastReportTime + HasMetadata + Clone + Debug, - Z: ExecutionProcessor - + EvaluatorObservers - + HasScheduler, + CS: Scheduler, + EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, + M: Mutator, + OT: ObserversTuple + Serialize, + Z::State: HasCorpus + HasRand + HasExecutions + HasLastReportTime + HasMetadata + Clone + Debug, + Z: ExecutionProcessor + EvaluatorObservers + HasScheduler, + <::State as HasCorpus>::Corpus: Corpus, //delete me { #[inline] fn push_stage_helper(&self) -> &PushStageHelper { @@ -107,13 +107,13 @@ where fn init( &mut self, fuzzer: &mut Z, - state: &mut CS::State, + state: &mut Z::State, _event_mgr: &mut EM, _observers: &mut OT, ) -> Result<(), Error> { // Find a testcase to work on, unless someone already set it - self.current_corpus_id = Some(if let Some(corpus_idx) = self.current_corpus_id { - corpus_idx + self.current_corpus_id = Some(if let Some(corpus_id) = self.current_corpus_id { + corpus_id } else { fuzzer.scheduler_mut().next(state)? }); @@ -126,10 +126,10 @@ where fn pre_exec( &mut self, _fuzzer: &mut Z, - state: &mut CS::State, + state: &mut Z::State, _event_mgr: &mut EM, _observers: &mut OT, - ) -> Option::Input, Error>> { + ) -> Option::Input, Error>> { if self.testcases_done >= self.testcases_to_do { // finished with this cicle. return None; @@ -161,15 +161,15 @@ where fn post_exec( &mut self, fuzzer: &mut Z, - state: &mut CS::State, + state: &mut Z::State, event_mgr: &mut EM, observers: &mut OT, - last_input: ::Input, + last_input: ::Input, exit_kind: ExitKind, ) -> Result<(), Error> { // todo: is_interesting, etc. 
- fuzzer.execute_and_process(state, event_mgr, last_input, observers, &exit_kind, true)?; + fuzzer.evaluate_execution(state, event_mgr, last_input, observers, &exit_kind, true)?; start_timer!(state); self.mutator.post_exec(state, self.current_corpus_id)?; @@ -183,7 +183,7 @@ where fn deinit( &mut self, _fuzzer: &mut Z, - _state: &mut CS::State, + _state: &mut Z::State, _event_mgr: &mut EM, _observers: &mut OT, ) -> Result<(), Error> { @@ -194,33 +194,29 @@ where impl Iterator for StdMutationalPushStage where - CS: Scheduler, - EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, - M: Mutator, - OT: ObserversTuple, - CS::State: - HasCorpus + HasRand + HasExecutions + HasMetadata + HasLastReportTime + Clone + Debug, - Z: ExecutionProcessor - + EvaluatorObservers - + HasScheduler, + CS: Scheduler, + EM: EventFirer + EventRestarter + HasEventManagerId + ProgressReporter, + M: Mutator, + OT: ObserversTuple + Serialize, + Z::State: HasCorpus + HasRand + HasExecutions + HasMetadata + HasLastReportTime + Clone + Debug, + Z: ExecutionProcessor + EvaluatorObservers + HasScheduler, + <::State as HasCorpus>::Corpus: Corpus, //delete me { - type Item = Result<::Input, Error>; + type Item = Result<::Input, Error>; - fn next(&mut self) -> Option::Input, Error>> { + fn next(&mut self) -> Option::Input, Error>> { self.next_std() } } impl StdMutationalPushStage where - CS: Scheduler, - EM: EventFirer + EventRestarter + HasEventManagerId, - M: Mutator, - OT: ObserversTuple, - CS::State: HasCorpus + HasRand + Clone + Debug, - Z: ExecutionProcessor - + EvaluatorObservers - + HasScheduler, + CS: Scheduler, + EM: EventFirer + EventRestarter + HasEventManagerId, + M: Mutator, + OT: ObserversTuple + Serialize, + Z::State: HasCorpus + HasRand + Clone + Debug, + Z: ExecutionProcessor + EvaluatorObservers + HasScheduler, { /// Creates a new default mutational stage #[must_use] diff --git a/libafl/src/stages/stats.rs b/libafl/src/stages/stats.rs index 61ed96419b..6be3de2fd7 100644 --- a/libafl/src/stages/stats.rs +++ b/libafl/src/stages/stats.rs @@ -1,4 +1,4 @@ -//! Stage to compute/report AFL stats +//! Stage to compute/report minimal AFL-like stats #[cfg(feature = "std")] use alloc::{borrow::Cow, string::ToString}; @@ -22,9 +22,9 @@ use crate::{ monitors::{AggregatorOps, UserStats, UserStatsValue}, }; -/// The [`AflStatsStage`] is a simple stage that computes and reports some stats. +/// The [`StatsStage`] is a simple stage that computes and reports some stats. #[derive(Debug, Clone)] -pub struct AflStatsStage { +pub struct StatsStage { // the number of testcases that have been fuzzed has_fuzzed_size: usize, // the number of "favored" testcases @@ -41,14 +41,14 @@ pub struct AflStatsStage { phantom: PhantomData<(E, EM, Z)>, } -impl UsesState for AflStatsStage +impl UsesState for StatsStage where E: UsesState, { type State = E::State; } -impl Stage for AflStatsStage +impl Stage for StatsStage where E: UsesState, EM: EventFirer, @@ -62,7 +62,34 @@ where state: &mut Self::State, _manager: &mut EM, ) -> Result<(), Error> { - let Some(corpus_idx) = state.current_corpus_id()? 
else { + self.update_and_report_afl_stats(state, _manager) + } + + #[inline] + fn should_restart(&mut self, _state: &mut Self::State) -> Result { + // Not running the target so we wont't crash/timeout and, hence, don't need to restore anything + Ok(true) + } + + #[inline] + fn clear_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { + // Not running the target so we wont't crash/timeout and, hence, don't need to restore anything + Ok(()) + } +} + +impl StatsStage { + fn update_and_report_afl_stats( + &mut self, + state: &mut ::State, + _manager: &mut EM, + ) -> Result<(), Error> + where + E: UsesState, + EM: EventFirer, + ::State: HasCorpus + HasImported, + { + let Some(corpus_id) = state.current_corpus_id()? else { return Err(Error::illegal_state( "state is not currently processing a corpus index", )); @@ -71,7 +98,7 @@ where // Report your stats every `STATS_REPORT_INTERVAL` // compute pending, pending_favored, imported, own_finds { - let testcase = state.corpus().get(corpus_idx)?.borrow(); + let testcase = state.corpus().get(corpus_id)?.borrow(); if testcase.scheduled_count() == 0 { self.has_fuzzed_size += 1; if testcase.has_metadata::() { @@ -102,7 +129,7 @@ where _manager.fire( state, Event::UpdateUserStats { - name: Cow::from("AflStats"), + name: Cow::from("Stats"), value: UserStats::new( UserStatsValue::String(Cow::from(json.to_string())), AggregatorOps::None, @@ -124,22 +151,10 @@ where Ok(()) } - - #[inline] - fn restart_progress_should_run(&mut self, _state: &mut Self::State) -> Result { - // Not running the target so we wont't crash/timeout and, hence, don't need to restore anything - Ok(true) - } - - #[inline] - fn clear_restart_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { - // Not running the target so we wont't crash/timeout and, hence, don't need to restore anything - Ok(()) - } } -impl AflStatsStage { - /// create a new instance of the [`AflStatsStage`] +impl StatsStage { + /// create a new instance of the [`StatsStage`] #[must_use] pub fn new(interval: Duration) -> Self { Self { @@ -149,8 +164,8 @@ impl AflStatsStage { } } -impl Default for AflStatsStage { - /// the default instance of the [`AflStatsStage`] +impl Default for StatsStage { + /// the default instance of the [`StatsStage`] #[must_use] fn default() -> Self { Self { diff --git a/libafl/src/stages/sync.rs b/libafl/src/stages/sync.rs index bf8f416188..3dbed175a2 100644 --- a/libafl/src/stages/sync.rs +++ b/libafl/src/stages/sync.rs @@ -1,30 +1,31 @@ //! The [`SyncFromDiskStage`] is a stage that imports inputs from disk for e.g. 
sync with AFL -use alloc::borrow::Cow; -use core::marker::PhantomData; -use std::{ - fs, - path::{Path, PathBuf}, - time::SystemTime, +use alloc::{ + borrow::{Cow, ToOwned}, vec::Vec, }; +use core::{marker::PhantomData, time::Duration}; +use std::path::{Path, PathBuf}; -use libafl_bolts::{current_time, shmem::ShMemProvider, Named}; +use libafl_bolts::{current_time, fs::find_new_files_rec, shmem::ShMemProvider, Named}; use serde::{Deserialize, Serialize}; #[cfg(feature = "introspection")] use crate::state::HasClientPerfMonitor; use crate::{ - corpus::{Corpus, CorpusId, HasTestcase}, + corpus::{Corpus, CorpusId}, events::{llmp::LlmpEventConverter, Event, EventConfig, EventFirer}, executors::{Executor, ExitKind, HasObservers}, fuzzer::{Evaluator, EvaluatorObservers, ExecutionProcessor}, inputs::{Input, InputConverter, UsesInput}, - stages::{RetryRestartHelper, Stage}, + stages::{RetryCountRestartHelper, Stage}, state::{HasCorpus, HasExecutions, HasRand, State, UsesState}, Error, HasMetadata, HasNamedMetadata, }; +/// Default name for `SyncFromDiskStage`; derived from AFL++ +pub const SYNC_FROM_DISK_STAGE_NAME: &str = "sync"; + /// Metadata used to store information about disk sync time #[cfg_attr( any(not(feature = "serdeany_autoreg"), miri), @@ -33,7 +34,7 @@ use crate::{ #[derive(Serialize, Deserialize, Debug)] pub struct SyncFromDiskMetadata { /// The last time the sync was done - pub last_time: SystemTime, + pub last_time: Duration, /// The paths that are left to sync pub left_to_sync: Vec, } @@ -43,7 +44,7 @@ libafl_bolts::impl_serdeany!(SyncFromDiskMetadata); impl SyncFromDiskMetadata { /// Create a new [`struct@SyncFromDiskMetadata`] #[must_use] - pub fn new(last_time: SystemTime, left_to_sync: Vec) -> Self { + pub fn new(last_time: Duration, left_to_sync: Vec) -> Self { Self { last_time, left_to_sync, @@ -51,15 +52,13 @@ impl SyncFromDiskMetadata { } } -/// Default name for `SyncFromDiskStage`; derived from AFL++ -pub const SYNC_FROM_DISK_STAGE_NAME: &str = "sync"; - /// A stage that loads testcases from disk to sync with other fuzzers such as AFL++ #[derive(Debug)] pub struct SyncFromDiskStage { name: Cow<'static, str>, - sync_dir: PathBuf, + sync_dirs: Vec, load_callback: CB, + interval: Duration, phantom: PhantomData<(E, EM, Z)>, } @@ -92,54 +91,50 @@ where state: &mut Self::State, manager: &mut EM, ) -> Result<(), Error> { - log::debug!("Syncing from disk: {:?}", self.sync_dir); let last = state .metadata_map() .get::() .map(|m| m.last_time); - if let (Some(max_time), mut new_files) = self.load_from_directory(&last)? { - if last.is_none() { - state - .metadata_map_mut() - .insert(SyncFromDiskMetadata::new(max_time, new_files)); - } else { - state - .metadata_map_mut() - .get_mut::() - .unwrap() - .last_time = max_time; - state - .metadata_map_mut() - .get_mut::() - .unwrap() - .left_to_sync - .append(&mut new_files); + if let Some(last) = last { + if current_time().saturating_sub(last) < self.interval { + return Ok(()); } } - if let Some(sync_from_disk_metadata) = - state.metadata_map_mut().get_mut::() - { - // Iterate over the paths of files left to sync. - // By keeping track of these files, we ensure that no file is missed during synchronization, - // even in the event of a target restart. 
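SyncFromDiskStage now stores last_time as a Duration since the Unix epoch (the representation current_time() is used for in this diff) and skips a sync round entirely if less than interval has passed. A standalone sketch of that check in plain std Rust:

use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Duration since the Unix epoch, mirroring what `current_time()` provides above.
fn now() -> Duration {
    SystemTime::now().duration_since(UNIX_EPOCH).unwrap()
}

fn should_sync(last: Option<Duration>, interval: Duration) -> bool {
    match last {
        // Enough time has passed since the previous sync?
        Some(last) => now().saturating_sub(last) >= interval,
        // Never synced before: do it now.
        None => true,
    }
}

fn main() {
    let interval = Duration::from_secs(60);
    assert!(should_sync(None, interval));
    assert!(!should_sync(Some(now()), interval));
}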
- let to_sync = sync_from_disk_metadata.left_to_sync.clone(); - log::debug!("Number of files to sync: {:?}", to_sync.len()); - for path in to_sync { - let input = (self.load_callback)(fuzzer, state, &path)?; - // Removing each path from the `left_to_sync` Vec before evaluating - // prevents duplicate processing and ensures that each file is evaluated only once. This approach helps - // avoid potential infinite loops that may occur if a file is an objective. - state - .metadata_map_mut() - .get_mut::() - .unwrap() - .left_to_sync - .retain(|p| p != &path); - log::debug!("Evaluating: {:?}", path); - fuzzer.evaluate_input(state, executor, manager, input)?; - } + let new_max_time = current_time(); + + let mut new_files = vec![]; + for dir in &self.sync_dirs { + log::debug!("Syncing from dir: {:?}", dir); + let new_dir_files = find_new_files_rec(dir, &last)?; + new_files.extend(new_dir_files); + } + + let sync_from_disk_metadata = state + .metadata_or_insert_with(|| SyncFromDiskMetadata::new(new_max_time, new_files.clone())); + + // At the very first sync, last_time and file_to_sync are set twice + sync_from_disk_metadata.last_time = new_max_time; + sync_from_disk_metadata.left_to_sync = new_files; + + // Iterate over the paths of files left to sync. + // By keeping track of these files, we ensure that no file is missed during synchronization, + // even in the event of a target restart. + let to_sync = sync_from_disk_metadata.left_to_sync.clone(); + log::debug!("Number of files to sync: {:?}", to_sync.len()); + for path in to_sync { + let input = (self.load_callback)(fuzzer, state, &path)?; + // Removing each path from the `left_to_sync` Vec before evaluating + // prevents duplicate processing and ensures that each file is evaluated only once. This approach helps + // avoid potential infinite loops that may occur if a file is an objective. + state + .metadata_mut::() + .unwrap() + .left_to_sync + .retain(|p| p != &path); + log::debug!("Syncing and evaluating {:?}", path); + fuzzer.evaluate_input(state, executor, manager, input)?; } #[cfg(feature = "introspection")] @@ -149,71 +144,30 @@ where } #[inline] - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { + fn should_restart(&mut self, state: &mut Self::State) -> Result { // TODO: Needs proper crash handling for when an imported testcase crashes // For now, Make sure we don't get stuck crashing on this testcase - RetryRestartHelper::restart_progress_should_run(state, self, 3) + RetryCountRestartHelper::no_retry(state, &self.name) } #[inline] - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - RetryRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } } impl SyncFromDiskStage { /// Creates a new [`SyncFromDiskStage`] #[must_use] - pub fn new(sync_dir: PathBuf, load_callback: CB) -> Self { + pub fn new(sync_dirs: Vec, load_callback: CB, interval: Duration, name: &str) -> Self { Self { - name: Cow::Borrowed(SYNC_FROM_DISK_STAGE_NAME), + name: Cow::Owned(SYNC_FROM_DISK_STAGE_NAME.to_owned() + ":" + name), phantom: PhantomData, - sync_dir, + sync_dirs, + interval, load_callback, } } - - fn load_from_directory( - &self, - last: &Option, - ) -> Result<(Option, Vec), Error> { - let mut max_time = None; - let mut left_to_sync = Vec::::new(); - let in_dir = self.sync_dir.clone(); - - for entry in fs::read_dir(in_dir)? 
{ - let entry = entry?; - let path = entry.path(); - let attributes = fs::metadata(&path); - - if attributes.is_err() { - continue; - } - - let attr = attributes?; - - if attr.is_file() && attr.len() > 0 { - if let Ok(time) = attr.modified() { - if let Some(l) = last { - if time.duration_since(*l).is_err() || time == *l { - continue; - } - } - max_time = Some(max_time.map_or(time, |t: SystemTime| t.max(time))); - log::info!("Syncing file: {:?}", path); - left_to_sync.push(path.clone()); - } - } else if attr.is_dir() { - let (dir_max_time, dir_left_to_sync) = self.load_from_directory(last)?; - if let Some(time) = dir_max_time { - max_time = Some(max_time.map_or(time, |t: SystemTime| t.max(time))); - } - left_to_sync.extend(dir_left_to_sync); - } - } - - Ok((max_time, left_to_sync)) - } } /// Function type when the callback in `SyncFromDiskStage` is not a lambda @@ -228,7 +182,7 @@ where { /// Creates a new [`SyncFromDiskStage`] invoking `Input::from_file` to load inputs #[must_use] - pub fn with_from_file(sync_dir: PathBuf) -> Self { + pub fn with_from_file(sync_dirs: Vec, interval: Duration) -> Self { fn load_callback( _: &mut Z, _: &mut S, @@ -237,8 +191,9 @@ where Input::from_file(p) } Self { + interval, name: Cow::Borrowed(SYNC_FROM_DISK_STAGE_NAME), - sync_dir, + sync_dirs, load_callback: load_callback::<_, _>, phantom: PhantomData, } @@ -293,14 +248,17 @@ where impl Stage for SyncFromBrokerStage where EM: UsesState + EventFirer, - S: State + HasExecutions + HasCorpus + HasRand + HasMetadata + HasTestcase, + S: State + HasExecutions + HasCorpus + HasRand + HasMetadata, SP: ShMemProvider, - E: HasObservers + Executor, + E: HasObservers + Executor, for<'a> E::Observers: Deserialize<'a>, - Z: EvaluatorObservers + ExecutionProcessor, + Z: EvaluatorObservers + + ExecutionProcessor, IC: InputConverter, ICB: InputConverter, DI: Input, + <::Corpus as Corpus>::Input: Clone, + S::Corpus: Corpus, // delete me { #[inline] fn perform( @@ -331,8 +289,9 @@ where corpus_size: 0, // TODO choose if sending 0 or the actual real value client_config: EventConfig::AlwaysUnique, time: current_time(), - executions: 0, forward_id: None, + #[cfg(all(unix, feature = "std", feature = "multi_machine"))] + node_id: None, }, )?; @@ -360,13 +319,13 @@ where } #[inline] - fn restart_progress_should_run(&mut self, _state: &mut Self::State) -> Result { + fn should_restart(&mut self, _state: &mut Self::State) -> Result { // No restart handling needed - does not execute the target. Ok(true) } #[inline] - fn clear_restart_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { + fn clear_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { // Not needed - does not execute the target. Ok(()) } diff --git a/libafl/src/stages/time_tracker.rs b/libafl/src/stages/time_tracker.rs new file mode 100644 index 0000000000..ee08d0dec8 --- /dev/null +++ b/libafl/src/stages/time_tracker.rs @@ -0,0 +1,80 @@ +//! 
Stage that wraps another stage and tracks it's execution time in `State` +use std::{marker::PhantomData, time::Duration}; + +use libafl_bolts::{current_time, Error}; + +use crate::{ + inputs::UsesInput, + stages::Stage, + state::{State, UsesState}, + HasMetadata, +}; +/// Track an inner Stage's execution time +#[derive(Debug)] +pub struct TimeTrackingStageWrapper { + inner: ST, + count: Duration, + phantom: PhantomData<(T, S)>, +} + +impl TimeTrackingStageWrapper { + /// Create a `TimeTrackingStageWrapper` + pub fn new(inner: ST) -> Self { + Self { + inner, + count: Duration::from_secs(0), + phantom: PhantomData, + } + } +} + +impl UsesState for TimeTrackingStageWrapper +where + S: State + HasMetadata, +{ + type State = S; +} + +impl Stage for TimeTrackingStageWrapper +where + S: UsesInput + State + HasMetadata, + ST: Stage, + M: UsesState, + Z: UsesState, + E: UsesState, + T: libafl_bolts::serdeany::SerdeAny + From, +{ + fn perform( + &mut self, + fuzzer: &mut Z, + executor: &mut E, + state: &mut Self::State, + manager: &mut M, + ) -> Result<(), Error> { + let before_run = current_time(); + self.inner.perform(fuzzer, executor, state, manager)?; + let after_run = current_time(); + self.count += after_run - before_run; + *state.metadata_mut::()? = T::from(self.count); + Ok(()) + } + + fn should_restart(&mut self, state: &mut Self::State) -> Result { + self.inner.should_restart(state) + } + + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + self.inner.clear_progress(state) + } + + fn perform_restartable( + &mut self, + fuzzer: &mut Z, + executor: &mut E, + state: &mut Self::State, + manager: &mut M, + ) -> Result<(), Error> { + self.inner + .perform_restartable(fuzzer, executor, state, manager) + } +} diff --git a/libafl/src/stages/tmin.rs b/libafl/src/stages/tmin.rs index 6135390022..d77299e73f 100644 --- a/libafl/src/stages/tmin.rs +++ b/libafl/src/stages/tmin.rs @@ -1,13 +1,17 @@ //! The [`TMinMutationalStage`] is a stage which will attempt to minimize corpus entries. 
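The new TimeTrackingStageWrapper above runs the wrapped stage, measures the wall-clock delta, and accumulates it into a user-chosen metadata type that implements From<Duration>. A simplified standalone model of the same pattern; it returns the accumulated value instead of writing it into the state's metadata map, and the Stage trait here is a bare stand-in, not LibAFL's:

use std::marker::PhantomData;
use std::time::{Duration, Instant};

trait Stage {
    fn perform(&mut self);
}

struct TimedWrapper<S, T> {
    inner: S,
    total: Duration,
    _marker: PhantomData<T>,
}

impl<S: Stage, T: From<Duration>> TimedWrapper<S, T> {
    fn new(inner: S) -> Self {
        Self {
            inner,
            total: Duration::ZERO,
            _marker: PhantomData,
        }
    }

    fn perform(&mut self) -> T {
        let before = Instant::now();
        self.inner.perform();
        self.total += before.elapsed();
        // The real wrapper stores this in the state's metadata map instead.
        T::from(self.total)
    }
}

#[derive(Debug)]
struct MutationTime(Duration);

impl From<Duration> for MutationTime {
    fn from(d: Duration) -> Self {
        Self(d)
    }
}

struct Sleepy;

impl Stage for Sleepy {
    fn perform(&mut self) {
        std::thread::sleep(Duration::from_millis(1));
    }
}

fn main() {
    let mut wrapped = TimedWrapper::<_, MutationTime>::new(Sleepy);
    let spent = wrapped.perform();
    println!("inner stage has run for {:?} so far", spent);
}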
-use alloc::borrow::Cow; +use alloc::{ + borrow::{Cow, ToOwned}, + string::ToString, +}; use core::{borrow::BorrowMut, fmt::Debug, hash::Hash, marker::PhantomData}; use ahash::RandomState; use libafl_bolts::{ - tuples::{Handle, Handled, MatchNameRef}, + tuples::{Handle, Handled, MatchName, MatchNameRef}, HasLen, Named, }; +use serde::Serialize; #[cfg(feature = "track_hit_feedbacks")] use crate::feedbacks::premature_last_result_err; @@ -15,7 +19,7 @@ use crate::{ corpus::{Corpus, HasCurrentCorpusId, Testcase}, events::EventFirer, executors::{ExitKind, HasObservers}, - feedbacks::{Feedback, FeedbackFactory, HasObserverHandle}, + feedbacks::{Feedback, FeedbackFactory, HasObserverHandle, StateInitializer}, inputs::UsesInput, mark_feature_time, mutators::{MutationResult, Mutator}, @@ -29,7 +33,8 @@ use crate::{ state::{ HasCorpus, HasCurrentTestcase, HasExecutions, HasMaxSize, HasSolutions, State, UsesState, }, - Error, ExecutesInput, ExecutionProcessor, HasFeedback, HasMetadata, HasScheduler, + Error, ExecutesInput, ExecutionProcessor, HasFeedback, HasMetadata, HasNamedMetadata, + HasScheduler, }; #[cfg(feature = "introspection")] use crate::{monitors::PerfFeature, state::HasClientPerfMonitor}; @@ -40,9 +45,10 @@ pub trait TMinMutationalStage: Stage + FeedbackFactory where E: UsesState + HasObservers, + E::Observers: ObserversTuple + Serialize, EM: UsesState + EventFirer, - F: Feedback, - Self::State: HasMaxSize + HasCorpus + HasSolutions + HasExecutions, + F: Feedback, + Self::State: HasMaxSize + HasCorpus + HasSolutions + HasExecutions + HasCurrentTestcase, Self::Input: MutatedTransform + Clone + Hash + HasLen, IP: Clone + MutatedTransformPost, M: Mutator, @@ -50,8 +56,10 @@ where + HasScheduler + HasFeedback + ExecutesInput - + ExecutionProcessor, - Z::Scheduler: RemovableScheduler, + + ExecutionProcessor, + Z::Feedback: Feedback, + Z::Scheduler: RemovableScheduler, + <::State as HasCorpus>::Corpus: Corpus, { /// The mutator registered for this stage fn mutator(&self) -> &M; @@ -71,7 +79,7 @@ where state: &mut Self::State, manager: &mut EM, ) -> Result<(), Error> { - let Some(base_corpus_idx) = state.current_corpus_id()? else { + let Some(base_corpus_id) = state.current_corpus_id()? else { return Err(Error::illegal_state( "state is not currently processing a corpus index", )); @@ -79,8 +87,14 @@ where let orig_max_size = state.max_size(); // basically copy-pasted from mutational.rs - let num = self.iterations(state)? - - usize::try_from(self.execs_since_progress_start(state)?).unwrap(); + let num = self + .iterations(state)? + .saturating_sub(usize::try_from(self.execs_since_progress_start(state)?)?); + + // If num is negative, then quit. 
+ if num == 0 { + return Ok(()); + } start_timer!(state); let transformed = @@ -118,7 +132,7 @@ where } let (input, post) = input_transformed.try_transform_into(state)?; - let corpus_idx = if input.len() < before_len { + let corpus_id = if input.len() < before_len { // run the input let exit_kind = fuzzer.execute_input(state, executor, manager, &input)?; let observers = executor.observers(); @@ -129,7 +143,7 @@ where // TODO replace if process_execution adds a return value for solution index let solution_count = state.solutions().count(); let corpus_count = state.corpus().count(); - let (_, corpus_idx) = fuzzer.execute_and_process( + let (_, corpus_id) = fuzzer.evaluate_execution( state, manager, input.clone(), @@ -152,7 +166,7 @@ where } } - corpus_idx + corpus_id } else { // we can't guarantee that the mutators provided will necessarily reduce size, so // skip any mutations that actually increase size so we don't waste eval time @@ -160,8 +174,8 @@ where }; start_timer!(state); - self.mutator_mut().post_exec(state, corpus_idx)?; - post.post_exec(state, corpus_idx)?; + self.mutator_mut().post_exec(state, corpus_id)?; + post.post_exec(state, corpus_id)?; mark_feature_time!(state, PerfFeature::MutatePostExec); i = next_i; @@ -176,19 +190,19 @@ where fuzzer .feedback_mut() .is_interesting(state, manager, &base, &*observers, &exit_kind)?; - let mut testcase = Testcase::with_executions(base, *state.executions()); + let mut testcase = Testcase::from(base); fuzzer .feedback_mut() .append_metadata(state, manager, &*observers, &mut testcase)?; - let prev = state.corpus_mut().replace(base_corpus_idx, testcase)?; + let prev = state.corpus_mut().replace(base_corpus_id, testcase)?; fuzzer .scheduler_mut() - .on_replace(state, base_corpus_idx, &prev)?; + .on_replace(state, base_corpus_id, &prev)?; // perform the post operation for the new testcase, e.g. to update metadata. // base_post should be updated along with the base (and is no longer None) base_post .ok_or_else(|| Error::empty_optional("Failed to get the MutatedTransformPost"))? 
- .post_exec(state, Some(base_corpus_idx))?; + .post_exec(state, Some(base_corpus_id))?; } state.set_max_size(orig_max_size); @@ -203,6 +217,8 @@ where /// The default corpus entry minimising mutational stage #[derive(Clone, Debug)] pub struct StdTMinMutationalStage { + /// The name + name: Cow<'static, str>, /// The mutator(s) this stage uses mutator: M, /// The factory @@ -224,23 +240,27 @@ where impl Stage for StdTMinMutationalStage where - Z: HasScheduler + ExecutionProcessor + ExecutesInput + HasFeedback, - Z::Scheduler: RemovableScheduler, - E: HasObservers, + Z: HasScheduler + ExecutionProcessor + ExecutesInput + HasFeedback, + Z::Scheduler: RemovableScheduler, + E: HasObservers + UsesState, + E::Observers: ObserversTuple + Serialize, EM: EventFirer, FF: FeedbackFactory, - F: Feedback, + F: Feedback, Self::Input: MutatedTransform + Clone + HasLen + Hash, - Self::State: HasMetadata + HasExecutions + HasSolutions + HasCorpus + HasMaxSize, + Z::State: + HasMetadata + HasExecutions + HasSolutions + HasCorpus + HasMaxSize + HasNamedMetadata, + Z::Feedback: Feedback, M: Mutator, IP: MutatedTransformPost + Clone, + <::State as HasCorpus>::Corpus: Corpus, // delete me { - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - self.restart_helper.restart_progress_should_run(state) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + self.restart_helper.should_restart(state, &self.name) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - self.restart_helper.clear_restart_progress(state) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + self.restart_helper.clear_progress(state, &self.name) } fn perform( @@ -270,19 +290,39 @@ where } } +impl Named for StdTMinMutationalStage { + fn name(&self) -> &Cow<'static, str> { + &self.name + } +} + +/// The counter for giving this stage unique id +static mut TMIN_STAGE_ID: usize = 0; +/// The name for tmin stage +pub static TMIN_STAGE_NAME: &str = "tmin"; + impl TMinMutationalStage for StdTMinMutationalStage where - Z: HasScheduler + ExecutionProcessor + ExecutesInput + HasFeedback, - Z::Scheduler: RemovableScheduler, - E: HasObservers, + Z: HasScheduler + ExecutionProcessor + ExecutesInput + HasFeedback, + Z::Scheduler: RemovableScheduler, + E: HasObservers + UsesState, + E::Observers: ObserversTuple + Serialize, EM: EventFirer, FF: FeedbackFactory, - F: Feedback, + F: Feedback, Self::Input: MutatedTransform + Clone + HasLen + Hash, - Self::State: HasMetadata + HasExecutions + HasSolutions + HasCorpus + HasMaxSize, + Z::State: HasMetadata + + HasExecutions + + HasSolutions + + HasCorpus + + HasMaxSize + + HasNamedMetadata + + HasCurrentTestcase, + Z::Feedback: Feedback, M: Mutator, IP: MutatedTransformPost + Clone, + <::State as HasCorpus>::Corpus: Corpus, // delete me { /// The mutator, added to this stage #[inline] @@ -302,14 +342,22 @@ where } fn execs_since_progress_start(&mut self, state: &mut Self::State) -> Result { - self.restart_helper.execs_since_progress_start(state) + self.restart_helper + .execs_since_progress_start(state, &self.name) } } impl StdTMinMutationalStage { /// Creates a new minimizing mutational stage that will minimize provided corpus entries pub fn new(mutator: M, factory: FF, runs: usize) -> Self { + // unsafe but impossible that you create two threads both instantiating this instance + let stage_id = unsafe { + let ret = TMIN_STAGE_ID; + TMIN_STAGE_ID += 1; + ret + }; Self { + name: 
Cow::Owned(TMIN_STAGE_NAME.to_owned() + ":" + stage_id.to_string().as_str()), mutator, factory, runs, @@ -346,24 +394,23 @@ impl HasObserverHandle for MapEqualityFeedback { } } -impl Feedback for MapEqualityFeedback +impl StateInitializer for MapEqualityFeedback {} + +impl Feedback for MapEqualityFeedback where M: MapObserver, C: AsRef, S: State, + OT: MatchName, { - fn is_interesting( + fn is_interesting( &mut self, _state: &mut S, _manager: &mut EM, - _input: &S::Input, + _input: &I, observers: &OT, _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + ) -> Result { let obs = observers .get(self.observer_handle()) .expect("Should have been provided valid observer name."); @@ -413,7 +460,7 @@ impl FeedbackFactory, OT> for MapEqual where M: MapObserver, C: AsRef + Handled, - OT: ObserversTuple, + OT: ObserversTuple, S: UsesInput, { fn create_feedback(&self, observers: &OT) -> MapEqualityFeedback { diff --git a/libafl/src/stages/tracing.rs b/libafl/src/stages/tracing.rs index 052adc98b2..6c7fa6406e 100644 --- a/libafl/src/stages/tracing.rs +++ b/libafl/src/stages/tracing.rs @@ -1,15 +1,19 @@ //! The tracing stage can trace the target and enrich a testcase with metadata, for example for `CmpLog`. -use alloc::borrow::Cow; +use alloc::{ + borrow::{Cow, ToOwned}, + string::ToString, +}; use core::{fmt::Debug, marker::PhantomData}; use libafl_bolts::Named; use crate::{ + corpus::Corpus, executors::{Executor, HasObservers, ShadowExecutor}, mark_feature_time, observers::ObserversTuple, - stages::{RetryRestartHelper, Stage}, + stages::{RetryCountRestartHelper, Stage}, start_timer, state::{HasCorpus, HasCurrentTestcase, HasExecutions, State, UsesState}, Error, HasNamedMetadata, @@ -20,8 +24,8 @@ use crate::{monitors::PerfFeature, state::HasClientPerfMonitor}; /// A stage that runs a tracer executor #[derive(Clone, Debug)] pub struct TracingStage { + name: Cow<'static, str>, tracer_executor: TE, - max_retries: usize, #[allow(clippy::type_complexity)] phantom: PhantomData<(EM, TE, Z)>, } @@ -36,13 +40,15 @@ where impl TracingStage where TE: Executor + HasObservers, - ::State: HasExecutions + HasCorpus + HasNamedMetadata, + TE::Observers: ObserversTuple::State>, + ::State: HasExecutions + HasCorpus + HasNamedMetadata + HasCurrentTestcase, EM: UsesState::State>, Z: UsesState::State>, + <::State as HasCorpus>::Corpus: Corpus, // delete me { #[allow(rustdoc::broken_intra_doc_links)] /// Perform tracing on the given `CorpusId`. Useful for if wrapping [`TracingStage`] with your - /// own stage and you need to manage [`super::NestedStageRestartHelper`] differently + /// own stage and you need to manage [`super::NestedStageRetryCountRestartHelper`] differently /// see [`super::ConcolicTracingStage`]'s implementation as an example of usage. 
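For context on the TMinMutationalStage changes above: the stage repeatedly applies size-reducing mutations and only keeps a candidate if it is actually smaller and still satisfies the same feedback. A toy standalone sketch of that minimisation loop, with a string search standing in for re-running the target and checking observers:

// Stand-in for re-running the target and checking that the original feedback
// (here: "still contains the crash token") is preserved.
fn still_interesting(input: &[u8]) -> bool {
    input.windows(4).any(|w| w == b"BUG!")
}

// Toy "mutation": drop the last byte.
fn shrink_once(input: &[u8]) -> Vec<u8> {
    input[..input.len().saturating_sub(1)].to_vec()
}

fn minimise(mut base: Vec<u8>, runs: usize) -> Vec<u8> {
    for _ in 0..runs {
        let candidate = shrink_once(&base);
        // Only accept candidates that actually got smaller and still trigger the
        // same behaviour (cf. the `input.len() < before_len` check in the stage).
        if candidate.len() < base.len() && still_interesting(&candidate) {
            base = candidate;
        }
    }
    base
}

fn main() {
    let input = b"padding padding BUG! trailing garbage".to_vec();
    let minimised = minimise(input, 128);
    assert!(still_interesting(&minimised));
    println!("minimised to {} bytes", minimised.len());
}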
pub fn trace( &mut self, @@ -67,8 +73,6 @@ where .run_target(fuzzer, state, manager, &input)?; mark_feature_time!(state, PerfFeature::TargetExecution); - *state.executions_mut() += 1; - start_timer!(state); self.tracer_executor .observers_mut() @@ -83,9 +87,11 @@ impl Stage for TracingStage where E: UsesState::State>, TE: Executor + HasObservers, - ::State: HasExecutions + HasCorpus + HasNamedMetadata, + TE::Observers: ObserversTuple::State>, + ::State: HasExecutions + HasCorpus + HasNamedMetadata, EM: UsesState::State>, Z: UsesState::State>, + <::State as HasCorpus>::Corpus: Corpus, // delete me { #[inline] fn perform( @@ -98,40 +104,43 @@ where self.trace(fuzzer, state, manager) } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - RetryRestartHelper::restart_progress_should_run(state, self, self.max_retries) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + RetryCountRestartHelper::no_retry(state, &self.name) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - RetryRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } } impl Named for TracingStage { fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("TracingStage"); - &NAME + &self.name } } +/// The counter for giving this stage unique id +static mut TRACING_STAGE_ID: usize = 0; +/// The name for tracing stage +pub static TRACING_STAGE_NAME: &str = "tracing"; + impl TracingStage { /// Creates a new default stage pub fn new(tracer_executor: TE) -> Self { + // unsafe but impossible that you create two threads both instantiating this instance + let stage_id = unsafe { + let ret = TRACING_STAGE_ID; + TRACING_STAGE_ID += 1; + ret + }; + Self { + name: Cow::Owned(TRACING_STAGE_NAME.to_owned() + ":" + stage_id.to_string().as_ref()), tracer_executor, - max_retries: 10, phantom: PhantomData, } } - /// Specify how many times that this stage will try again to trace the input before giving up - /// and not processing the input again. 0 retries means that the trace will be tried only once. 
- #[must_use] - pub fn with_retries(mut self, retries: usize) -> Self { - self.max_retries = retries; - self - } - /// Gets the underlying tracer executor pub fn executor(&self) -> &TE { &self.tracer_executor @@ -146,7 +155,7 @@ impl TracingStage { /// A stage that runs the shadow executor using also the shadow observers #[derive(Clone, Debug)] pub struct ShadowTracingStage { - max_retries: usize, + name: Cow<'static, str>, #[allow(clippy::type_complexity)] phantom: PhantomData<(E, EM, SOT, Z)>, } @@ -157,24 +166,30 @@ where { type State = E::State; } +/// The counter for giving this stage unique id +static mut SHADOW_TRACING_STAGE_ID: usize = 0; +/// Name for shadow tracing stage +pub static SHADOW_TRACING_STAGE_NAME: &str = "shadow"; impl Named for ShadowTracingStage where E: UsesState, { fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("ShadowTracingStage"); - &NAME + &self.name } } impl Stage, EM, Z> for ShadowTracingStage where E: Executor + HasObservers, + E::Observers: ObserversTuple, EM: UsesState::State>, - SOT: ObserversTuple, + SOT: ObserversTuple, Z: UsesState::State>, - ::State: State + HasExecutions + HasCorpus + HasNamedMetadata + Debug, + ::State: + State + HasExecutions + HasCorpus + HasNamedMetadata + Debug + HasCurrentTestcase, + <::State as HasCorpus>::Corpus: Corpus, // delete me { #[inline] fn perform( @@ -200,8 +215,6 @@ where let exit_kind = executor.run_target(fuzzer, state, manager, &input)?; mark_feature_time!(state, PerfFeature::TargetExecution); - *state.executions_mut() += 1; - start_timer!(state); executor .shadow_observers_mut() @@ -214,12 +227,12 @@ where Ok(()) } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - RetryRestartHelper::restart_progress_should_run(state, self, self.max_retries) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + RetryCountRestartHelper::no_retry(state, &self.name) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - RetryRestartHelper::clear_restart_progress(state, self) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + RetryCountRestartHelper::clear_progress(state, &self.name) } } @@ -228,22 +241,22 @@ where E: Executor + HasObservers, ::State: State + HasExecutions + HasCorpus, EM: UsesState::State>, - SOT: ObserversTuple, + SOT: ObserversTuple, Z: UsesState::State>, { /// Creates a new default stage pub fn new(_executor: &mut ShadowExecutor) -> Self { + // unsafe but impossible that you create two threads both instantiating this instance + let stage_id = unsafe { + let ret = SHADOW_TRACING_STAGE_ID; + SHADOW_TRACING_STAGE_ID += 1; + ret + }; Self { - max_retries: 10, + name: Cow::Owned( + SHADOW_TRACING_STAGE_NAME.to_owned() + ":" + stage_id.to_string().as_str(), + ), phantom: PhantomData, } } - - /// Specify how many times that this stage will try again to trace the input before giving up - /// and not processing the input again. 0 retries means that the trace will be tried only once. 
- #[must_use] - pub fn with_retries(mut self, retries: usize) -> Self { - self.max_retries = retries; - self - } } diff --git a/libafl/src/stages/tuneable.rs b/libafl/src/stages/tuneable.rs index 8e58ef5509..b293282ae3 100644 --- a/libafl/src/stages/tuneable.rs +++ b/libafl/src/stages/tuneable.rs @@ -7,8 +7,10 @@ use libafl_bolts::{current_time, impl_serdeany, rands::Rand}; use serde::{Deserialize, Serialize}; use crate::{ + corpus::Corpus, mark_feature_time, mutators::{MutationResult, Mutator}, + nonzero, stages::{ mutational::{MutatedTransform, MutatedTransformPost, DEFAULT_MUTATIONAL_MAX_ITERATIONS}, ExecutionCountRestartHelper, MutationalStage, Stage, @@ -164,8 +166,10 @@ where EM: UsesState, M: Mutator, Z: Evaluator, - Self::State: HasCorpus + HasRand + HasNamedMetadata + HasMetadata + HasExecutions, + Z::State: + HasCorpus + HasRand + HasNamedMetadata + HasMetadata + HasExecutions + HasCurrentTestcase, I: MutatedTransform + Clone, + <::State as HasCorpus>::Corpus: Corpus, // delete me { /// Runs this (mutational) stage for the given `testcase` /// Exactly the same functionality as [`MutationalStage::perform_mutational`], but with added timeout support. @@ -245,13 +249,11 @@ where fn iterations(&self, state: &mut Self::State) -> Result { Ok( // fall back to random - 1 + state.rand_mut().below(DEFAULT_MUTATIONAL_MAX_ITERATIONS), + 1 + state + .rand_mut() + .below(nonzero!(DEFAULT_MUTATIONAL_MAX_ITERATIONS)), ) } - - fn execs_since_progress_start(&mut self, state: &mut Self::State) -> Result { - self.restart_helper.execs_since_progress_start(state) - } } impl UsesState for TuneableMutationalStage @@ -267,8 +269,10 @@ where EM: UsesState, M: Mutator, Z: Evaluator, - Self::State: HasCorpus + HasRand + HasNamedMetadata + HasMetadata + HasExecutions, + Z::State: + HasCorpus + HasRand + HasNamedMetadata + HasMetadata + HasExecutions + HasCurrentTestcase, I: MutatedTransform + Clone, + <::State as HasCorpus>::Corpus: Corpus, // delete me { #[inline] #[allow(clippy::let_and_return)] @@ -287,12 +291,12 @@ where ret } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - self.restart_helper.restart_progress_should_run(state) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + self.restart_helper.should_restart(state, &self.name) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { - self.restart_helper.clear_restart_progress(state) + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + self.restart_helper.clear_progress(state, &self.name) } } @@ -302,10 +306,19 @@ where EM: UsesState::State>, M: Mutator::State>, Z: Evaluator, - ::State: - HasCorpus + HasRand + HasNamedMetadata + HasMetadata + HasExecutions, + ::State: + HasCorpus + HasRand + HasNamedMetadata + HasExecutions + HasMetadata + HasCurrentTestcase, I: MutatedTransform::State> + Clone, + <::State as HasCorpus>::Corpus: Corpus, // delete me { + fn execs_since_progress_start( + &mut self, + state: &mut ::State, + ) -> Result { + self.restart_helper + .execs_since_progress_start(state, &self.name) + } + /// Creates a new default tuneable mutational stage #[must_use] pub fn new(state: &mut ::State, mutator: M) -> Self { @@ -456,11 +469,11 @@ where // Time is measured directly the `evaluate_input` function let (untransformed, post) = input.try_transform_into(state)?; - let (_, corpus_idx) = fuzzer.evaluate_input(state, executor, manager, untransformed)?; + let (_, corpus_id) = fuzzer.evaluate_input(state, executor, manager, 
untransformed)?; start_timer!(state); - self.mutator_mut().post_exec(state, corpus_idx)?; - post.post_exec(state, corpus_idx)?; + self.mutator_mut().post_exec(state, corpus_id)?; + post.post_exec(state, corpus_id)?; mark_feature_time!(state, PerfFeature::MutatePostExec); Ok(()) diff --git a/libafl/src/stages/unicode.rs b/libafl/src/stages/unicode.rs index 0a661ab306..dc636bce8d 100644 --- a/libafl/src/stages/unicode.rs +++ b/libafl/src/stages/unicode.rs @@ -8,8 +8,8 @@ use libafl_bolts::{impl_serdeany, Error}; use serde::{Deserialize, Serialize}; use crate::{ - corpus::HasTestcase, - inputs::{BytesInput, HasMutatorBytes}, + corpus::Corpus, + inputs::{BytesInput, HasTargetBytes}, stages::Stage, state::{HasCorpus, HasCurrentTestcase, State, UsesState}, HasMetadata, @@ -89,6 +89,24 @@ impl UnicodeIdentificationStage { phantom: PhantomData, } } + fn identify_unicode_in_current_testcase(state: &mut S) -> Result<(), Error> + where + S: HasCurrentTestcase, + ::Input: HasTargetBytes, + { + let mut tc = state.current_testcase_mut()?; + if tc.has_metadata::() { + return Ok(()); // skip recompute + } + + let input = tc.load_input(state.corpus())?; + + let bytes = input.target_bytes(); + let metadata = extract_metadata(&bytes); + tc.add_metadata(metadata); + + Ok(()) + } } impl UsesState for UnicodeIdentificationStage @@ -100,7 +118,8 @@ where impl Stage for UnicodeIdentificationStage where - S: HasTestcase + HasCorpus + State, + S: HasCorpus + State + HasCurrentTestcase, + S::Corpus: Corpus, E: UsesState, EM: UsesState, Z: UsesState, @@ -112,28 +131,17 @@ where state: &mut Self::State, _manager: &mut EM, ) -> Result<(), Error> { - let mut tc = state.current_testcase_mut()?; - if tc.has_metadata::() { - return Ok(()); // skip recompute - } - - let input = tc.load_input(state.corpus())?; - - let bytes = input.bytes(); - let metadata = extract_metadata(bytes); - tc.add_metadata(metadata); - - Ok(()) + UnicodeIdentificationStage::identify_unicode_in_current_testcase(state) } #[inline] - fn restart_progress_should_run(&mut self, _state: &mut Self::State) -> Result { + fn should_restart(&mut self, _state: &mut Self::State) -> Result { // Stage does not run the target. No reset helper needed. Ok(true) } #[inline] - fn clear_restart_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { + fn clear_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { // Stage does not run the target. No reset helper needed. Ok(()) } diff --git a/libafl/src/stages/verify_timeouts.rs b/libafl/src/stages/verify_timeouts.rs new file mode 100644 index 0000000000..19749083c0 --- /dev/null +++ b/libafl/src/stages/verify_timeouts.rs @@ -0,0 +1,132 @@ +#![allow(clippy::too_long_first_doc_paragraph)] +//! Stage that re-runs captured Timeouts with double the timeout to verify +//! Note: To capture the timeouts, use in conjunction with `CaptureTimeoutFeedback` +//! Note: Will NOT work with in process executors due to the potential for restarts/crashes when +//! running inputs. 
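// --- Editor's illustrative sketch; not part of the patch above ---
// Rough wiring for the stage described in the module doc: `CaptureTimeoutFeedback`
// records suspected timeouts into a `TimeoutsToVerify` metadata queue, and this stage
// replays them with a doubled timeout. The `libafl::stages::verify_timeouts` path and
// the `Rc<RefCell<bool>>` capture flag are assumptions based on this new file; the
// surrounding fuzzer/executor setup is elided.
use core::time::Duration;
use std::{cell::RefCell, rc::Rc};

use libafl::inputs::BytesInput;
use libafl::stages::verify_timeouts::TimeoutsToVerify;

fn verify_timeouts_sketch() {
    // Shared flag: `true` while the feedback should keep capturing timeouts;
    // the stage flips it to `false` while it re-runs the suspects.
    let capture_timeouts = Rc::new(RefCell::new(true));
    let configured_timeout = Duration::from_secs(1);

    // The queue that lives in the state's metadata: the feedback pushes into it,
    // the stage drains it and re-runs each input with `2 * configured_timeout`.
    let mut suspects = TimeoutsToVerify::<BytesInput>::new();
    suspects.push(BytesInput::new(b"slow input".to_vec()));
    assert_eq!(suspects.count(), 1);
    let _replay_me = suspects.pop();

    // The stage itself is built from the shared flag and the original timeout; its
    // executor/state type parameters come from the fuzzer setup (not shown here):
    // let stage = VerifyTimeoutsStage::new(Rc::clone(&capture_timeouts), configured_timeout);
    let _ = (&capture_timeouts, configured_timeout);
}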
+use core::time::Duration; +use std::{cell::RefCell, collections::VecDeque, fmt::Debug, marker::PhantomData, rc::Rc}; + +use libafl_bolts::Error; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; + +use crate::{ + corpus::Corpus, + executors::{Executor, HasObservers, HasTimeout}, + inputs::{BytesInput, UsesInput}, + observers::ObserversTuple, + stages::Stage, + state::{HasCorpus, State, UsesState}, + Evaluator, HasMetadata, +}; + +/// Stage that re-runs inputs deemed as timeouts with double the timeout to assert that they are +/// not false positives. AFL++ style. +/// Note: Will NOT work with in process executors due to the potential for restarts/crashes when +/// running inputs. +#[derive(Debug)] +pub struct VerifyTimeoutsStage { + doubled_timeout: Duration, + original_timeout: Duration, + capture_timeouts: Rc>, + phantom: PhantomData<(E, S)>, +} + +impl VerifyTimeoutsStage { + /// Create a `VerifyTimeoutsStage` + pub fn new(capture_timeouts: Rc>, configured_timeout: Duration) -> Self { + Self { + capture_timeouts, + doubled_timeout: configured_timeout * 2, + original_timeout: configured_timeout, + phantom: PhantomData, + } + } +} + +impl UsesState for VerifyTimeoutsStage +where + S: State, +{ + type State = S; +} + +/// Timeouts that `VerifyTimeoutsStage` will read from +#[derive(Default, Serialize, Deserialize, Clone, Debug)] +#[serde(bound = "I: for<'a> Deserialize<'a> + Serialize")] +pub struct TimeoutsToVerify { + inputs: VecDeque, +} + +libafl_bolts::impl_serdeany!( + TimeoutsToVerify, + +); + +impl TimeoutsToVerify { + /// Create a new `TimeoutsToVerify` + #[must_use] + pub fn new() -> Self { + Self { + inputs: VecDeque::new(), + } + } + + /// Add a `TimeoutsToVerify` to queue + pub fn push(&mut self, input: I) { + self.inputs.push_back(input); + } + + /// Pop a `TimeoutsToVerify` to queue + pub fn pop(&mut self) -> Option { + self.inputs.pop_front() + } + + /// Count `TimeoutsToVerify` in queue + #[must_use] + pub fn count(&self) -> usize { + self.inputs.len() + } +} + +impl Stage for VerifyTimeoutsStage +where + E::Observers: ObserversTuple<::Input, ::State>, + E: Executor + HasObservers + HasTimeout, + EM: UsesState, + Z: UsesState + Evaluator, + S: HasCorpus + State + HasMetadata, + Self::Input: Debug + Serialize + DeserializeOwned + Default + 'static + Clone, + <::State as HasCorpus>::Corpus: Corpus, //delete me +{ + fn perform( + &mut self, + fuzzer: &mut Z, + executor: &mut E, + state: &mut Self::State, + manager: &mut EM, + ) -> Result<(), Error> { + let mut timeouts = state + .metadata_or_insert_with(TimeoutsToVerify::<::Input>::new) + .clone(); + if timeouts.count() == 0 { + return Ok(()); + } + executor.set_timeout(self.doubled_timeout); + *self.capture_timeouts.borrow_mut() = false; + while let Some(input) = timeouts.pop() { + fuzzer.evaluate_input(state, executor, manager, input)?; + } + executor.set_timeout(self.original_timeout); + *self.capture_timeouts.borrow_mut() = true; + let res = state.metadata_mut::>().unwrap(); + *res = TimeoutsToVerify::::new(); + Ok(()) + } + fn should_restart(&mut self, _state: &mut Self::State) -> Result { + Ok(true) + } + + fn clear_progress(&mut self, _state: &mut Self::State) -> Result<(), Error> { + Ok(()) + } +} diff --git a/libafl/src/state/mod.rs b/libafl/src/state/mod.rs index 1697b08ed7..0237aa8712 100644 --- a/libafl/src/state/mod.rs +++ b/libafl/src/state/mod.rs @@ -31,13 +31,13 @@ use crate::monitors::ClientPerfMonitor; #[cfg(feature = "scalability_introspection")] use crate::monitors::ScalabilityMonitor; use 
crate::{ - corpus::{Corpus, CorpusId, HasCurrentCorpusId, HasTestcase, Testcase}, + corpus::{Corpus, CorpusId, HasCurrentCorpusId, HasTestcase, InMemoryCorpus, Testcase}, events::{Event, EventFirer, LogSeverity}, - feedbacks::Feedback, + feedbacks::StateInitializer, fuzzer::{Evaluator, ExecuteInputResult}, generators::Generator, - inputs::{Input, UsesInput}, - stages::{HasCurrentStage, HasNestedStageStatus, StageId}, + inputs::{Input, NopInput, UsesInput}, + stages::{HasCurrentStageId, HasNestedStageStatus, StageId}, Error, HasMetadata, HasNamedMetadata, }; @@ -54,7 +54,8 @@ pub trait State: + MaybeHasClientPerfMonitor + MaybeHasScalabilityMonitor + HasCurrentCorpusId - + HasCurrentStage + + HasCurrentStageId + + Stoppable { } @@ -73,9 +74,9 @@ where } /// Trait for elements offering a corpus -pub trait HasCorpus: UsesInput { +pub trait HasCorpus { /// The associated type implementing [`Corpus`]. - type Corpus: Corpus::Input>; + type Corpus: Corpus; /// The testcase corpus fn corpus(&self) -> &Self::Corpus; @@ -83,6 +84,22 @@ pub trait HasCorpus: UsesInput { fn corpus_mut(&mut self) -> &mut Self::Corpus; } +// Reflexivity +impl HasCorpus for C +where + C: Corpus, +{ + type Corpus = Self; + + fn corpus(&self) -> &Self::Corpus { + self + } + + fn corpus_mut(&mut self) -> &mut Self::Corpus { + self + } +} + /// Interact with the maximum size pub trait HasMaxSize { /// The maximum size hint for items and mutations returned @@ -92,9 +109,9 @@ pub trait HasMaxSize { } /// Trait for elements offering a corpus of solutions -pub trait HasSolutions: UsesInput { +pub trait HasSolutions { /// The associated type implementing [`Corpus`] for solutions - type Solutions: Corpus::Input>; + type Solutions: Corpus; /// The solutions corpus fn solutions(&self) -> &Self::Solutions; @@ -186,6 +203,15 @@ pub trait HasStartTime { fn start_time_mut(&mut self) -> &mut Duration; } +/// Trait for the last report time, the last time this node reported progress +pub trait HasLastFoundTime { + /// The last time we found something by ourselves + fn last_found_time(&self) -> &Duration; + + /// The last time we found something by ourselves (mutable) + fn last_found_time_mut(&mut self) -> &mut Duration; +} + /// Trait for the last report time, the last time this node reported progress pub trait HasLastReportTime { /// The last time we reported progress,if available/used. @@ -209,7 +235,7 @@ pub struct LoadConfig<'a, I, S, Z> { } #[cfg(feature = "std")] -impl<'a, I, S, Z> Debug for LoadConfig<'a, I, S, Z> { +impl Debug for LoadConfig<'_, I, S, Z> { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { write!(f, "LoadConfig {{}}") } @@ -250,7 +276,7 @@ pub struct StdState { /// Remaining initial inputs to load, if any remaining_initial_files: Option>, #[cfg(feature = "std")] - /// Remaining initial inputs to load, if any + /// symlinks we have already traversed when loading `remaining_initial_files` dont_reenter: Option>, #[cfg(feature = "std")] /// If inputs have been processed for multicore loading @@ -259,8 +285,13 @@ pub struct StdState { /// The last time we reported progress (if available/used). /// This information is used by fuzzer `maybe_report_progress`. last_report_time: Option, + /// The last time something was added to the corpus + last_found_time: Duration, /// The current index of the corpus; used to record for resumable fuzzing. 
- corpus_idx: Option, + corpus_id: Option, + /// Request the fuzzer to stop at the start of the next stage + /// or at the beginning of the next fuzzing iteration + stop_requested: bool, stage_stack: StageStack, phantom: PhantomData, } @@ -274,9 +305,9 @@ where impl State for StdState where - C: Corpus, + C: Corpus + Serialize + DeserializeOwned, R: Rand, - SC: Corpus, + SC: Corpus + Serialize + DeserializeOwned, Self: UsesInput, { } @@ -302,9 +333,7 @@ where impl HasCorpus for StdState where - I: Input, - C: Corpus::Input>, - R: Rand, + C: Corpus, { type Corpus = C; @@ -323,23 +352,15 @@ where impl HasTestcase for StdState where - I: Input, - C: Corpus::Input>, - R: Rand, + C: Corpus, { /// To get the testcase - fn testcase( - &self, - id: CorpusId, - ) -> Result::Input>>, Error> { + fn testcase(&self, id: CorpusId) -> Result>, Error> { Ok(self.corpus().get(id)?.borrow()) } /// To get mutable testcase - fn testcase_mut( - &self, - id: CorpusId, - ) -> Result::Input>>, Error> { + fn testcase_mut(&self, id: CorpusId) -> Result>, Error> { Ok(self.corpus().get(id)?.borrow_mut()) } } @@ -420,6 +441,20 @@ impl HasImported for StdState { } } +impl HasLastFoundTime for StdState { + /// Return the number of new paths that imported from other fuzzers + #[inline] + fn last_found_time(&self) -> &Duration { + &self.last_found_time + } + + /// Return the number of new paths that imported from other fuzzers + #[inline] + fn last_found_time_mut(&mut self) -> &mut Duration { + &mut self.last_found_time + } +} + impl HasLastReportTime for StdState { /// The last time we reported progress,if available/used. /// This information is used by fuzzer `maybe_report_progress`. @@ -459,54 +494,56 @@ impl HasStartTime for StdState { } impl HasCurrentCorpusId for StdState { - fn set_corpus_idx(&mut self, idx: CorpusId) -> Result<(), Error> { - self.corpus_idx = Some(idx); + fn set_corpus_id(&mut self, id: CorpusId) -> Result<(), Error> { + self.corpus_id = Some(id); Ok(()) } - fn clear_corpus_idx(&mut self) -> Result<(), Error> { - self.corpus_idx = None; + fn clear_corpus_id(&mut self) -> Result<(), Error> { + self.corpus_id = None; Ok(()) } fn current_corpus_id(&self) -> Result, Error> { - Ok(self.corpus_idx) + Ok(self.corpus_id) } } /// Has information about the current [`Testcase`] we are fuzzing -pub trait HasCurrentTestcase -where - I: Input, -{ +pub trait HasCurrentTestcase: HasCorpus { /// Gets the current [`Testcase`] we are fuzzing /// - /// Will return [`Error::key_not_found`] if no `corpus_idx` is currently set. - fn current_testcase(&self) -> Result>, Error>; + /// Will return [`Error::key_not_found`] if no `corpus_id` is currently set. + fn current_testcase(&self) + -> Result::Input>>, Error>; //fn current_testcase(&self) -> Result<&Testcase, Error>; /// Gets the current [`Testcase`] we are fuzzing (mut) /// - /// Will return [`Error::key_not_found`] if no `corpus_idx` is currently set. - fn current_testcase_mut(&self) -> Result>, Error>; + /// Will return [`Error::key_not_found`] if no `corpus_id` is currently set. + fn current_testcase_mut( + &self, + ) -> Result::Input>>, Error>; //fn current_testcase_mut(&self) -> Result<&mut Testcase, Error>; /// Gets a cloned representation of the current [`Testcase`]. /// - /// Will return [`Error::key_not_found`] if no `corpus_idx` is currently set. + /// Will return [`Error::key_not_found`] if no `corpus_id` is currently set. /// /// # Note /// This allocates memory and copies the contents! 
/// For performance reasons, if you just need to access the testcase, use [`Self::current_testcase`] instead. - fn current_input_cloned(&self) -> Result; + fn current_input_cloned(&self) -> Result<::Input, Error>; } -impl HasCurrentTestcase for T +impl HasCurrentTestcase for T where - I: Input, - T: HasCorpus + HasCurrentCorpusId + UsesInput, + T: HasCorpus + HasCurrentCorpusId, + ::Input: Clone, { - fn current_testcase(&self) -> Result>, Error> { + fn current_testcase( + &self, + ) -> Result::Input>>, Error> { let Some(corpus_id) = self.current_corpus_id()? else { return Err(Error::key_not_found( "We are not currently processing a testcase", @@ -516,7 +553,9 @@ where Ok(self.corpus().get(corpus_id)?.borrow()) } - fn current_testcase_mut(&self) -> Result>, Error> { + fn current_testcase_mut( + &self, + ) -> Result::Input>>, Error> { let Some(corpus_id) = self.current_corpus_id()? else { return Err(Error::illegal_state( "We are not currently processing a testcase", @@ -526,23 +565,49 @@ where Ok(self.corpus().get(corpus_id)?.borrow_mut()) } - fn current_input_cloned(&self) -> Result { + fn current_input_cloned(&self) -> Result<::Input, Error> { let mut testcase = self.current_testcase_mut()?; Ok(testcase.borrow_mut().load_input(self.corpus())?.clone()) } } -impl HasCurrentStage for StdState { - fn set_current_stage_idx(&mut self, idx: StageId) -> Result<(), Error> { - self.stage_stack.set_current_stage_idx(idx) +/// A trait for types that want to expose a stop API +pub trait Stoppable { + /// Check if stop is requested + fn stop_requested(&self) -> bool; + + /// Request to stop + fn request_stop(&mut self); + + /// Discard the stop request + fn discard_stop_request(&mut self); +} + +impl Stoppable for StdState { + fn request_stop(&mut self) { + self.stop_requested = true; } - fn clear_stage(&mut self) -> Result<(), Error> { - self.stage_stack.clear_stage() + fn discard_stop_request(&mut self) { + self.stop_requested = false; } - fn current_stage_idx(&self) -> Result, Error> { - self.stage_stack.current_stage_idx() + fn stop_requested(&self) -> bool { + self.stop_requested + } +} + +impl HasCurrentStageId for StdState { + fn set_current_stage_id(&mut self, idx: StageId) -> Result<(), Error> { + self.stage_stack.set_current_stage_id(idx) + } + + fn clear_stage_id(&mut self) -> Result<(), Error> { + self.stage_stack.clear_stage_id() + } + + fn current_stage_id(&self) -> Result, Error> { + self.stage_stack.current_stage_id() } fn on_restart(&mut self) -> Result<(), Error> { @@ -568,7 +633,7 @@ where R: Rand, SC: Corpus::Input>, { - /// Decide if the state nust load the inputs + /// Decide if the state must load the inputs pub fn must_load_initial_inputs(&self) -> bool { self.corpus().count() == 0 || (self.remaining_initial_files.is_some() @@ -583,7 +648,7 @@ where if filename.starts_with('.') // || filename // .rsplit_once('-') - // .map_or(false, |(_, s)| u64::from_str(s).is_ok()) + // .is_some_and(|(_, s)| u64::from_str(s).is_ok()) { continue; } @@ -743,6 +808,28 @@ where Ok(()) } + /// Recursively walk supplied corpus directories + pub fn walk_initial_inputs( + &mut self, + in_dirs: &[PathBuf], + mut closure: F, + ) -> Result<(), Error> + where + F: FnMut(&PathBuf) -> Result<(), Error>, + { + self.canonicalize_input_dirs(in_dirs)?; + loop { + match self.next_file() { + Ok(path) => { + closure(&path)?; + } + Err(Error::IteratorEnd(_, _)) => break, + Err(e) => return Err(e), + } + } + self.reset_initial_files_state(); + Ok(()) + } /// Loads all intial inputs, even if they are not considered 
`interesting`. /// This is rarely the right method, use `load_initial_inputs`, /// and potentially fix your `Feedback`, instead. @@ -1074,19 +1161,22 @@ where objective: &mut O, ) -> Result where - F: Feedback, - O: Feedback, + F: StateInitializer, + O: StateInitializer, + C: Serialize + DeserializeOwned, + SC: Serialize + DeserializeOwned, { let mut state = Self { rand, executions: 0, imported: 0, - start_time: Duration::from_millis(0), + start_time: libafl_bolts::current_time(), metadata: SerdeAnyMap::default(), named_metadata: NamedSerdeAnyMap::default(), corpus, solutions, max_size: DEFAULT_MAX_SIZE, + stop_requested: false, #[cfg(feature = "introspection")] introspection_monitor: ClientPerfMonitor::new(), #[cfg(feature = "scalability_introspection")] @@ -1096,7 +1186,8 @@ where #[cfg(feature = "std")] dont_reenter: None, last_report_time: None, - corpus_idx: None, + last_found_time: libafl_bolts::current_time(), + corpus_id: None, stage_stack: StageStack::default(), phantom: PhantomData, #[cfg(feature = "std")] @@ -1108,6 +1199,23 @@ where } } +impl StdState, StdRand, InMemoryCorpus> { + /// Create an empty [`StdState`] that has very minimal uses. + /// Potentially good for testing. + pub fn nop() -> Result, StdRand, InMemoryCorpus>, Error> + where + I: Input, + { + StdState::new( + StdRand::with_seed(0), + InMemoryCorpus::::new(), + InMemoryCorpus::new(), + &mut (), + &mut (), + ) + } +} + #[cfg(feature = "introspection")] impl HasClientPerfMonitor for StdState { fn introspection_monitor(&self) -> &ClientPerfMonitor { @@ -1135,6 +1243,7 @@ impl HasScalabilityMonitor for StdState { pub struct NopState { metadata: SerdeAnyMap, execution: u64, + stop_requested: bool, rand: StdRand, phantom: PhantomData, } @@ -1147,6 +1256,7 @@ impl NopState { metadata: SerdeAnyMap::new(), execution: 0, rand: StdRand::default(), + stop_requested: false, phantom: PhantomData, } } @@ -1179,6 +1289,20 @@ impl HasExecutions for NopState { } } +impl Stoppable for NopState { + fn request_stop(&mut self) { + self.stop_requested = true; + } + + fn discard_stop_request(&mut self) { + self.stop_requested = false; + } + + fn stop_requested(&self) -> bool { + self.stop_requested + } +} + impl HasLastReportTime for NopState { fn last_report_time(&self) -> &Option { unimplemented!(); @@ -1214,11 +1338,11 @@ impl HasRand for NopState { impl State for NopState where I: Input {} impl HasCurrentCorpusId for NopState { - fn set_corpus_idx(&mut self, _idx: CorpusId) -> Result<(), Error> { + fn set_corpus_id(&mut self, _id: CorpusId) -> Result<(), Error> { Ok(()) } - fn clear_corpus_idx(&mut self) -> Result<(), Error> { + fn clear_corpus_id(&mut self) -> Result<(), Error> { Ok(()) } @@ -1227,16 +1351,16 @@ impl HasCurrentCorpusId for NopState { } } -impl HasCurrentStage for NopState { - fn set_current_stage_idx(&mut self, _idx: StageId) -> Result<(), Error> { +impl HasCurrentStageId for NopState { + fn set_current_stage_id(&mut self, _idx: StageId) -> Result<(), Error> { Ok(()) } - fn clear_stage(&mut self) -> Result<(), Error> { + fn clear_stage_id(&mut self) -> Result<(), Error> { Ok(()) } - fn current_stage_idx(&self) -> Result, Error> { + fn current_stage_id(&self) -> Result, Error> { Ok(None) } } @@ -1264,22 +1388,11 @@ impl HasScalabilityMonitor for NopState { } #[cfg(test)] -pub mod test { - use libafl_bolts::rands::StdRand; +mod test { + use crate::{inputs::BytesInput, state::StdState}; - use super::StdState; - use crate::{corpus::InMemoryCorpus, inputs::Input}; - - #[must_use] - pub fn test_std_state() -> StdState, 
StdRand, InMemoryCorpus> - { - StdState::new( - StdRand::with_seed(0), - InMemoryCorpus::::new(), - InMemoryCorpus::new(), - &mut (), - &mut (), - ) - .expect("couldn't instantiate the test state") + #[test] + fn test_std_state() { + StdState::nop::().expect("couldn't instantiate the test state"); } } diff --git a/libafl/src/state/stack.rs b/libafl/src/state/stack.rs index 72c5828df5..c07278cfa8 100644 --- a/libafl/src/state/stack.rs +++ b/libafl/src/state/stack.rs @@ -3,7 +3,7 @@ use alloc::vec::Vec; use libafl_bolts::Error; use serde::{Deserialize, Serialize}; -use crate::stages::{HasCurrentStage, HasNestedStageStatus, StageId}; +use crate::stages::{HasCurrentStageId, HasNestedStageStatus, StageId}; /// A stack to keep track of which stage is executing #[derive(Serialize, Deserialize, Clone, Debug, Default)] @@ -14,8 +14,8 @@ pub struct StageStack { stage_depth: usize, } -impl HasCurrentStage for StageStack { - fn set_current_stage_idx(&mut self, idx: StageId) -> Result<(), Error> { +impl HasCurrentStageId for StageStack { + fn set_current_stage_id(&mut self, idx: StageId) -> Result<(), Error> { // ensure we are in the right frame if self.stage_depth != self.stage_idx_stack.len() { return Err(Error::illegal_state( @@ -26,12 +26,12 @@ impl HasCurrentStage for StageStack { Ok(()) } - fn clear_stage(&mut self) -> Result<(), Error> { + fn clear_stage_id(&mut self) -> Result<(), Error> { self.stage_idx_stack.truncate(self.stage_depth); Ok(()) } - fn current_stage_idx(&self) -> Result, Error> { + fn current_stage_id(&self) -> Result, Error> { Ok(self.stage_idx_stack.get(self.stage_depth).copied()) } diff --git a/libafl_bolts/Cargo.toml b/libafl_bolts/Cargo.toml index c646fe151d..897c6f1394 100644 --- a/libafl_bolts/Cargo.toml +++ b/libafl_bolts/Cargo.toml @@ -1,7 +1,10 @@ [package] name = "libafl_bolts" version.workspace = true -authors = ["Andrea Fioraldi ", "Dominik Maier "] +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] description = "Low-level bolts to create fuzzers and so much more" documentation = "https://docs.rs/libafl" repository = "https://github.com/AFLplusplus/LibAFL/" @@ -9,22 +12,47 @@ readme = "./README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "testing", "security"] edition = "2021" -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] -rust-version = "1.70.0" +rust-version = "1.82" +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] [package.metadata.docs.rs] features = ["document-features"] all-features = true [features] -default = ["std", "derive", "llmp_compression", "llmp_small_maps", "rand_trait", "prelude", "gzip", "serdeany_autoreg", "alloc", "xxh3"] +default = [ + "std", + "derive", + "llmp_compression", + "llmp_small_maps", + "rand_trait", + "gzip", + "serdeany_autoreg", + "alloc", + "xxh3", +] document-features = ["dep:document-features"] #! # Feature Flags #! ### General Features ## Enables features that need rust's `std` lib to work, like print, env, ... 
support -std = ["serde_json", "serde_json/std", "hostname", "nix", "serde/std", "uuid", "backtrace", "uds", "serial_test", "alloc"] +std = [ + "hostname", + "nix", + "serde/std", + "uuid", + "backtrace", + "uds", + "serial_test", + "alloc", +] ## Enables all features that allocate in `no_std` alloc = ["serde/alloc", "hashbrown", "postcard", "erased-serde/alloc", "ahash"] @@ -65,10 +93,10 @@ xxh3 = ["xxhash-rust"] ## With this feature, the AnyMap uses [`type_name`](https://doc.rust-lang.org/std/any/fn.type_name.html) ## instead of [`TypeId::of`](https://doc.rust-lang.org/std/any/struct.TypeId.html#method.of) for deserialization. -## With this feature, stored state may remain deserializable across multiple compilations of LibAFL. -## This is **unsafe** and may lead to type confusions. Only use when you know what you are doing/ you have tests in place. -## The rust doc specifically states that "multiple types may map to the same type name"! -unsafe_stable_anymap = [] +## With this feature, stored state remains deserializable across multiple compilations of LibAFL. +## The rust doc specifically states that "multiple types may map to the same type name", so it could potentially lead to bugs. +## However, we make sure that no two types with the same name ever exist. +stable_anymap = [] ## Automatically register all `#[derive(SerdeAny)]` types at startup. serdeany_autoreg = ["ctor"] @@ -89,49 +117,77 @@ llmp_debug = ["alloc", "std"] llmp_small_maps = ["alloc"] [build-dependencies] -rustversion = "1.0" +rustversion = { workspace = true } [dependencies] -libafl_derive = { version = "0.13.0", optional = true, path = "../libafl_derive" } -static_assertions = "1.1.0" +libafl_derive = { workspace = true, default-features = true, optional = true } +static_assertions = { workspace = true } tuple_list = { version = "0.1.3" } -hashbrown = { version = "0.14", features = ["serde", "ahash"], default-features = false, optional = true } # A faster hashmap, nostd compatible -xxhash-rust = { version = "0.8.5", features = ["xxh3"], optional = true } # xxh3 hashing for rust -serde = { version = "1.0", default-features = false, features = ["derive"] } # serialization lib +hashbrown = { workspace = true, features = [ + "serde", + "ahash", +], default-features = false, optional = true } # A faster hashmap, nostd compatible +xxhash-rust = { version = "0.8.12", features = [ + "xxh3", +], optional = true } # xxh3 hashing for rust +serde = { workspace = true, default-features = false, features = [ + "derive", +] } # serialization lib erased-serde = { version = "0.4.5", default-features = false, optional = true } # erased serde -postcard = { version = "1.0", features = ["alloc"], default-features = false, optional = true } # no_std compatible serde serialization format -num_enum = { version = "0.7", default-features = false } -ahash = { version = "0.8", default-features = false, optional = true } # The hash function already used in hashbrown -backtrace = { version = "0.3", optional = true } # Used to get the stacktrace in StacktraceObserver +postcard = { workspace = true, optional = true } # no_std compatible serde serialization format +num_enum = { workspace = true, default-features = false } +ahash = { workspace = true, optional = true } # The hash function already used in hashbrown +backtrace = { workspace = true, default-features = true, optional = true } # Used to get the stacktrace in StacktraceObserver -ctor = { optional = true, version = "0.2" } -serde_json = { version = "1.0", optional = true, default-features = 
false, features = ["alloc"] } -miniz_oxide = { version = "0.7.1", optional = true } -hostname = { version = "^0.4", optional = true } # Is there really no gethostname in the stdlib? -rand_core = { version = "0.6", optional = true } -nix = { version = "0.29", default-features = false, optional = true, features = ["signal", "socket", "poll"] } -uuid = { version = "1.4", optional = true, features = ["serde", "v4"] } -clap = { version = "4.5", features = ["derive", "wrap_help"], optional = true } # CLI parsing, for libafl_bolts::cli / the `cli` feature -log = "0.4.20" - -pyo3 = { version = "0.18", optional = true, features = ["serde", "macros"] } +ctor = { optional = true, version = "0.2.9" } +miniz_oxide = { version = "0.8.0", optional = true } +hostname = { version = "0.4.0", optional = true } # Is there really no gethostname in the stdlib? +rand_core = { version = "0.6.4", optional = true } +nix = { workspace = true, optional = true, default-features = false, features = [ + "fs", + "signal", + "socket", + "poll", +] } +uuid = { workspace = true, optional = true, features = ["serde", "v4"] } +clap = { workspace = true, features = [ + "derive", + "wrap_help", +], optional = true } # CLI parsing, for libafl_bolts::cli / the `cli` feature +log = { workspace = true } +pyo3 = { workspace = true, optional = true, features = ["serde", "macros"] } # optional-dev deps (change when target.'cfg(accessible(::std))'.test-dependencies will be stable) -serial_test = { version = "3", optional = true, default-features = false, features = ["logging"] } +serial_test = { workspace = true, optional = true, default-features = false, features = [ + "logging", +] } # Document all features of this crate (for `cargo doc`) -document-features = { version = "0.2", optional = true } +document-features = { workspace = true, optional = true } + +[lints] +workspace = true [target.'cfg(unix)'.dependencies] -libc = "0.2" # For (*nix) libc -uds = { version = "0.4", optional = true, default-features = false } +libc = { workspace = true } # For (*nix) libc +uds = { version = "0.4.2", optional = true, default-features = false } [target.'cfg(windows)'.dependencies] -windows = { version = "0.51.1", features = ["Win32_Foundation", "Win32_System_Threading", "Win32_System_Diagnostics_Debug", "Win32_System_Kernel", "Win32_System_Memory", "Win32_Security", "Win32_System_SystemInformation", "Win32_System_Console"] } +windows = { workspace = true, features = [ + "Win32_Foundation", + "Win32_System_Threading", + "Win32_System_Diagnostics_Debug", + "Win32_System_Kernel", + "Win32_System_Memory", + "Win32_Security", + "Win32_System_SystemInformation", + "Win32_System_Console", +] } +windows-result = "0.2.0" [target.'cfg(windows)'.build-dependencies] -windows = "0.51.1" +windows = { workspace = true } [target.'cfg(target_vendor = "apple")'.dependencies] mach = "0.3.2" diff --git a/libafl_bolts/README.md b/libafl_bolts/README.md index d29ef59208..0a638ac124 100644 --- a/libafl_bolts/README.md +++ b/libafl_bolts/README.md @@ -9,7 +9,7 @@ Some cross-platform things in bolts include (but are not limited to): * ShMem: A cross-platform (Windows, Linux, Android, MacOS) shared memory implementation * LLMP: A fast, lock-free IPC mechanism via SharedMap * Core_affinity: A maintained version of `core_affinity` that can be used to get core information and bind processes to cores -* Rands: Fast random number generators for fuzzing (like [RomuRand](http://www.romu-random.org/)) +* Rands: Fast random number generators for fuzzing (like 
[RomuRand](https://www.romu-random.org/)) * MiniBSOD: get and print information about the current process state including important registers. * Tuples: Haskel-like compile-time tuple lists * Os: OS specific stuff like signal handling, windows exception handling, pipes, and helpers for `fork` @@ -37,8 +37,8 @@ Some of the parts in this list may be hard, don't be afraid to open a PR if you #### License -Licensed under either of Apache License, Version -2.0 or MIT license at your option. +Licensed under either of Apache License, Version +2.0 or MIT license at your option.
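// --- Editor's illustrative sketch; not part of the patch above ---
// The README list mentions the fast fuzzing RNGs in `libafl_bolts::rands`. A minimal
// use of them, assuming the `Rand` trait's `next`/`below` methods and the `nonzero!`
// macro this patch adds to libafl_bolts for compile-time `NonZero` construction:
use libafl_bolts::{
    nonzero,
    rands::{Rand, StdRand},
};

fn rand_sketch() {
    // Fixed seed for a reproducible run.
    let mut rng = StdRand::with_seed(0x5eed);
    let _raw: u64 = rng.next();
    // `below` takes a NonZeroUsize upper bound (exclusive), hence `nonzero!`.
    let bounded = rng.below(nonzero!(100));
    assert!(bounded < 100);
}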
diff --git a/libafl_bolts/examples/llmp_test/main.rs b/libafl_bolts/examples/llmp_test/main.rs index a18778f83e..50762a536c 100644 --- a/libafl_bolts/examples/llmp_test/main.rs +++ b/libafl_bolts/examples/llmp_test/main.rs @@ -9,7 +9,7 @@ use std::marker::PhantomData; #[cfg(all(feature = "std", not(target_os = "haiku")))] use std::{num::NonZeroUsize, thread, time}; -use libafl_bolts::{bolts_prelude::LlmpMsgHookResult, llmp::LlmpBrokerInner}; +use libafl_bolts::llmp::{LlmpBrokerInner, LlmpMsgHookResult}; #[cfg(all(feature = "std", not(target_os = "haiku")))] use libafl_bolts::{ llmp::{self, Flags, LlmpHook, Tag}, @@ -124,6 +124,7 @@ where msg_tag: &mut Tag, _msg_flags: &mut Flags, msg: &mut [u8], + _new_msgs: &mut Vec<(Tag, Flags, Vec)>, ) -> Result { match *msg_tag { _TAG_SIMPLE_U32_V1 => { @@ -167,6 +168,8 @@ fn main() { fn main() -> Result<(), Box> { /* The main node has a broker, and a few worker threads */ + use libafl_bolts::llmp::Broker; + let mode = std::env::args() .nth(1) .expect("no mode specified, chose 'broker', 'b2b', 'ctr', 'adder', 'large', or 'exiting'"); @@ -192,9 +195,7 @@ fn main() -> Result<(), Box> { )?; broker.inner_mut().launch_tcp_listener_on(port)?; // Exit when we got at least _n_ nodes, and all of them quit. - broker - .inner_mut() - .set_exit_cleanly_after(NonZeroUsize::new(1_usize).unwrap()); + broker.set_exit_after(NonZeroUsize::new(1_usize).unwrap()); broker.loop_with_timeouts(BROKER_TIMEOUT, Some(SLEEP_BETWEEN_FORWARDS)); } "b2b" => { diff --git a/libafl_bolts/src/anymap.rs b/libafl_bolts/src/anymap.rs index 2c2bf16673..29eae29e03 100644 --- a/libafl_bolts/src/anymap.rs +++ b/libafl_bolts/src/anymap.rs @@ -1,43 +1,6 @@ //! Poor-rust-man's downcasts to have `AnyMap` -use alloc::boxed::Box; -use core::{ - any::{Any, TypeId}, - mem::size_of, - ptr::{addr_of, read_unaligned}, -}; - -/// Convert to an Any trait object -pub trait AsAny: Any { - /// Returns this as Any trait - fn as_any(&self) -> &dyn Any; - /// Returns this as mutable Any trait - fn as_any_mut(&mut self) -> &mut dyn Any; - /// Returns this as boxed Any trait - fn as_any_boxed(self: Box) -> Box; -} - -/// Implement `AsAny` for a type -#[macro_export] -macro_rules! impl_asany { - ($struct_name:ident $(< $( $lt:tt $( : $clt:tt $(+ $dlt:tt )* )? ),+ >)?) => { - impl $(< $( $lt $( : $clt $(+ $dlt )* )? ),+ >)? $crate::anymap::AsAny for $struct_name $(< $( $lt ),+ >)? { - fn as_any(&self) -> &dyn ::core::any::Any { - self - } - - fn as_any_mut(&mut self) -> &mut dyn ::core::any::Any { - self - } - - fn as_any_boxed( - self: ::alloc::boxed::Box, - ) -> ::alloc::boxed::Box { - self - } - } - }; -} +use core::{any::TypeId, mem::size_of, ptr::read_unaligned}; /// Get a `type_id` from its previously unpacked `u128`. /// Opposite of [`unpack_type_id(id)`]. @@ -50,7 +13,7 @@ macro_rules! impl_asany { pub const fn pack_type_id(id: u128) -> TypeId { // TypeId size of other sizes is not yet supported" static_assertions::const_assert!(size_of::() == 16); - unsafe { *(addr_of!(id) as *const TypeId) } + unsafe { *(&raw const id as *const TypeId) } } /// Unpack a `type_id` to an `u128` @@ -65,7 +28,7 @@ pub const fn unpack_type_id(id: TypeId) -> u128 { // see any.rs, it's alway u128 hence 16 bytes. 
// TypeId size of other sizes is not yet supported" static_assertions::const_assert!(size_of::() == 16); - let ret: u128 = unsafe { read_unaligned::(addr_of!(id) as *const u128) }; + let ret: u128 = unsafe { read_unaligned::(&raw const id as *const u128) }; ret } diff --git a/libafl_bolts/src/cli.rs b/libafl_bolts/src/cli.rs index 920c5a4c35..51cc199b76 100644 --- a/libafl_bolts/src/cli.rs +++ b/libafl_bolts/src/cli.rs @@ -39,9 +39,7 @@ //! fn fuzz_with_qemu(mut options: FuzzerOptions) { //! env::remove_var("LD_LIBRARY_PATH"); //! -//! let env: Vec<(String, String)> = env::vars().collect(); -//! -//! let qemu = Qemu::init(&mut options.qemu_args.to_vec(), &mut env).unwrap(); +//! let qemu = Qemu::init(&mut options.qemu_args.to_vec()).unwrap(); //! // do other stuff... //! } //! @@ -70,6 +68,8 @@ use alloc::{string::String, vec::Vec}; use std::error; use std::{net::SocketAddr, path::PathBuf, time::Duration}; +#[cfg(feature = "frida_cli")] +use clap::ValueEnum; use clap::{Command, CommandFactory, Parser}; use serde::{Deserialize, Serialize}; @@ -104,6 +104,17 @@ fn parse_instrumentation_location( )) } +/// The scripting engine to use for JavaScript scripting support +#[cfg(feature = "frida_cli")] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Hash, ValueEnum, Default)] +pub enum FridaScriptBackend { + /// The Google V8 engine + V8, + /// `QuickJS` by Fabrice Bellard + #[default] + QuickJS, +} + /// Top-level container for cli options/arguments/subcommands #[derive(Parser, Clone, Debug, Serialize, Deserialize)] #[command( @@ -302,6 +313,16 @@ pub struct FuzzerOptions { requires = "replay" )] pub repeat: Option, + + /// The backend scripting engine to use for JavaScript scripting support + #[cfg(feature = "frida_cli")] + #[arg(long, help_heading = "Frida Options")] + pub backend: Option, + + /// The path to the Frida script to load into the target + #[cfg(feature = "frida_cli")] + #[arg(long, help_heading = "Frida Options")] + pub script: Option, } impl FuzzerOptions { diff --git a/libafl_bolts/src/core_affinity.rs b/libafl_bolts/src/core_affinity.rs index da06b5b160..e921425e81 100644 --- a/libafl_bolts/src/core_affinity.rs +++ b/libafl_bolts/src/core_affinity.rs @@ -310,6 +310,8 @@ mod linux { } fn new_cpu_set() -> cpu_set_t { + // # Safety + // Returning a new zeroed value that is allowed to be 0. unsafe { zeroed::() } } @@ -593,8 +595,6 @@ fn set_for_current_helper(core_id: CoreId) -> Result<(), Error> { #[cfg(target_vendor = "apple")] mod apple { use alloc::vec::Vec; - #[cfg(target_arch = "x86_64")] - use core::ptr::addr_of_mut; use std::thread::available_parallelism; #[cfg(target_arch = "x86_64")] @@ -643,7 +643,7 @@ mod apple { let result = thread_policy_set( pthread_mach_thread_np(pthread_self()), THREAD_AFFINITY_POLICY as _, - addr_of_mut!(info) as thread_policy_t, + &raw mut info as thread_policy_t, THREAD_AFFINITY_POLICY_COUNT, ); @@ -733,6 +733,8 @@ mod netbsd { } fn new_cpuset() -> *mut _cpuset { + // # Safety + // Simply creating new empty cpuset. No user-provided params. unsafe { _cpuset_create() } } } diff --git a/libafl_bolts/src/cpu.rs b/libafl_bolts/src/cpu.rs index 0235c05d4f..6722fb7085 100644 --- a/libafl_bolts/src/cpu.rs +++ b/libafl_bolts/src/cpu.rs @@ -1,4 +1,4 @@ -//! Architecture agnostic processor features +//! Fast implementations for specific CPU architectures. 
#[cfg(any(target_arch = "aarch64", target_arch = "arm"))] use core::arch::asm; diff --git a/libafl_bolts/src/fs.rs b/libafl_bolts/src/fs.rs index 2c2257d0c2..ce362f2bf2 100644 --- a/libafl_bolts/src/fs.rs +++ b/libafl_bolts/src/fs.rs @@ -1,11 +1,15 @@ //! `LibAFL` functionality for filesystem interaction -#[cfg(feature = "std")] -use alloc::borrow::ToOwned; use alloc::rc::Rc; +#[cfg(feature = "std")] +use alloc::{borrow::ToOwned, vec::Vec}; use core::cell::RefCell; +#[cfg(feature = "std")] +use core::time::Duration; #[cfg(unix)] use std::os::unix::prelude::{AsRawFd, RawFd}; +#[cfg(feature = "std")] +use std::time::SystemTime; use std::{ fs::{self, remove_file, File, OpenOptions}, io::{Seek, Write}, @@ -25,6 +29,8 @@ pub fn get_unique_std_input_file() -> String { format!("{}_{}", INPUTFILE_STD, std::process::id()) } +/// Write a file atomically +/// /// Creates a `.{file_name}.tmp` file, and writes all bytes to it. /// After all bytes have been written, the tmp-file is moved to it's original `path`. /// This way, on the majority of operating systems, the final file will never be incomplete or racey. @@ -140,6 +146,44 @@ impl InputFile { } } +/// Finds new files in the given directory, taking the last time we looked at this path as parameter. +/// This method works recursively. +/// If `last` is `None`, it'll load all file. +#[cfg(feature = "std")] +pub fn find_new_files_rec>( + dir: P, + last_check: &Option, +) -> Result, Error> { + let mut new_files = Vec::::new(); + for entry in fs::read_dir(dir)? { + let entry = entry?; + let path = entry.path(); + let attributes = fs::metadata(&path); + + if attributes.is_err() { + continue; + } + + let attr = attributes?; + + if attr.is_file() && attr.len() > 0 { + if let Ok(time) = attr.modified() { + if let Some(last_check) = last_check { + if time.duration_since(SystemTime::UNIX_EPOCH).unwrap() < *last_check { + continue; + } + } + new_files.push(path.clone()); + } + } else if attr.is_dir() { + let dir_left_to_sync = find_new_files_rec(entry.path(), last_check)?; + new_files.extend(dir_left_to_sync); + } + } + + Ok(new_files) +} + #[cfg(feature = "std")] impl Drop for InputFile { fn drop(&mut self) { diff --git a/libafl_bolts/src/lib.rs b/libafl_bolts/src/lib.rs index 1a9efaba89..0c6af328db 100644 --- a/libafl_bolts/src/lib.rs +++ b/libafl_bolts/src/lib.rs @@ -4,35 +4,11 @@ #![doc = include_str!("../README.md")] /*! 
*/ #![cfg_attr(feature = "document-features", doc = document_features::document_features!())] -#![forbid(unexpected_cfgs)] -#![allow(incomplete_features)] #![no_std] // For `type_eq` #![cfg_attr(nightly, feature(specialization))] // For `std::simd` #![cfg_attr(nightly, feature(portable_simd))] -// For `core::error` -#![cfg_attr(nightly, feature(error_in_core))] -#![warn(clippy::cargo)] -#![allow(ambiguous_glob_reexports)] -#![deny(clippy::cargo_common_metadata)] -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(clippy::all)] -#![deny(clippy::pedantic)] -#![allow( - clippy::unreadable_literal, - clippy::type_repetition_in_bounds, - clippy::missing_errors_doc, - clippy::cast_possible_truncation, - clippy::used_underscore_binding, - clippy::ptr_as_ptr, - clippy::missing_panics_doc, - clippy::missing_docs_in_private_items, - clippy::module_name_repetitions, - clippy::ptr_cast_constness, - clippy::negative_feature_names, - clippy::too_many_lines -)] #![cfg_attr(not(test), warn( missing_debug_implementations, missing_docs, @@ -73,16 +49,15 @@ while_true ) )] -// Till they fix this buggy lint in clippy -#![allow(clippy::borrow_as_ptr)] -#![allow(clippy::borrow_deref_ref)] /// We need some sort of "[`String`]" for errors in `no_alloc`... /// We can only support `'static` without allocator, so let's do that. #[cfg(not(feature = "alloc"))] type String = &'static str; -/// We also need a non-allocating format... +/// A simple non-allocating "format" string wrapper for no-std. +/// +/// Problem is that we really need a non-allocating format... /// This one simply returns the `fmt` string. /// Good enough for simple errors, for anything else, use the `alloc` feature. #[cfg(not(feature = "alloc"))] @@ -133,6 +108,8 @@ pub mod serdeany; pub mod shmem; #[cfg(feature = "std")] pub mod staterestore; +#[cfg(feature = "alloc")] +pub mod subrange; // TODO: reenable once ahash works in no-alloc #[cfg(any(feature = "xxh3", feature = "alloc"))] pub mod tuples; @@ -170,8 +147,6 @@ use alloc::{borrow::Cow, vec::Vec}; use core::hash::BuildHasher; #[cfg(any(feature = "xxh3", feature = "alloc"))] use core::hash::Hasher; -#[cfg(all(unix, feature = "std"))] -use core::ptr; #[cfg(feature = "std")] use std::time::{SystemTime, UNIX_EPOCH}; #[cfg(all(unix, feature = "std"))] @@ -236,14 +211,15 @@ pub type ErrorBacktrace = backtrace::Backtrace; #[cfg(not(feature = "errors_backtrace"))] #[derive(Debug, Default)] -/// Empty struct to use when `errors_backtrace` is disabled -pub struct ErrorBacktrace {} +/// ZST to use when `errors_backtrace` is disabled +pub struct ErrorBacktrace; + #[cfg(not(feature = "errors_backtrace"))] impl ErrorBacktrace { /// Nop #[must_use] pub fn new() -> Self { - Self {} + Self } } @@ -257,6 +233,8 @@ fn display_error_backtrace(_f: &mut fmt::Formatter, _err: &ErrorBacktrace) -> fm fmt::Result::Ok(()) } +/// Returns the standard input [`Hasher`] +/// /// Returns the hasher for the input with a given hash, depending on features: /// [`xxh3_64`](https://docs.rs/xxhash-rust/latest/xxhash_rust/xxh3/fn.xxh3_64.html) /// if the `xxh3` feature is used, /// else [`ahash`](https://docs.rs/ahash/latest/ahash/). @@ -269,6 +247,8 @@ pub fn hasher_std() -> impl Hasher + Clone { RandomState::with_seeds(0, 0, 0, 0).build_hasher() } +/// Hashes the input with a given hash +/// /// Hashes the input with a given hash, depending on features: /// [`xxh3_64`](https://docs.rs/xxhash-rust/latest/xxhash_rust/xxh3/fn.xxh3_64.html) /// if the `xxh3` feature is used, /// else [`ahash`](https://docs.rs/ahash/latest/ahash/). 
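// --- Editor's illustrative sketch; not part of the patch above ---
// The doc comments above describe the feature-dependent "standard" hasher:
// xxh3 when the `xxh3` feature is enabled, ahash otherwise. Hashing a byte
// slice with it, assuming `hasher_std()` keeps the `impl Hasher + Clone` return
// type shown in the context lines:
use core::hash::Hasher;

fn hash_sketch() -> u64 {
    let mut hasher = libafl_bolts::hasher_std();
    hasher.write(b"corpus entry bytes");
    hasher.finish()
}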
@@ -311,13 +291,15 @@ pub enum Error { Unsupported(String, ErrorBacktrace), /// Shutting down, not really an error. ShuttingDown, - /// OS error, wrapping a [`std::io::Error`] + /// OS error, wrapping a [`io::Error`] #[cfg(feature = "std")] OsError(io::Error, String, ErrorBacktrace), /// Something else happened Unknown(String, ErrorBacktrace), /// Error with the corpora InvalidCorpus(String, ErrorBacktrace), + /// Error specific to a runtime like QEMU or Frida + Runtime(String, ErrorBacktrace), } impl Error { @@ -329,12 +311,14 @@ impl Error { { Error::Serialize(arg.into(), ErrorBacktrace::new()) } + #[cfg(feature = "gzip")] /// Compression error #[must_use] pub fn compression() -> Self { Error::Compression(ErrorBacktrace::new()) } + /// Optional val was supposed to be set, but isn't. #[must_use] pub fn empty_optional(arg: S) -> Self @@ -343,6 +327,7 @@ impl Error { { Error::EmptyOptional(arg.into(), ErrorBacktrace::new()) } + /// Key not in Map #[must_use] pub fn key_not_found(arg: S) -> Self @@ -351,6 +336,7 @@ impl Error { { Error::KeyNotFound(arg.into(), ErrorBacktrace::new()) } + /// No elements in the current item #[must_use] pub fn empty(arg: S) -> Self @@ -359,6 +345,7 @@ impl Error { { Error::Empty(arg.into(), ErrorBacktrace::new()) } + /// End of iteration #[must_use] pub fn iterator_end(arg: S) -> Self @@ -367,6 +354,7 @@ impl Error { { Error::IteratorEnd(arg.into(), ErrorBacktrace::new()) } + /// This is not supported (yet) #[must_use] pub fn not_implemented(arg: S) -> Self @@ -375,6 +363,7 @@ impl Error { { Error::NotImplemented(arg.into(), ErrorBacktrace::new()) } + /// You're holding it wrong #[must_use] pub fn illegal_state(arg: S) -> Self @@ -383,6 +372,7 @@ impl Error { { Error::IllegalState(arg.into(), ErrorBacktrace::new()) } + /// The argument passed to this method or function is not valid #[must_use] pub fn illegal_argument(arg: S) -> Self @@ -391,11 +381,13 @@ impl Error { { Error::IllegalArgument(arg.into(), ErrorBacktrace::new()) } + /// Shutting down, not really an error. 
#[must_use] pub fn shutting_down() -> Self { Error::ShuttingDown } + /// This operation is not supported on the current architecture or platform #[must_use] pub fn unsupported(arg: S) -> Self @@ -404,6 +396,7 @@ impl Error { { Error::Unsupported(arg.into(), ErrorBacktrace::new()) } + /// OS error with additional message #[cfg(feature = "std")] #[must_use] @@ -413,7 +406,8 @@ impl Error { { Error::OsError(err, msg.into(), ErrorBacktrace::new()) } - /// OS error from [`std::io::Error::last_os_error`] with additional message + + /// OS error from [`io::Error::last_os_error`] with additional message #[cfg(feature = "std")] #[must_use] pub fn last_os_error(msg: S) -> Self @@ -426,6 +420,7 @@ impl Error { ErrorBacktrace::new(), ) } + /// Something else happened #[must_use] pub fn unknown(arg: S) -> Self @@ -434,6 +429,7 @@ impl Error { { Error::Unknown(arg.into(), ErrorBacktrace::new()) } + /// Error with corpora #[must_use] pub fn invalid_corpus(arg: S) -> Self @@ -442,6 +438,26 @@ impl Error { { Error::InvalidCorpus(arg.into(), ErrorBacktrace::new()) } + + /// Error specific to some runtime, like QEMU or Frida + #[must_use] + pub fn runtime(arg: S) -> Self + where + S: Into, + { + Error::Runtime(arg.into(), ErrorBacktrace::new()) + } +} + +impl core::error::Error for Error { + #[cfg(feature = "std")] + fn source(&self) -> Option<&(dyn core::error::Error + 'static)> { + if let Self::OsError(err, _, _) = self { + Some(err) + } else { + None + } + } } impl Display for Error { @@ -506,6 +522,10 @@ impl Display for Error { write!(f, "Invalid corpus: {0}", &s)?; display_error_backtrace(f, b) } + Self::Runtime(s, b) => { + write!(f, "Runtime error: {0}", &s)?; + display_error_backtrace(f, b) + } } } } @@ -536,14 +556,6 @@ impl From for Error { } } -/// Stringify the json serializer error -#[cfg(feature = "std")] -impl From for Error { - fn from(err: serde_json::Error) -> Self { - Self::serialize(format!("{err:?}")) - } -} - #[cfg(all(unix, feature = "std"))] impl From for Error { fn from(err: nix::Error) -> Self { @@ -612,9 +624,9 @@ impl From for Error { } #[cfg(windows)] -impl From for Error { +impl From for Error { #[allow(unused_variables)] - fn from(err: windows::core::Error) -> Self { + fn from(err: windows_result::Error) -> Self { Self::unknown(format!("Windows API error: {err:?}")) } } @@ -623,10 +635,13 @@ impl From for Error { impl From for Error { fn from(err: pyo3::PyErr) -> Self { pyo3::Python::with_gil(|py| { - if err.matches( - py, - pyo3::types::PyType::new::(py), - ) { + if err + .matches( + py, + pyo3::types::PyType::new::(py), + ) + .unwrap() + { Self::shutting_down() } else { Self::illegal_state(format!("Python exception: {err:?}")) @@ -635,12 +650,6 @@ impl From for Error { } } -#[cfg(all(not(nightly), feature = "std"))] -impl std::error::Error for Error {} - -#[cfg(nightly)] -impl core::error::Error for Error {} - /// The purpose of this module is to alleviate imports of many components by adding a glob import. 
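// --- Editor's illustrative sketch; not part of the patch above ---
// The hunk above adds an `Error::Runtime` variant and the matching `Error::runtime()`
// constructor for failures reported by runtimes such as QEMU or Frida. A hypothetical
// helper (the function and message are invented for illustration) would surface it like this:
use libafl_bolts::Error;

fn attach_to_runtime(initialized: bool) -> Result<(), Error> {
    if initialized {
        Ok(())
    } else {
        // Same shape as the other constructors: message plus captured backtrace;
        // `Display` renders it as "Runtime error: <message>".
        Err(Error::runtime("runtime failed to initialize"))
    }
}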
#[cfg(feature = "prelude")] pub mod prelude { @@ -677,7 +686,18 @@ pub trait AsSlice<'a> { fn as_slice(&'a self) -> Self::SliceRef; } -impl<'a, T, R> AsSlice<'a> for R +/// Can be converted to a slice +pub trait AsSizedSlice<'a, const N: usize> { + /// Type of the entries of this slice + type Entry: 'a; + /// Type of the reference to this slice + type SliceRef: Deref; + + /// Convert to a slice + fn as_sized_slice(&'a self) -> Self::SliceRef; +} + +impl<'a, T, R: ?Sized> AsSlice<'a> for R where T: 'a, R: Deref, @@ -686,7 +706,20 @@ where type SliceRef = &'a [T]; fn as_slice(&'a self) -> Self::SliceRef { - &*self + self + } +} + +impl<'a, T, const N: usize, R: ?Sized> AsSizedSlice<'a, N> for R +where + T: 'a, + R: Deref, +{ + type Entry = T; + type SliceRef = &'a [T; N]; + + fn as_sized_slice(&'a self) -> Self::SliceRef { + self } } @@ -699,7 +732,16 @@ pub trait AsSliceMut<'a>: AsSlice<'a> { fn as_slice_mut(&'a mut self) -> Self::SliceRefMut; } -impl<'a, T, R> AsSliceMut<'a> for R +/// Can be converted to a mutable slice +pub trait AsSizedSliceMut<'a, const N: usize>: AsSizedSlice<'a, N> { + /// Type of the mutable reference to this slice + type SliceRefMut: DerefMut; + + /// Convert to a slice + fn as_sized_slice_mut(&'a mut self) -> Self::SliceRefMut; +} + +impl<'a, T, R: ?Sized> AsSliceMut<'a> for R where T: 'a, R: DerefMut, @@ -711,6 +753,18 @@ where } } +impl<'a, T, const N: usize, R: ?Sized> AsSizedSliceMut<'a, N> for R +where + T: 'a, + R: DerefMut, +{ + type SliceRefMut = &'a mut [T; N]; + + fn as_sized_slice_mut(&'a mut self) -> Self::SliceRefMut { + &mut *self + } +} + /// Create an `Iterator` from a reference pub trait AsIter<'it> { /// The item type @@ -985,9 +1039,11 @@ impl SimpleFdLogger { // # Safety // The passed-in `fd` has to be a legal file descriptor to log to. // We also access a shared variable here. + let logger = &raw mut LIBAFL_RAWFD_LOGGER; unsafe { - LIBAFL_RAWFD_LOGGER.set_fd(log_fd); - log::set_logger(&*ptr::addr_of!(LIBAFL_RAWFD_LOGGER))?; + let logger = &mut *logger; + logger.set_fd(log_fd); + log::set_logger(logger)?; } Ok(()) } @@ -1054,11 +1110,38 @@ pub unsafe fn set_error_print_panic_hook(new_stderr: RawFd) { })); } +/// Zero-cost way to construct [`core::num::NonZeroUsize`] at compile-time. +#[macro_export] +macro_rules! nonzero { + // TODO: Further simplify with `unwrap`/`expect` once MSRV includes + // https://github.com/rust-lang/rust/issues/67441 + ($val:expr) => { + const { + match core::num::NonZero::new($val) { + Some(x) => x, + None => panic!("Value passed to `nonzero!` was zero"), + } + } + }; +} + +/// Get a [`core::ptr::NonNull`] to a global static mut (or similar). +/// +/// The same as [`core::ptr::addr_of_mut`] or `&raw mut`, but wrapped in said [`NonNull`](core::ptr::NonNull). +#[macro_export] +macro_rules! nonnull_raw_mut { + ($val:expr) => { + // # Safety + // The pointer to a value will never be null (unless we're on an archaic OS in a CTF challenge). + unsafe { core::ptr::NonNull::new(&raw mut $val).unwrap_unchecked() } + }; +} + #[cfg(feature = "python")] #[allow(missing_docs)] pub mod pybind { - use pyo3::{pymodule, types::PyModule, PyResult, Python}; + use pyo3::{pymodule, types::PyModule, Bound, PyResult}; #[macro_export] macro_rules! 
unwrap_me_body { @@ -1190,8 +1273,8 @@ pub mod pybind { #[pymodule] #[pyo3(name = "libafl_bolts")] /// Register the classes to the python module - pub fn python_module(py: Python, m: &PyModule) -> PyResult<()> { - crate::rands::pybind::register(py, m)?; + pub fn python_module(m: &Bound<'_, PyModule>) -> PyResult<()> { + crate::rands::pybind::register(m)?; Ok(()) } } @@ -1199,9 +1282,6 @@ pub mod pybind { #[cfg(test)] mod tests { - #[cfg(all(feature = "std", unix))] - use core::ptr; - #[cfg(all(feature = "std", unix))] use crate::LIBAFL_RAWFD_LOGGER; @@ -1211,8 +1291,10 @@ mod tests { use std::{io::stdout, os::fd::AsRawFd}; unsafe { LIBAFL_RAWFD_LOGGER.fd = stdout().as_raw_fd() }; + + let libafl_rawfd_logger_fd = &raw const LIBAFL_RAWFD_LOGGER; unsafe { - log::set_logger(&*ptr::addr_of!(LIBAFL_RAWFD_LOGGER)).unwrap(); + log::set_logger(&*libafl_rawfd_logger_fd).unwrap(); } log::set_max_level(log::LevelFilter::Debug); log::info!("Test"); diff --git a/libafl_bolts/src/llmp.rs b/libafl_bolts/src/llmp.rs index 9f9b71481d..07abd68aa4 100644 --- a/libafl_bolts/src/llmp.rs +++ b/libafl_bolts/src/llmp.rs @@ -77,6 +77,7 @@ use core::{ }; #[cfg(feature = "std")] use std::{ + boxed::Box, env, io::{ErrorKind, Read, Write}, net::{SocketAddr, TcpListener, TcpStream, ToSocketAddrs}, @@ -90,12 +91,13 @@ use backtrace::Backtrace; #[cfg(not(any(target_os = "solaris", target_os = "illumos")))] use nix::sys::socket::{self, sockopt::ReusePort}; use serde::{Deserialize, Serialize}; +#[cfg(feature = "std")] use tuple_list::tuple_list; #[cfg(all(unix, not(miri)))] use crate::os::unix_signals::setup_signal_handler; #[cfg(unix)] -use crate::os::unix_signals::{siginfo_t, ucontext_t, Handler, Signal}; +use crate::os::unix_signals::{siginfo_t, ucontext_t, Signal, SignalHandler}; #[cfg(all(windows, feature = "std"))] use crate::os::windows_exceptions::{setup_ctrl_handler, CtrlHandler}; #[cfg(feature = "std")] @@ -141,6 +143,8 @@ pub const LLMP_FLAG_INITIALIZED: Flags = Flags(0x0); pub const LLMP_FLAG_COMPRESSED: Flags = Flags(0x1); /// From another broker. pub const LLMP_FLAG_FROM_B2B: Flags = Flags(0x2); +/// From another machine (with the `multi_machine` mode) +pub const LLMP_FLAG_FROM_MM: Flags = Flags(0x4); /// Timt the broker 2 broker connection waits for incoming data, /// before checking for own data to forward again. @@ -569,7 +573,7 @@ unsafe fn llmp_next_msg_ptr_checked( let msg_begin_min = (page as *const u8).add(size_of::()); // We still need space for this msg (alloc_size). let msg_begin_max = (page as *const u8).add(map_size - alloc_size); - let next = _llmp_next_msg_ptr(last_msg); + let next = llmp_next_msg_ptr(last_msg); let next_ptr = next as *const u8; if next_ptr >= msg_begin_min && next_ptr <= msg_begin_max { Ok(next) @@ -588,8 +592,8 @@ unsafe fn llmp_next_msg_ptr_checked( /// Will dereference the `last_msg` ptr #[inline] #[allow(clippy::cast_ptr_alignment)] -unsafe fn _llmp_next_msg_ptr(last_msg: *const LlmpMsg) -> *mut LlmpMsg { - /* DBG("_llmp_next_msg_ptr %p %lu + %lu\n", last_msg, last_msg->buf_len_padded, sizeof(llmp_message)); */ +unsafe fn llmp_next_msg_ptr(last_msg: *const LlmpMsg) -> *mut LlmpMsg { + /* DBG("llmp_next_msg_ptr %p %lu + %lu\n", last_msg, last_msg->buf_len_padded, sizeof(llmp_message)); */ (last_msg as *mut u8) .add(size_of::()) .add((*last_msg).buf_len_padded as usize) as *mut LlmpMsg @@ -661,6 +665,8 @@ impl LlmpMsg { /// Gets the buffer from this message as slice, with the correct length. 
#[inline] pub fn try_as_slice(&self, map: &mut LlmpSharedMap) -> Result<&[u8], Error> { + // # Safety + // Safe because we check if we're in a valid shmem region first. unsafe { if self.in_shmem(map) { Ok(self.as_slice_unsafe()) @@ -855,9 +861,9 @@ impl LlmpPage { #[inline] fn receiver_left(&mut self) { - let receivers_joined_count = &mut self.receivers_joined_count; + let receivers_left_count = &mut self.receivers_left_count; //receivers_joined_count.fetch_add(1, Ordering::Relaxed); - receivers_joined_count.store(1, Ordering::Relaxed); + receivers_left_count.store(1, Ordering::Relaxed); } } @@ -1091,7 +1097,7 @@ where } /// For non zero-copy, we want to get rid of old pages with duplicate messages in the client - /// eventually. This function This function sees if we can deallocate older pages. + /// eventually. This function sees if we can deallocate older pages. /// The broker would have informed us by setting the safe_to_unmap-flag. unsafe fn prune_old_pages(&mut self) { // Exclude the current page by splitting of the last element for this iter @@ -1142,7 +1148,7 @@ where let last_msg = self.last_msg_sent; assert!((*page).size_used + EOP_MSG_SIZE <= (*page).size_total, "PROGRAM ABORT : BUG: EOP does not fit in page! page {page:?}, size_current {:?}, size_total {:?}", - ptr::addr_of!((*page).size_used), ptr::addr_of!((*page).size_total)); + &raw const (*page).size_used, &raw const (*page).size_total); let ret: *mut LlmpMsg = if last_msg.is_null() { (*page).messages.as_mut_ptr() @@ -1234,14 +1240,14 @@ where MessageId((*last_msg).message_id.0 + 1) } else { /* Oops, wrong usage! */ - panic!("BUG: The current message never got committed using send! (page->current_msg_id {:?}, last_msg->message_id: {:?})", ptr::addr_of!((*page).current_msg_id), (*last_msg).message_id); + panic!("BUG: The current message never got committed using send! (page->current_msg_id {:?}, last_msg->message_id: {:?})", &raw const (*page).current_msg_id, (*last_msg).message_id); }; (*ret).buf_len = buf_len as u64; (*ret).buf_len_padded = buf_len_padded as u64; (*page).size_used += size_of::() + buf_len_padded; - (*_llmp_next_msg_ptr(ret)).tag = LLMP_TAG_UNSET; + (*llmp_next_msg_ptr(ret)).tag = LLMP_TAG_UNSET; (*ret).tag = LLMP_TAG_UNINITIALIZED; self.has_unsent_message = true; @@ -1274,7 +1280,8 @@ where ))); } - (*msg).message_id.0 = (*page).current_msg_id.load(Ordering::Relaxed) + 1; + let mid = (*page).current_msg_id.load(Ordering::Relaxed) + 1; + (*msg).message_id.0 = mid; // Make sure all things have been written to the page, and commit the message to the page (*page) @@ -1283,6 +1290,14 @@ where self.last_msg_sent = msg; self.has_unsent_message = false; + + log::debug!( + "[{} - {:#x}] Send message with id {}", + self.id.0, + ptr::from_ref::(self) as u64, + mid + ); + Ok(()) } @@ -1483,7 +1498,7 @@ where (*page).size_used -= old_len_padded as usize; (*page).size_used += buf_len_padded; - (*_llmp_next_msg_ptr(msg)).tag = LLMP_TAG_UNSET; + (*llmp_next_msg_ptr(msg)).tag = LLMP_TAG_UNSET; Ok(()) } @@ -1691,6 +1706,14 @@ where if !(*msg).in_shmem(&mut self.current_recv_shmem) { return Err(Error::illegal_state("Unexpected message in map (out of map bounds) - buggy client or tampered shared map detected!")); } + + log::debug!( + "[{} - {:#x}] Received message with ID {}...", + self.id.0, + ptr::from_ref::(self) as u64, + (*msg).message_id.0 + ); + // Handle special, LLMP internal, messages. 
match (*msg).tag { LLMP_TAG_UNSET => panic!( @@ -1799,6 +1822,8 @@ where #[allow(clippy::type_complexity)] #[inline] pub fn recv_buf_with_flags(&mut self) -> Result, Error> { + // # Safety + // No user-provided potentially unsafe parameters. unsafe { Ok(match self.recv()? { Some(msg) => Some(( @@ -1816,6 +1841,8 @@ where #[allow(clippy::type_complexity)] #[inline] pub fn recv_buf_blocking_with_flags(&mut self) -> Result<(ClientId, Tag, Flags, &[u8]), Error> { + // # Safety + // No user-provided potentially unsafe parameters. unsafe { let msg = self.recv_blocking()?; Ok(( @@ -1830,6 +1857,8 @@ where /// Returns the next sender, tag, buf, looping until it becomes available #[inline] pub fn recv_buf_blocking(&mut self) -> Result<(ClientId, Tag, &[u8]), Error> { + // # Safety + // No user-provided potentially unsafe parameters. unsafe { let msg = self.recv_blocking()?; Ok(( @@ -1934,6 +1963,8 @@ where /// Marks the containing page as `safe_to_unmap`. /// This indicates, that the page may safely be unmapped by the sender. pub fn mark_safe_to_unmap(&mut self) { + // # Safety + // No user-provided potentially unsafe parameters. unsafe { (*self.page_mut()).receiver_joined(); } @@ -2058,6 +2089,94 @@ where hooks: HT, } +/// The trait for brokers. +pub trait Broker { + /// Getter to `is_shutting_down` + fn is_shutting_down(&self) -> bool; + + /// The hooks run for `on_timeout` + fn on_timeout(&mut self) -> Result<(), Error>; + + /// The main thing the `broker` does + fn broker_once(&mut self) -> Result; + + /// Getter to `exit_after` + fn exit_after(&self) -> Option; + + /// Setter for `exit_after` + fn set_exit_after(&mut self, n_clients: NonZeroUsize); + + /// Getter to `has_clients` + fn has_clients(&self) -> bool; + + /// Send the buffer out + fn send_buf(&mut self, tag: Tag, buf: &[u8]) -> Result<(), Error>; + + /// Getter to `num_clients_seen` + fn num_clients_seen(&self) -> usize; + + /// Getter to `nb_listeners` + fn nb_listeners(&self) -> usize; +} + +impl Broker for LlmpBroker +where + HT: LlmpHookTuple, + SP: ShMemProvider, +{ + fn is_shutting_down(&self) -> bool { + self.inner.is_shutting_down() + } + + fn on_timeout(&mut self) -> Result<(), Error> { + self.hooks.on_timeout_all() + } + + fn broker_once(&mut self) -> Result { + self.broker_once() + } + + fn exit_after(&self) -> Option { + self.inner.exit_cleanly_after + } + fn set_exit_after(&mut self, n_clients: NonZeroUsize) { + self.inner.set_exit_cleanly_after(n_clients); + } + + fn has_clients(&self) -> bool { + self.inner.has_clients() + } + + fn send_buf(&mut self, tag: Tag, buf: &[u8]) -> Result<(), Error> { + self.inner.llmp_out.send_buf(tag, buf) + } + + fn num_clients_seen(&self) -> usize { + self.inner.num_clients_seen + } + + fn nb_listeners(&self) -> usize { + self.inner.listeners.len() + } +} + +/// A set of brokers. +/// Limitation: the hooks must be the same. +#[cfg(feature = "std")] +#[derive(Default)] +pub struct Brokers { + /// the brokers + llmp_brokers: Vec>, +} + +#[cfg(feature = "std")] +impl Debug for Brokers { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + Debug::fmt("Brokers", f)?; + Ok(()) + } +} + /// A signal handler for the [`LlmpBroker`]. 
/// On unix, it handles signals /// On Windows - control signals (e.g., CTRL+C) @@ -2068,15 +2187,15 @@ pub struct LlmpShutdownSignalHandler { } #[cfg(unix)] -impl Handler for LlmpShutdownSignalHandler { - fn handle( +impl SignalHandler for LlmpShutdownSignalHandler { + unsafe fn handle( &mut self, _signal: Signal, _info: &mut siginfo_t, _context: Option<&mut ucontext_t>, ) { unsafe { - ptr::write_volatile(ptr::addr_of_mut!(self.shutting_down), true); + ptr::write_volatile(&raw mut self.shutting_down, true); } } @@ -2111,6 +2230,7 @@ where msg_tag: &mut Tag, msg_flags: &mut Flags, msg: &mut [u8], + new_msgs: &mut Vec<(Tag, Flags, Vec)>, ) -> Result; /// Hook called whenever there is a timeout. @@ -2132,6 +2252,7 @@ where msg_tag: &mut Tag, msg_flags: &mut Flags, msg: &mut [u8], + new_msgs: &mut Vec<(Tag, Flags, Vec)>, ) -> Result; /// Call all hook callbacks on timeout. @@ -2149,6 +2270,7 @@ where _msg_tag: &mut Tag, _msg_flags: &mut Flags, _msg: &mut [u8], + _new_msgs: &mut Vec<(Tag, Flags, Vec)>, ) -> Result { Ok(LlmpMsgHookResult::ForwardToClients) } @@ -2171,10 +2293,11 @@ where msg_tag: &mut Tag, msg_flags: &mut Flags, msg: &mut [u8], + new_msgs: &mut Vec<(Tag, Flags, Vec)>, ) -> Result { match self .0 - .on_new_message(inner, client_id, msg_tag, msg_flags, msg)? + .on_new_message(inner, client_id, msg_tag, msg_flags, msg, new_msgs)? { LlmpMsgHookResult::Handled => { // message handled, stop early @@ -2183,7 +2306,7 @@ where LlmpMsgHookResult::ForwardToClients => { // message should be forwarded, continue iterating self.1 - .on_new_message_all(inner, client_id, msg_tag, msg_flags, msg) + .on_new_message_all(inner, client_id, msg_tag, msg_flags, msg, new_msgs) } } } @@ -2211,6 +2334,120 @@ where } } +#[cfg(feature = "std")] +impl Brokers { + /// The constructor + #[must_use] + pub fn new() -> Self { + Self { + llmp_brokers: Vec::new(), + } + } + + /// Add another broker + pub fn add(&mut self, broker: Box) { + self.llmp_brokers.push(broker); + } + + #[cfg(any(all(unix, not(miri)), all(windows, feature = "std")))] + fn setup_handlers() { + #[cfg(all(unix, not(miri)))] + if let Err(e) = unsafe { setup_signal_handler(&raw mut LLMP_SIGHANDLER_STATE) } { + // We can live without a proper ctrl+c signal handler - Ignore. + log::info!("Failed to setup signal handlers: {e}"); + } else { + log::info!("Successfully setup signal handlers"); + } + + #[cfg(all(windows, feature = "std"))] + if let Err(e) = unsafe { setup_ctrl_handler(&raw mut LLMP_SIGHANDLER_STATE) } { + // We can live without a proper ctrl+c signal handler - Ignore. + log::info!("Failed to setup control handlers: {e}"); + } else { + log::info!( + "{}: Broker successfully setup control handlers", + std::process::id().to_string() + ); + } + } + + /// Loops until the last client quits the last broker, + /// forwarding and handling all incoming messages from clients for each broker. + /// Will call `on_timeout` roughly after `timeout` + /// Panics on error. 
+ /// 5 millis of sleep can't hurt to keep busywait not at 100% + #[cfg(feature = "std")] + pub fn loop_with_timeouts(&mut self, timeout: Duration, sleep_time: Option) { + use super::current_milliseconds; + + #[cfg(any(all(unix, not(miri)), all(windows, feature = "std")))] + Self::setup_handlers(); + + let timeout = timeout.as_millis() as u64; + let mut end_time = current_milliseconds() + timeout; + + loop { + self.llmp_brokers.retain_mut(|broker| { + if broker.is_shutting_down() { + broker.send_buf(LLMP_TAG_EXITING, &[]).expect( + "Error when shutting down broker: Could not send LLMP_TAG_EXITING msg.", + ); + + return false; + } + + if current_milliseconds() > end_time { + broker + .on_timeout() + .expect("An error occurred in broker timeout. Exiting."); + end_time = current_milliseconds() + timeout; + } + + if broker + .broker_once() + .expect("An error occurred when brokering. Exiting.") + { + end_time = current_milliseconds() + timeout; + } + + if let Some(exit_after_count) = broker.exit_after() { + // log::trace!( + // "Clients connected: {} && > {} - {} >= {}", + // self.has_clients(), + // self.num_clients_seen, + // self.listeners.len(), + // exit_after_count + // ); + if !broker.has_clients() + && (broker.num_clients_seen() - broker.nb_listeners()) + >= exit_after_count.into() + { + // No more clients connected, and the amount of clients we were waiting for was previously connected. + // exit cleanly. + return false; + } + } + + true + }); + + if self.llmp_brokers.is_empty() { + break; + } + + #[cfg(feature = "std")] + if let Some(time) = sleep_time { + thread::sleep(time); + } + + #[cfg(not(feature = "std"))] + if let Some(time) = sleep_time { + panic!("Cannot sleep on no_std platform (requested {time:?})"); + } + } + } +} + impl LlmpBroker where HT: LlmpHookTuple, @@ -2414,8 +2651,10 @@ where /// Broker broadcast to its own page for all others to read /// Returns `true` if new messages were broker-ed + /// It is supposed that the message is never unmapped. #[inline] #[allow(clippy::cast_ptr_alignment)] + #[allow(clippy::too_many_lines)] unsafe fn handle_new_msgs(&mut self, client_id: ClientId) -> Result { let mut new_messages = false; @@ -2522,9 +2761,6 @@ where } // handle all other messages _ => { - // The message is not specifically for use. Let the user handle it, then forward it to the clients, if necessary. - let mut should_forward_msg = true; - let pos = if (client_id.0 as usize) < self.inner.llmp_clients.len() && self.inner.llmp_clients[client_id.0 as usize].id == client_id { @@ -2539,18 +2775,27 @@ where let map = &mut self.inner.llmp_clients[pos].current_recv_shmem; let msg_buf = (*msg).try_as_slice_mut(map)?; - if let LlmpMsgHookResult::Handled = self.hooks.on_new_message_all( + + // The message is not specifically for use. Let the user handle it, then forward it to the clients, if necessary. + let mut new_msgs: Vec<(Tag, Flags, Vec)> = Vec::new(); + if let LlmpMsgHookResult::ForwardToClients = self.hooks.on_new_message_all( &mut self.inner, client_id, &mut (*msg).tag, &mut (*msg).flags, msg_buf, + &mut new_msgs, )? 
{ - should_forward_msg = false; + self.inner.forward_msg(msg)?; } - if should_forward_msg { - self.inner_mut().forward_msg(msg)?; + log::debug!("New msg vector: {}", new_msgs.len()); + for (new_msg_tag, new_msg_flag, new_msg) in new_msgs { + self.inner.llmp_out.send_buf_with_flags( + new_msg_tag, + new_msg_flag, + new_msg.as_ref(), + )?; } } } @@ -2560,7 +2805,7 @@ where #[cfg(any(all(unix, not(miri)), all(windows, feature = "std")))] fn setup_handlers() { #[cfg(all(unix, not(miri)))] - if let Err(e) = unsafe { setup_signal_handler(ptr::addr_of_mut!(LLMP_SIGHANDLER_STATE)) } { + if let Err(e) = unsafe { setup_signal_handler(&raw mut LLMP_SIGHANDLER_STATE) } { // We can live without a proper ctrl+c signal handler - Ignore. log::info!("Failed to setup signal handlers: {e}"); } else { @@ -2568,7 +2813,7 @@ where } #[cfg(all(windows, feature = "std"))] - if let Err(e) = unsafe { setup_ctrl_handler(ptr::addr_of_mut!(LLMP_SIGHANDLER_STATE)) } { + if let Err(e) = unsafe { setup_ctrl_handler(&raw mut LLMP_SIGHANDLER_STATE) } { // We can live without a proper ctrl+c signal handler - Ignore. log::info!("Failed to setup control handlers: {e}"); } else { @@ -2796,7 +3041,10 @@ where #[cfg(any(unix, all(windows, feature = "std")))] #[allow(clippy::unused_self)] fn is_shutting_down(&self) -> bool { - unsafe { ptr::read_volatile(ptr::addr_of!(LLMP_SIGHANDLER_STATE.shutting_down)) } + // # Safety + // No user-provided potentially unsafe parameters. + // Volatile read. + unsafe { ptr::read_volatile(&raw const (LLMP_SIGHANDLER_STATE.shutting_down)) } } /// Always returns true on platforms, where no shutdown signal handlers are supported @@ -2859,6 +3107,8 @@ where /// Tell the broker to disconnect this client from it. #[cfg(feature = "std")] fn announce_client_exit(sender: &mut LlmpSender, client_id: u32) -> Result<(), Error> { + // # Safety + // No user-provided potentially unsafe parameters. unsafe { let msg = sender .alloc_next(size_of::()) @@ -3467,12 +3717,14 @@ where ErrorKind::ConnectionRefused => { //connection refused. loop till the broker is up loop { - match TcpStream::connect((IP_LOCALHOST, port)) { - Ok(stream) => break stream, - Err(_) => { - log::info!("Connection Refused.. Retrying"); - } + if let Ok(stream) = TcpStream::connect((IP_LOCALHOST, port)) { + break stream; } + + log::debug!("Connection Refused. Retrying..."); + + #[cfg(feature = "std")] + thread::sleep(Duration::from_millis(50)); } } _ => return Err(Error::illegal_state(e.to_string())), @@ -3544,7 +3796,7 @@ mod tests { #[test] #[serial] #[cfg_attr(miri, ignore)] - pub fn test_llmp_connection() { + fn test_llmp_connection() { #[allow(unused_variables)] let shmem_provider = StdShMemProvider::new().unwrap(); let mut broker = match LlmpConnection::on_port(shmem_provider.clone(), 1337).unwrap() { diff --git a/libafl_bolts/src/minibsod.rs b/libafl_bolts/src/minibsod.rs index 4b4b4f2166..757f5ebd7f 100644 --- a/libafl_bolts/src/minibsod.rs +++ b/libafl_bolts/src/minibsod.rs @@ -1,7 +1,7 @@ //! Implements a mini-bsod generator. //! It dumps all important registers and prints a stacktrace. 
-#[cfg(target_vendor = "apple")] +#[cfg(any(target_vendor = "apple", target_os = "openbsd"))] use core::mem::size_of; use std::io::{BufWriter, Write}; #[cfg(any(target_os = "solaris", target_os = "illumos"))] @@ -454,34 +454,82 @@ pub fn dump_registers( } /// Write the content of all important registers -#[cfg(windows)] +#[cfg(all(target_os = "windows", target_arch = "x86_64"))] #[allow(clippy::similar_names)] pub fn dump_registers( writer: &mut BufWriter, context: &CONTEXT, ) -> Result<(), std::io::Error> { - write!(writer, "r8 : {:#016x}, ", context.R8)?; - write!(writer, "r9 : {:#016x}, ", context.R9)?; - write!(writer, "r10: {:#016x}, ", context.R10)?; - writeln!(writer, "r11: {:#016x}, ", context.R11)?; - write!(writer, "r12: {:#016x}, ", context.R12)?; - write!(writer, "r13: {:#016x}, ", context.R13)?; - write!(writer, "r14: {:#016x}, ", context.R14)?; - writeln!(writer, "r15: {:#016x}, ", context.R15)?; - write!(writer, "rdi: {:#016x}, ", context.Rdi)?; - write!(writer, "rsi: {:#016x}, ", context.Rsi)?; - write!(writer, "rbp: {:#016x}, ", context.Rbp)?; - writeln!(writer, "rbx: {:#016x}, ", context.Rbx)?; - write!(writer, "rdx: {:#016x}, ", context.Rdx)?; - write!(writer, "rax: {:#016x}, ", context.Rax)?; - write!(writer, "rcx: {:#016x}, ", context.Rcx)?; - writeln!(writer, "rsp: {:#016x}, ", context.Rsp)?; - write!(writer, "rip: {:#016x}, ", context.Rip)?; - writeln!(writer, "efl: {:#016x}, ", context.EFlags)?; + write!(writer, "r8 : {:#018x}, ", context.R8)?; + write!(writer, "r9 : {:#018x}, ", context.R9)?; + write!(writer, "r10: {:#018x}, ", context.R10)?; + writeln!(writer, "r11: {:#018x}, ", context.R11)?; + write!(writer, "r12: {:#018x}, ", context.R12)?; + write!(writer, "r13: {:#018x}, ", context.R13)?; + write!(writer, "r14: {:#018x}, ", context.R14)?; + writeln!(writer, "r15: {:#018x}, ", context.R15)?; + write!(writer, "rdi: {:#018x}, ", context.Rdi)?; + write!(writer, "rsi: {:#018x}, ", context.Rsi)?; + write!(writer, "rbp: {:#018x}, ", context.Rbp)?; + writeln!(writer, "rbx: {:#018x}, ", context.Rbx)?; + write!(writer, "rdx: {:#018x}, ", context.Rdx)?; + write!(writer, "rax: {:#018x}, ", context.Rax)?; + write!(writer, "rcx: {:#018x}, ", context.Rcx)?; + writeln!(writer, "rsp: {:#018x}, ", context.Rsp)?; + write!(writer, "rip: {:#018x}, ", context.Rip)?; + writeln!(writer, "efl: {:#018x}", context.EFlags)?; Ok(()) } +/// Write the content of all important registers +#[cfg(all(target_os = "windows", target_arch = "x86"))] +#[allow(clippy::similar_names)] +pub fn dump_registers( + writer: &mut BufWriter, + context: &CONTEXT, +) -> Result<(), std::io::Error> { + write!(writer, "eax: {:#010x}, ", context.Eax)?; + write!(writer, "ebx: {:#010x}, ", context.Ebx)?; + write!(writer, "ecx: {:#010x}, ", context.Ecx)?; + writeln!(writer, "edx: {:#010x}, ", context.Edx)?; + write!(writer, "edi: {:#010x}, ", context.Edi)?; + write!(writer, "esi: {:#010x}, ", context.Esi)?; + write!(writer, "esp: {:#010x}, ", context.Esp)?; + writeln!(writer, "ebp: {:#010x}, ", context.Ebp)?; + write!(writer, "eip: {:#010x}, ", context.Eip)?; + writeln!(writer, "efl: {:#010x} ", context.EFlags)?; + Ok(()) +} + +/// Write the content of all important registers +#[cfg(all(target_os = "windows", target_arch = "aarch64"))] +#[allow(clippy::similar_names)] +pub fn dump_registers( + writer: &mut BufWriter, + context: &CONTEXT, +) -> Result<(), std::io::Error> { + for reg in 0..29_usize { + write!(writer, "x{:02}: 0x{:016x} ", reg, unsafe { + context.Anonymous.X[reg] + })?; + if reg % 4 == 3 || reg == 
28_usize { + writeln!(writer)?; + } + } + writeln!(writer, "pc : 0x{:016x} ", context.Pc)?; + writeln!(writer, "sp : 0x{:016x} ", context.Sp)?; + writeln!(writer, "fp : 0x{:016x} ", unsafe { + context.Anonymous.Anonymous.Fp + })?; + writeln!(writer, "lr : 0x{:016x} ", unsafe { + context.Anonymous.Anonymous.Lr + })?; + + Ok(()) +} + +/// Write the content of all important registers #[cfg(all(target_os = "haiku", target_arch = "x86_64"))] #[allow(clippy::similar_names)] pub fn dump_registers( @@ -1115,9 +1163,88 @@ mod tests { #[test] #[cfg_attr(miri, ignore)] - pub fn test_dump_registers() { + fn test_dump_registers() { let ucontext = ucontext().unwrap(); let mut writer = BufWriter::new(stdout()); dump_registers(&mut writer, &ucontext).unwrap(); } } + +#[cfg(windows)] +#[cfg(test)] +mod tests { + + use std::{ + io::{stdout, BufWriter}, + os::raw::c_void, + sync::mpsc, + }; + + use windows::Win32::{ + Foundation::{CloseHandle, DuplicateHandle, DUPLICATE_SAME_ACCESS, HANDLE}, + System::{ + Diagnostics::Debug::{ + GetThreadContext, CONTEXT, CONTEXT_FULL_AMD64, CONTEXT_FULL_ARM64, CONTEXT_FULL_X86, + }, + Threading::{GetCurrentProcess, GetCurrentThread, ResumeThread, SuspendThread}, + }, + }; + + use crate::minibsod::dump_registers; + + #[derive(Default)] + #[repr(align(16))] + struct Align16 { + pub ctx: CONTEXT, + } + + #[test] + #[cfg_attr(miri, ignore)] + fn test_dump_registers() { + let (tx, rx) = mpsc::channel(); + let (evt_tx, evt_rx) = mpsc::channel(); + let t = std::thread::spawn(move || { + let cur = unsafe { GetCurrentThread() }; + let proc = unsafe { GetCurrentProcess() }; + let mut out = HANDLE::default(); + unsafe { + DuplicateHandle( + proc, + cur, + proc, + &raw mut out, + 0, + true, + DUPLICATE_SAME_ACCESS, + ) + .unwrap(); + }; + tx.send(out.0 as i64).unwrap(); + evt_rx.recv().unwrap(); + }); + + let thread = rx.recv().unwrap(); + let thread = HANDLE(thread as *mut c_void); + eprintln!("thread: {thread:?}"); + unsafe { SuspendThread(thread) }; + + // https://stackoverflow.com/questions/56516445/getting-0x3e6-when-calling-getthreadcontext-for-debugged-thread + let mut c = Align16::default(); + if cfg!(target_arch = "x86") { + c.ctx.ContextFlags = CONTEXT_FULL_X86; + } else if cfg!(target_arch = "x86_64") { + c.ctx.ContextFlags = CONTEXT_FULL_AMD64; + } else if cfg!(target_arch = "aarch64") { + c.ctx.ContextFlags = CONTEXT_FULL_ARM64; + } + unsafe { GetThreadContext(thread, &raw mut (c.ctx)).unwrap() }; + + let mut writer = BufWriter::new(stdout()); + dump_registers(&mut writer, &c.ctx).unwrap(); + + unsafe { ResumeThread(thread) }; + unsafe { CloseHandle(thread).unwrap() }; + evt_tx.send(true).unwrap(); + t.join().unwrap(); + } +} diff --git a/libafl_bolts/src/os/mod.rs b/libafl_bolts/src/os/mod.rs index 5160bbb50b..1c4d83862c 100644 --- a/libafl_bolts/src/os/mod.rs +++ b/libafl_bolts/src/os/mod.rs @@ -115,6 +115,28 @@ pub fn dup(fd: RawFd) -> Result { } } +// Derived from https://github.com/RustPython/RustPython/blob/7996a10116681e9f85eda03413d5011b805e577f/stdlib/src/resource.rs#L113 +// LICENSE: MIT https://github.com/RustPython/RustPython/commit/37355d612a451fba7fef8f13a1b9fdd51310b37e +/// Get the peak rss (Resident Set Size) of the all child processes +/// that have terminated and been waited for +#[cfg(all(unix, feature = "std"))] +pub fn peak_rss_mb_child_processes() -> Result { + use core::mem; + use std::io; + + use libc::{rusage, RUSAGE_CHILDREN}; + + let rss = unsafe { + let mut rusage = mem::MaybeUninit::::uninit(); + if libc::getrusage(RUSAGE_CHILDREN, 
rusage.as_mut_ptr()) == -1 { + Err(io::Error::last_os_error()) + } else { + Ok(rusage.assume_init()) + } + }?; + Ok(rss.ru_maxrss >> 10) +} + /// "Safe" wrapper around dup2 /// /// # Safety diff --git a/libafl_bolts/src/os/unix_shmem_server.rs b/libafl_bolts/src/os/unix_shmem_server.rs index 77f0a6208f..d8adfb2059 100644 --- a/libafl_bolts/src/os/unix_shmem_server.rs +++ b/libafl_bolts/src/os/unix_shmem_server.rs @@ -13,7 +13,6 @@ use alloc::{ use core::{ mem::ManuallyDrop, ops::{Deref, DerefMut}, - ptr::addr_of, }; #[cfg(target_vendor = "apple")] use std::fs; @@ -694,7 +693,7 @@ where let copied_poll_fds: Vec = poll_fds.clone(); for poll_fd in copied_poll_fds { let revents = poll_fd.revents().expect("revents should not be None"); - let raw_polled_fd = unsafe { *((addr_of!(poll_fd)) as *const libc::pollfd) }.fd; + let raw_polled_fd = unsafe { *((&raw const poll_fd) as *const libc::pollfd) }.fd; if revents.contains(PollFlags::POLLHUP) { poll_fds.remove(poll_fds.iter().position(|item| *item == poll_fd).unwrap()); self.clients.remove(&raw_polled_fd); diff --git a/libafl_bolts/src/os/unix_signals.rs b/libafl_bolts/src/os/unix_signals.rs index 12d62f6604..acd8a5f48e 100644 --- a/libafl_bolts/src/os/unix_signals.rs +++ b/libafl_bolts/src/os/unix_signals.rs @@ -6,7 +6,7 @@ use core::mem::size_of; #[cfg(feature = "alloc")] use core::{ cell::UnsafeCell, - ptr::{self, addr_of_mut, write_volatile}, + ptr::{self, write_volatile}, sync::atomic::{compiler_fence, Ordering}, }; use core::{ @@ -387,16 +387,25 @@ impl Display for Signal { /// A trait for `LibAFL` signal handling #[cfg(feature = "alloc")] -pub trait Handler { +pub trait SignalHandler { /// Handle a signal - fn handle(&mut self, signal: Signal, info: &mut siginfo_t, _context: Option<&mut ucontext_t>); + /// + /// # Safety + /// This is generally not safe to call. It should only be called through the signal it was registered for. + /// Signal handling is hard, don't mess with it :). + unsafe fn handle( + &mut self, + signal: Signal, + info: &mut siginfo_t, + _context: Option<&mut ucontext_t>, + ); /// Return a list of signals to handle fn signals(&self) -> Vec; } #[cfg(feature = "alloc")] struct HandlerHolder { - handler: UnsafeCell<*mut dyn Handler>, + handler: UnsafeCell<*mut dyn SignalHandler>, } #[cfg(feature = "alloc")] @@ -441,6 +450,7 @@ unsafe fn handle_signal(sig: c_int, info: *mut siginfo_t, void: *mut c_void) { } /// Setup signal handlers in a somewhat rusty way. +/// /// This will allocate a signal stack and set the signal handlers accordingly. /// It is, for example, used in `LibAFL's` `InProcessExecutor` to restart the fuzzer in case of a crash, /// or to handle `SIGINT` in the broker process. @@ -451,7 +461,9 @@ unsafe fn handle_signal(sig: c_int, info: *mut siginfo_t, void: *mut c_void) { /// The handler pointer will be dereferenced, and the data the pointer points to may therefore not move. /// A lot can go south in signal handling. Be sure you know what you are doing. #[cfg(feature = "alloc")] -pub unsafe fn setup_signal_handler(handler: *mut T) -> Result<(), Error> { +pub unsafe fn setup_signal_handler( + handler: *mut T, +) -> Result<(), Error> { // First, set up our own stack to be used during segfault handling. 
(and specify `SA_ONSTACK` in `sigaction`) if SIGNAL_STACK_PTR.is_null() { SIGNAL_STACK_PTR = malloc(SIGNAL_STACK_SIZE); @@ -465,23 +477,23 @@ pub unsafe fn setup_signal_handler(handler: *mut T) -> Res let mut ss: stack_t = mem::zeroed(); ss.ss_size = SIGNAL_STACK_SIZE; ss.ss_sp = SIGNAL_STACK_PTR; - sigaltstack(addr_of_mut!(ss), ptr::null_mut() as _); + sigaltstack(&raw mut ss, ptr::null_mut() as _); let mut sa: sigaction = mem::zeroed(); - sigemptyset(addr_of_mut!(sa.sa_mask)); - sigaddset(addr_of_mut!(sa.sa_mask), SIGALRM); + sigemptyset(&raw mut sa.sa_mask); + sigaddset(&raw mut sa.sa_mask, SIGALRM); sa.sa_flags = SA_NODEFER | SA_SIGINFO | SA_ONSTACK; sa.sa_sigaction = handle_signal as usize; let signals = unsafe { (*handler).signals() }; for sig in signals { write_volatile( - addr_of_mut!(SIGNAL_HANDLERS[sig as usize]), + &raw mut SIGNAL_HANDLERS[sig as usize], Some(HandlerHolder { - handler: UnsafeCell::new(handler as *mut dyn Handler), + handler: UnsafeCell::new(handler as *mut dyn SignalHandler), }), ); - if sigaction(sig as i32, addr_of_mut!(sa), ptr::null_mut()) < 0 { + if sigaction(sig as i32, &raw mut sa, ptr::null_mut()) < 0 { #[cfg(feature = "std")] { let err_str = CString::new(format!("Failed to setup {sig} handler")).unwrap(); @@ -496,6 +508,7 @@ pub unsafe fn setup_signal_handler(handler: *mut T) -> Res } /// Function to get the current [`ucontext_t`] for this process. +/// /// This calls the libc `getcontext` function under the hood. /// It can be useful, for example for `dump_regs`. /// Note that calling this method may, of course, alter the state. @@ -506,7 +519,7 @@ pub unsafe fn setup_signal_handler(handler: *mut T) -> Res #[inline(always)] pub fn ucontext() -> Result { let mut ucontext = unsafe { mem::zeroed() }; - if cfg!(not(target_os = "openbsd")) { + if cfg!(not(any(target_os = "openbsd", target_os = "haiku"))) { if unsafe { getcontext(&mut ucontext) } == 0 { Ok(ucontext) } else { diff --git a/libafl_bolts/src/os/windows_exceptions.rs b/libafl_bolts/src/os/windows_exceptions.rs index 867b4d6917..d1098c98e8 100644 --- a/libafl_bolts/src/os/windows_exceptions.rs +++ b/libafl_bolts/src/os/windows_exceptions.rs @@ -5,12 +5,11 @@ use alloc::vec::Vec; use core::{ cell::UnsafeCell, fmt::{self, Display, Formatter}, - ptr::{self, addr_of, addr_of_mut, write_volatile}, + ptr::{self, write_volatile}, sync::atomic::{compiler_fence, Ordering}, }; use std::os::raw::{c_long, c_void}; -use log::info; use num_enum::FromPrimitive; pub use windows::Win32::{ Foundation::{BOOL, NTSTATUS}, @@ -423,9 +422,13 @@ pub static EXCEPTION_CODES_MAPPING: [ExceptionCode; 79] = [ ]; #[cfg(feature = "alloc")] -pub trait Handler { +pub trait ExceptionHandler { /// Handle an exception - fn handle( + /// + /// # Safety + /// This is generally not safe to call. It should only be called through the signal it was registered for. + /// Signal handling is hard, don't mess with it :). 
+ unsafe fn handle( &mut self, exception_code: ExceptionCode, exception_pointers: *mut EXCEPTION_POINTERS, @@ -435,7 +438,7 @@ pub trait Handler { } struct HandlerHolder { - handler: UnsafeCell<*mut dyn Handler>, + handler: UnsafeCell<*mut dyn ExceptionHandler>, } pub const EXCEPTION_HANDLERS_SIZE: usize = 96; @@ -460,31 +463,28 @@ unsafe fn internal_handle_exception( .iter() .position(|x| *x == exception_code) .unwrap(); - match &EXCEPTION_HANDLERS[index] { - Some(handler_holder) => { - info!( - "{:?}: Handling exception {}", - std::process::id(), - exception_code - ); - let handler = &mut **handler_holder.handler.get(); - handler.handle(exception_code, exception_pointers); - EXCEPTION_CONTINUE_EXECUTION - } - None => { - info!( - "{:?}: No handler for exception {}", - std::process::id(), - exception_code - ); - // Go to Default one - let handler_holder = &EXCEPTION_HANDLERS[EXCEPTION_HANDLERS_SIZE - 1] - .as_ref() - .unwrap(); - let handler = &mut **handler_holder.handler.get(); - handler.handle(exception_code, exception_pointers); - EXCEPTION_CONTINUE_SEARCH - } + if let Some(handler_holder) = &EXCEPTION_HANDLERS[index] { + log::info!( + "{:?}: Handling exception {}", + std::process::id(), + exception_code + ); + let handler = &mut **handler_holder.handler.get(); + handler.handle(exception_code, exception_pointers); + EXCEPTION_CONTINUE_EXECUTION + } else { + log::info!( + "{:?}: No handler for exception {}", + std::process::id(), + exception_code + ); + // Go to Default one + let handler_holder = &EXCEPTION_HANDLERS[EXCEPTION_HANDLERS_SIZE - 1] + .as_ref() + .unwrap(); + let handler = &mut **handler_holder.handler.get(); + handler.handle(exception_code, exception_pointers); + EXCEPTION_CONTINUE_SEARCH } } @@ -512,7 +512,7 @@ pub unsafe extern "system" fn handle_exception( /// It is just casting into another type, nothing unsafe. #[must_use] pub const unsafe fn sig_ign() -> NativeSignalHandlerType { - core::mem::transmute(1u64) + core::mem::transmute(1usize) } type NativeSignalHandlerType = unsafe extern "C" fn(i32); @@ -529,7 +529,9 @@ unsafe extern "C" fn handle_signal(_signum: i32) { /// # Safety /// Exception handlers are usually ugly, handle with care! 
#[cfg(feature = "alloc")] -pub unsafe fn setup_exception_handler(handler: *mut T) -> Result<(), Error> { +pub unsafe fn setup_exception_handler( + handler: *mut T, +) -> Result<(), Error> { let exceptions = (*handler).exceptions(); let mut catch_assertions = false; for exception_code in exceptions { @@ -541,17 +543,17 @@ pub unsafe fn setup_exception_handler(handler: *mut T) -> .position(|x| *x == exception_code) .unwrap(); write_volatile( - addr_of_mut!(EXCEPTION_HANDLERS[index]), + &raw mut EXCEPTION_HANDLERS[index], Some(HandlerHolder { - handler: UnsafeCell::new(handler as *mut dyn Handler), + handler: UnsafeCell::new(handler as *mut dyn ExceptionHandler), }), ); } write_volatile( - addr_of_mut!(EXCEPTION_HANDLERS[EXCEPTION_HANDLERS_SIZE - 1]), + &raw mut (EXCEPTION_HANDLERS[EXCEPTION_HANDLERS_SIZE - 1]), Some(HandlerHolder { - handler: UnsafeCell::new(handler as *mut dyn Handler), + handler: UnsafeCell::new(handler as *mut dyn ExceptionHandler), }), ); compiler_fence(Ordering::SeqCst); @@ -590,7 +592,7 @@ pub(crate) unsafe fn setup_ctrl_handler( handler: *mut T, ) -> Result<(), Error> { write_volatile( - addr_of_mut!(CTRL_HANDLER), + &raw mut (CTRL_HANDLER), Some(CtrlHandlerHolder { handler: UnsafeCell::new(handler as *mut dyn CtrlHandler), }), @@ -601,21 +603,21 @@ pub(crate) unsafe fn setup_ctrl_handler( let result = SetConsoleCtrlHandler(Some(ctrl_handler), true); match result { Ok(()) => { - info!("SetConsoleCtrlHandler succeeded"); + log::info!("SetConsoleCtrlHandler succeeded"); Ok(()) } Err(err) => { - info!("SetConsoleCtrlHandler failed"); + log::info!("SetConsoleCtrlHandler failed"); Err(Error::from(err)) } } } unsafe extern "system" fn ctrl_handler(ctrl_type: u32) -> BOOL { - let handler = ptr::read_volatile(addr_of!(CTRL_HANDLER)); + let handler = ptr::read_volatile(&raw const (CTRL_HANDLER)); match handler { Some(handler_holder) => { - info!("{:?}: Handling ctrl {}", std::process::id(), ctrl_type); + log::info!("{:?}: Handling ctrl {}", std::process::id(), ctrl_type); let handler = &mut *handler_holder.handler.get(); if let Some(ctrl_handler) = handler.as_mut() { (*ctrl_handler).handle(ctrl_type).into() diff --git a/libafl_bolts/src/ownedref.rs b/libafl_bolts/src/ownedref.rs index 9cf4e19568..f882554646 100644 --- a/libafl_bolts/src/ownedref.rs +++ b/libafl_bolts/src/ownedref.rs @@ -9,13 +9,68 @@ use alloc::{ use core::{ clone::Clone, fmt::Debug, - ops::{Deref, DerefMut}, + ops::{Deref, DerefMut, RangeBounds}, + ptr::NonNull, slice, + slice::SliceIndex, }; use serde::{Deserialize, Deserializer, Serialize, Serializer}; -use crate::{shmem::ShMem, AsSlice, AsSliceMut, IntoOwned, Truncate}; +use crate::{ + shmem::ShMem, AsSizedSlice, AsSizedSliceMut, AsSlice, AsSliceMut, IntoOwned, Truncate, +}; + +/// Constant size array visitor for serde deserialization. +/// Mostly taken from +mod arrays { + use alloc::{boxed::Box, fmt, vec::Vec}; + use core::{convert::TryInto, marker::PhantomData}; + + use serde::{ + de::{SeqAccess, Visitor}, + Deserialize, Deserializer, + }; + + struct ArrayVisitor(PhantomData); + + impl<'de, T, const N: usize> Visitor<'de> for ArrayVisitor + where + T: Deserialize<'de>, + { + type Value = Box<[T; N]>; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str(&format!("an array of length {N}")) + } + + #[inline] + fn visit_seq(self, mut seq: A) -> Result + where + A: SeqAccess<'de>, + { + // can be optimized using MaybeUninit + let mut data = Vec::with_capacity(N); + for _ in 0..N { + match (seq.next_element())? 
{ + Some(val) => data.push(val), + None => return Err(serde::de::Error::invalid_length(N, &self)), + } + } + match data.try_into() { + Ok(arr) => Ok(arr), + Err(_) => unreachable!(), + } + } + } + pub fn deserialize<'de, D, T, const N: usize>(deserializer: D) -> Result, D::Error> + where + D: Deserializer<'de>, + T: Deserialize<'de>, + { + deserializer.deserialize_tuple(N, ArrayVisitor::(PhantomData)) + } +} /// Private part of the unsafe marker, making sure this cannot be initialized directly. #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] @@ -38,13 +93,13 @@ impl UnsafeMarker { } } -impl<'a, T> Truncate for &'a [T] { +impl Truncate for &[T] { fn truncate(&mut self, len: usize) { *self = &self[..len]; } } -impl<'a, T> Truncate for &'a mut [T] { +impl Truncate for &mut [T] { fn truncate(&mut self, len: usize) { let mut value = core::mem::take(self); value = unsafe { value.get_unchecked_mut(..len) }; @@ -53,7 +108,7 @@ impl<'a, T> Truncate for &'a mut [T] { } /// Wrap a reference and convert to a [`Box`] on serialize -#[derive(Clone, Debug)] +#[derive(Debug)] pub enum OwnedRef<'a, T> where T: 'a + ?Sized, @@ -66,6 +121,30 @@ where Owned(Box), } +/// Special case, &\[u8] is a fat pointer containing the size implicitly. +impl Clone for OwnedRef<'_, [u8]> { + fn clone(&self) -> Self { + match self { + Self::RefRaw(_, _) => panic!("Cannot clone"), + Self::Ref(slice) => Self::Ref(slice), + Self::Owned(elt) => Self::Owned(elt.clone()), + } + } +} + +impl<'a, T> Clone for OwnedRef<'a, T> +where + T: 'a + Sized + Clone, +{ + fn clone(&self) -> Self { + match self { + Self::RefRaw(ptr, mrkr) => Self::RefRaw(*ptr, mrkr.clone()), + Self::Ref(slice) => Self::Ref(slice), + Self::Owned(elt) => Self::Owned(elt.clone()), + } + } +} + impl<'a, T> OwnedRef<'a, T> where T: 'a + ?Sized, @@ -82,9 +161,24 @@ where ); Self::RefRaw(ptr, UnsafeMarker::new()) } + + /// Returns true if the inner ref is a raw pointer, false otherwise. 
+ #[must_use] + pub fn is_raw(&self) -> bool { + matches!(self, OwnedRef::Ref(_)) + } + + /// Return the inner value, if owned by the given object + #[must_use] + pub fn into_owned(self) -> Option> { + match self { + Self::Owned(val) => Some(val), + _ => None, + } + } } -impl<'a, T> OwnedRef<'a, T> +impl OwnedRef<'_, T> where T: Sized + 'static, { @@ -135,7 +229,18 @@ where } } -impl<'a, T> AsRef for OwnedRef<'a, T> +impl AsRef<[u8]> for OwnedRef<'_, [u8]> { + #[must_use] + fn as_ref(&self) -> &[u8] { + match self { + OwnedRef::RefRaw(r, _) => unsafe { (*r).as_ref().unwrap() }, + OwnedRef::Ref(r) => r, + OwnedRef::Owned(v) => v.as_ref(), + } + } +} + +impl AsRef for OwnedRef<'_, T> where T: Sized, { @@ -149,7 +254,7 @@ where } } -impl<'a, T> IntoOwned for OwnedRef<'a, T> +impl IntoOwned for OwnedRef<'_, T> where T: Sized + Clone, { @@ -208,7 +313,7 @@ where } } -impl<'a, T> OwnedRefMut<'a, T> +impl OwnedRefMut<'_, T> where T: Sized + 'static, { @@ -255,7 +360,7 @@ where } } -impl<'a, T: Sized> AsRef for OwnedRefMut<'a, T> { +impl AsRef for OwnedRefMut<'_, T> { #[must_use] fn as_ref(&self) -> &T { match self { @@ -266,7 +371,7 @@ impl<'a, T: Sized> AsRef for OwnedRefMut<'a, T> { } } -impl<'a, T: Sized> AsMut for OwnedRefMut<'a, T> { +impl AsMut for OwnedRefMut<'_, T> { #[must_use] fn as_mut(&mut self) -> &mut T { match self { @@ -277,7 +382,7 @@ impl<'a, T: Sized> AsMut for OwnedRefMut<'a, T> { } } -impl<'a, T> IntoOwned for OwnedRefMut<'a, T> +impl IntoOwned for OwnedRefMut<'_, T> where T: Sized + Clone, { @@ -407,9 +512,20 @@ impl<'a, T> OwnedSlice<'a, T> { pub fn iter(&self) -> Iter<'_, T> { <&Self as IntoIterator>::into_iter(self) } + + /// Returns a subslice of the slice. + #[must_use] + pub fn slice + SliceIndex<[T], Output = [T]>>( + &'a self, + range: R, + ) -> OwnedSlice<'a, T> { + OwnedSlice { + inner: OwnedSliceInner::Ref(&self[range]), + } + } } -impl<'a, 'it, T> IntoIterator for &'it OwnedSlice<'a, T> { +impl<'it, T> IntoIterator for &'it OwnedSlice<'_, T> { type Item = as Iterator>::Item; type IntoIter = Iter<'it, T>; @@ -419,7 +535,7 @@ impl<'a, 'it, T> IntoIterator for &'it OwnedSlice<'a, T> { } /// Create a new [`OwnedSlice`] from a vector -impl<'a, T> From> for OwnedSlice<'a, T> { +impl From> for OwnedSlice<'_, T> { fn from(vec: Vec) -> Self { Self { inner: OwnedSliceInner::Owned(vec), @@ -451,7 +567,7 @@ impl<'a, T> From> for OwnedSlice<'a, T> { Self { inner: match mut_slice.inner { OwnedMutSliceInner::RefRaw(ptr, len, unsafe_marker) => { - OwnedSliceInner::RefRaw(ptr as _, len, unsafe_marker) + OwnedSliceInner::RefRaw(ptr.cast_const(), len, unsafe_marker) } OwnedMutSliceInner::Ref(r) => OwnedSliceInner::Ref(r as _), OwnedMutSliceInner::Owned(v) => OwnedSliceInner::Owned(v), @@ -460,7 +576,7 @@ impl<'a, T> From> for OwnedSlice<'a, T> { } } -impl<'a, T: Sized> Deref for OwnedSlice<'a, T> { +impl Deref for OwnedSlice<'_, T> { type Target = [T]; fn deref(&self) -> &Self::Target { @@ -472,7 +588,7 @@ impl<'a, T: Sized> Deref for OwnedSlice<'a, T> { } } -impl<'a, T> IntoOwned for OwnedSlice<'a, T> +impl IntoOwned for OwnedSlice<'_, T> where T: Sized + Clone, { @@ -561,7 +677,7 @@ pub struct OwnedMutSlice<'a, T: 'a + Sized> { inner: OwnedMutSliceInner<'a, T>, } -impl<'a, 'it, T> IntoIterator for &'it mut OwnedMutSlice<'a, T> { +impl<'it, T> IntoIterator for &'it mut OwnedMutSlice<'_, T> { type Item = as Iterator>::Item; type IntoIter = IterMut<'it, T>; @@ -570,7 +686,7 @@ impl<'a, 'it, T> IntoIterator for &'it mut OwnedMutSlice<'a, T> { } } -impl<'a, 'it, T> IntoIterator for 
&'it OwnedMutSlice<'a, T> { +impl<'it, T> IntoIterator for &'it OwnedMutSlice<'_, T> { type Item = as Iterator>::Item; type IntoIter = Iter<'it, T>; @@ -643,7 +759,7 @@ impl<'a, T: 'a + Sized> OwnedMutSlice<'a, T> { } } -impl<'a, T: Sized> Deref for OwnedMutSlice<'a, T> { +impl Deref for OwnedMutSlice<'_, T> { type Target = [T]; fn deref(&self) -> &Self::Target { @@ -655,7 +771,7 @@ impl<'a, T: Sized> Deref for OwnedMutSlice<'a, T> { } } -impl<'a, T: Sized> DerefMut for OwnedMutSlice<'a, T> { +impl DerefMut for OwnedMutSlice<'_, T> { fn deref_mut(&mut self) -> &mut [T] { match &mut self.inner { OwnedMutSliceInner::RefRaw(rr, len, _) => unsafe { @@ -667,7 +783,7 @@ impl<'a, T: Sized> DerefMut for OwnedMutSlice<'a, T> { } } -impl<'a, T> IntoOwned for OwnedMutSlice<'a, T> +impl IntoOwned for OwnedMutSlice<'_, T> where T: Sized + Clone, { @@ -703,7 +819,7 @@ impl<'a, T: 'a + Clone> Clone for OwnedMutSlice<'a, T> { } /// Create a new [`OwnedMutSlice`] from a vector -impl<'a, T> From> for OwnedMutSlice<'a, T> { +impl From> for OwnedMutSlice<'_, T> { fn from(vec: Vec) -> Self { Self { inner: OwnedMutSliceInner::Owned(vec), @@ -753,6 +869,211 @@ impl<'a, T> From<&'a mut &'a mut [T]> for OwnedMutSlice<'a, T> { } } +/// Wrap a mutable slice and convert to a Box on serialize. +/// +/// We use a hidden inner enum so the public API can be safe, +/// unless the user uses the unsafe [`OwnedMutSizedSlice::from_raw_mut`]. +/// The variable length version is [`OwnedMutSlice`]. +#[derive(Debug)] +pub enum OwnedMutSizedSliceInner<'a, T: 'a + Sized, const N: usize> { + /// A raw ptr to a memory location of length N + RefRaw(*mut [T; N], UnsafeMarker), + /// A ptr to a mutable slice of the type + Ref(&'a mut [T; N]), + /// An owned [`Box`] of the type + Owned(Box<[T; N]>), +} + +impl<'a, T: 'a + Sized + Serialize, const N: usize> Serialize + for OwnedMutSizedSliceInner<'a, T, N> +{ + fn serialize(&self, se: S) -> Result + where + S: Serializer, + { + match self { + OwnedMutSizedSliceInner::RefRaw(rr, _) => unsafe { &**rr }.serialize(se), + OwnedMutSizedSliceInner::Ref(r) => (*r).serialize(se), + OwnedMutSizedSliceInner::Owned(b) => (*b).serialize(se), + } + } +} + +impl<'de, 'a, T: 'a + Sized, const N: usize> Deserialize<'de> for OwnedMutSizedSliceInner<'a, T, N> +where + T: Deserialize<'de>, +{ + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + arrays::deserialize(deserializer).map(OwnedMutSizedSliceInner::Owned) + } +} + +/// Wrap a mutable slice of constant size N and convert to a Box on serialize +#[allow(clippy::unsafe_derive_deserialize)] +#[derive(Debug, Serialize, Deserialize)] +pub struct OwnedMutSizedSlice<'a, T: 'a + Sized, const N: usize> { + inner: OwnedMutSizedSliceInner<'a, T, N>, +} + +impl<'it, T, const N: usize> IntoIterator for &'it mut OwnedMutSizedSlice<'_, T, N> { + type Item = as Iterator>::Item; + type IntoIter = IterMut<'it, T>; + + fn into_iter(self) -> Self::IntoIter { + self.as_sized_slice_mut().iter_mut() + } +} + +impl<'it, T, const N: usize> IntoIterator for &'it OwnedMutSizedSlice<'_, T, N> { + type Item = as Iterator>::Item; + type IntoIter = Iter<'it, T>; + + fn into_iter(self) -> Self::IntoIter { + self.as_sized_slice().iter() + } +} + +impl<'a, T: 'a + Sized, const N: usize> OwnedMutSizedSlice<'a, T, N> { + /// Create a new [`OwnedMutSizedSlice`] from a raw pointer + /// + /// # Safety + /// + /// The pointer must be valid and point to a map of the size `size_of() * N` + /// The content will be dereferenced in subsequent operations. 
+ #[must_use] + pub unsafe fn from_raw_mut(ptr: NonNull<[T; N]>) -> OwnedMutSizedSlice<'a, T, N> { + Self { + inner: OwnedMutSizedSliceInner::RefRaw(ptr.as_ptr(), UnsafeMarker::new()), + } + } + + /// Returns an iterator over the slice. + pub fn iter(&self) -> Iter<'_, T> { + <&Self as IntoIterator>::into_iter(self) + } + + /// Returns a mutable iterator over the slice. + pub fn iter_mut(&mut self) -> IterMut<'_, T> { + <&mut Self as IntoIterator>::into_iter(self) + } +} + +impl Deref for OwnedMutSizedSlice<'_, T, N> { + type Target = [T; N]; + + fn deref(&self) -> &Self::Target { + match &self.inner { + OwnedMutSizedSliceInner::RefRaw(rr, _) => unsafe { &**rr }, + OwnedMutSizedSliceInner::Ref(r) => r, + OwnedMutSizedSliceInner::Owned(v) => v, + } + } +} + +impl DerefMut for OwnedMutSizedSlice<'_, T, N> { + fn deref_mut(&mut self) -> &mut [T; N] { + match &mut self.inner { + OwnedMutSizedSliceInner::RefRaw(rr, _) => unsafe { &mut **rr }, + OwnedMutSizedSliceInner::Ref(r) => r, + OwnedMutSizedSliceInner::Owned(v) => v, + } + } +} + +impl IntoOwned for OwnedMutSizedSlice<'_, T, N> +where + T: Sized + Clone, +{ + #[must_use] + fn is_owned(&self) -> bool { + match self.inner { + OwnedMutSizedSliceInner::RefRaw(..) | OwnedMutSizedSliceInner::Ref(_) => false, + OwnedMutSizedSliceInner::Owned(_) => true, + } + } + + #[must_use] + fn into_owned(self) -> Self { + let slice: Box<[T; N]> = match self.inner { + OwnedMutSizedSliceInner::RefRaw(rr, _) => unsafe { Box::from((*rr).clone()) }, + OwnedMutSizedSliceInner::Ref(r) => Box::from(r.clone()), + OwnedMutSizedSliceInner::Owned(v) => v, + }; + Self { + inner: OwnedMutSizedSliceInner::Owned(slice), + } + } +} + +impl<'a, T: 'a + Clone, const N: usize> Clone for OwnedMutSizedSlice<'a, T, N> { + fn clone(&self) -> Self { + let slice: Box<[T; N]> = match &self.inner { + OwnedMutSizedSliceInner::RefRaw(rr, _) => unsafe { Box::from((**rr).clone()) }, + OwnedMutSizedSliceInner::Ref(r) => Box::from((*r).clone()), + OwnedMutSizedSliceInner::Owned(v) => v.clone(), + }; + + Self { + inner: OwnedMutSizedSliceInner::Owned(slice), + } + } +} + +/// Create a new [`OwnedMutSizedSlice`] from a sized slice +impl From> for OwnedMutSizedSlice<'_, T, N> { + fn from(s: Box<[T; N]>) -> Self { + Self { + inner: OwnedMutSizedSliceInner::Owned(s), + } + } +} + +/// Create a Boxed slice from an [`OwnedMutSizedSlice`], or return the owned boxed sized slice. 
+impl<'a, T, const N: usize> From> for Box<[T; N]> +where + T: Clone, +{ + fn from(slice: OwnedMutSizedSlice<'a, T, N>) -> Self { + let slice = slice.into_owned(); + match slice.inner { + OwnedMutSizedSliceInner::Owned(b) => b, + _ => panic!("Could not own slice!"), + } + } +} + +/// Create a new [`OwnedMutSizedSlice`] from a reference to a boxed sized slice +#[allow(clippy::mut_mut)] // This makes use in some iterators easier +impl<'a, T, const N: usize> From<&'a mut Box<[T; N]>> for OwnedMutSizedSlice<'a, T, N> { + fn from(r: &'a mut Box<[T; N]>) -> Self { + Self { + inner: OwnedMutSizedSliceInner::Ref((*r).as_mut()), + } + } +} + +/// Create a new [`OwnedMutSizedSlice`] from a mutable reference to a sized slice +impl<'a, T, const N: usize> From<&'a mut [T; N]> for OwnedMutSizedSlice<'a, T, N> { + fn from(r: &'a mut [T; N]) -> Self { + Self { + inner: OwnedMutSizedSliceInner::Ref(r), + } + } +} + +/// Create a new [`OwnedMutSizedSlice`] from a nested mutable reference to a sized slice +#[allow(clippy::mut_mut)] // This makes use in some iterators easier +impl<'a, T, const N: usize> From<&'a mut &'a mut [T; N]> for OwnedMutSizedSlice<'a, T, N> { + fn from(r: &'a mut &'a mut [T; N]) -> Self { + Self { + inner: OwnedMutSizedSliceInner::Ref(r), + } + } +} + /// Wrap a C-style pointer and convert to a Box on serialize #[derive(Clone, Debug)] pub enum OwnedPtr { @@ -843,6 +1164,24 @@ impl OwnedMutPtr { pub unsafe fn from_raw_mut(ptr: *mut T) -> Self { Self::Ptr(ptr) } + + /// Get a pointer to the inner object + #[must_use] + pub fn as_ptr(&self) -> *const T { + match self { + OwnedMutPtr::Ptr(ptr) => *ptr, + OwnedMutPtr::Owned(owned) => &**owned, + } + } + + /// Get a mutable pointer to the inner object + #[must_use] + pub fn as_mut_ptr(&mut self) -> *mut T { + match self { + OwnedMutPtr::Ptr(ptr) => *ptr, + OwnedMutPtr::Owned(owned) => &mut **owned, + } + } } impl Serialize for OwnedMutPtr { diff --git a/libafl_bolts/src/rands/loaded_dice.rs b/libafl_bolts/src/rands/loaded_dice.rs index 004ef562ff..474515e14f 100644 --- a/libafl_bolts/src/rands/loaded_dice.rs +++ b/libafl_bolts/src/rands/loaded_dice.rs @@ -11,7 +11,7 @@ Assume we want to sample from the following distribution: `p(0)=0.5, p(1)=0.3, p use libafl_bolts::rands::{StdRand, loaded_dice::LoadedDiceSampler}; fn main() { let mut rand = StdRand::new(); - let mut sampler = LoadedDiceSampler::new(&[0.5, 0.3, 0.1, 0.1]); + let mut sampler = LoadedDiceSampler::new(&[0.5, 0.3, 0.1, 0.1]).unwrap(); let iter: usize = 100; for i in (0..iter) { println!("{}", sampler.sample(&mut rand)); @@ -25,6 +25,7 @@ Original code by @eqv, see use alloc::vec::Vec; use super::Rand; +use crate::Error; /// Helper struct for [`LoadedDiceSampler`] #[derive(Clone, Debug, PartialEq)] @@ -53,15 +54,23 @@ pub struct LoadedDiceSampler { impl LoadedDiceSampler { /// Create a new [`LoadedDiceSampler`] with the given probabilities - #[must_use] - pub fn new(probs: &[f64]) -> Self { + pub fn new(probs: &[f64]) -> Result { + if probs.is_empty() { + return Err(Error::illegal_argument( + "Tried to construct LoadedDiceSampler with empty probs array", + )); + } let entries = LoadedDiceSampler::construct_table(probs); - Self { entries } + Ok(Self { entries }) } /// Get one sample according to the predefined probabilities. pub fn sample(&mut self, rand: &mut R) -> usize { - let index = rand.below(self.entries.len()); + let len = self.entries.len(); + debug_assert_ne!(len, 0, "Length should never be 0 here."); + // # SAFETY + // len can never be 0 here.
+ let index = rand.below(unsafe { len.try_into().unwrap_unchecked() }); let coin = rand.next_float(); let entry = &self.entries[index]; if coin > entry.prob_of_val { @@ -114,7 +123,7 @@ mod tests { let base = (0..len).map(|_| rng.next_float()).collect::>(); let sum: f64 = base.iter().sum(); let base = base.iter().map(|v| v / sum).collect::>(); - let mut sampler = LoadedDiceSampler::new(&base); + let mut sampler = LoadedDiceSampler::new(&base).unwrap(); let mut res: Vec = vec![0; len]; let iter: usize = 1000000; for _ in 0..iter { diff --git a/libafl_bolts/src/rands/mod.rs b/libafl_bolts/src/rands/mod.rs index e7516e8358..d2ba03ee5e 100644 --- a/libafl_bolts/src/rands/mod.rs +++ b/libafl_bolts/src/rands/mod.rs @@ -1,9 +1,11 @@ //! The random number generators of `LibAFL` +#[cfg(all(not(feature = "std"), target_has_atomic = "ptr"))] +use core::sync::atomic::{AtomicUsize, Ordering}; use core::{ debug_assert, fmt::Debug, - sync::atomic::{AtomicUsize, Ordering}, + num::{NonZero, NonZeroUsize}, }; use serde::{de::DeserializeOwned, Deserialize, Serialize}; @@ -11,6 +13,9 @@ use serde::{de::DeserializeOwned, Deserialize, Serialize}; #[cfg(feature = "alloc")] pub mod loaded_dice; +#[cfg(all(not(feature = "std"), target_has_atomic = "ptr"))] +static SEED_COUNTER: AtomicUsize = AtomicUsize::new(0); + /// Return a pseudo-random seed. For `no_std` environments, a single deterministic sequence is used. #[must_use] #[allow(unreachable_code)] @@ -23,10 +28,7 @@ pub fn random_seed() -> u64 { 4 } -static SEED_COUNTER: AtomicUsize = AtomicUsize::new(0); - -#[allow(dead_code)] -#[cfg(target_has_atomic = "ptr")] +#[cfg(all(not(feature = "std"), target_has_atomic = "ptr"))] fn random_seed_deterministic() -> u64 { let mut seed = SEED_COUNTER.fetch_add(1, Ordering::Relaxed) as u64; splitmix64(&mut seed) @@ -51,7 +53,8 @@ fn splitmix64(x: &mut u64) -> u64 { z ^ (z >> 31) } -/// The standard rand implementation for `LibAFL`. +/// The standard [`Rand`] implementation for `LibAFL`. +/// /// It is usually the right choice, with very good speed and a reasonable randomness. /// Not cryptographically secure (which is not what you want during fuzzing ;) ) pub type StdRand = RomuDuoJrRand; @@ -71,12 +74,10 @@ where // create iterator let mut iter = from.into_iter(); - if iter.len() == 0 { - return None; - } + let len = NonZero::new(iter.len())?; // pick a random, valid index - let index = fast_bound(rand, iter.len()); + let index = fast_bound(rand, len); // return the item chosen Some(iter.nth(index).unwrap()) @@ -90,8 +91,14 @@ where /// See: [An optimal algorithm for bounded random integers](https://github.com/apple/swift/pull/39143). 
#[inline] #[must_use] -pub fn fast_bound(rand: u64, n: usize) -> usize { - debug_assert_ne!(n, 0); +pub fn fast_bound(rand: u64, n: NonZeroUsize) -> usize { + let mul = u128::from(rand).wrapping_mul(u128::from(n.get() as u64)); + (mul >> 64) as usize +} + +#[inline] +#[must_use] +fn fast_bound_usize(rand: u64, n: usize) -> usize { let mul = u128::from(rand).wrapping_mul(u128::from(n as u64)); (mul >> 64) as usize } @@ -126,15 +133,26 @@ pub trait Rand: Debug + Serialize + DeserializeOwned { /// Gets a value below the given bound (exclusive) #[inline] - fn below(&mut self, upper_bound_excl: usize) -> usize { + fn below(&mut self, upper_bound_excl: NonZeroUsize) -> usize { fast_bound(self.next(), upper_bound_excl) } + /// Gets a value between [0, n] + fn zero_upto(&mut self, n: usize) -> usize { + fast_bound_usize(self.next(), n) + } + /// Gets a value between the given lower bound (inclusive) and upper bound (inclusive) #[inline] fn between(&mut self, lower_bound_incl: usize, upper_bound_incl: usize) -> usize { debug_assert!(lower_bound_incl <= upper_bound_incl); - lower_bound_incl + self.below(upper_bound_incl - lower_bound_incl + 1) + // # Safety + // We check that lower_bound_incl <= upper_bound_incl above (alas only in debug), so the below is fine. + // Even if we encounter a 0 in release here, the worst-case scenario should be an invalid return value. + lower_bound_incl + + self.below(unsafe { + NonZero::new(upper_bound_incl - lower_bound_incl + 1).unwrap_unchecked() + }) } /// Convenient variant of [`choose`]. @@ -159,17 +177,19 @@ pub trait Rand: Debug + Serialize + DeserializeOwned { // when the Iterator is an ExactSizeIterator. This has a large performance impact on e.g. // seq_iter_choose_from_1000. if upper == Some(lower) { - return if lower == 0 { - None - } else { + return if let Some(lower) = NonZero::new(lower) { iter.nth(self.below(lower)) + } else { + None }; } // Continue until the iterator is exhausted loop { if lower > 1 { - let ix = self.below(lower + consumed); + // # Safety + // lower is > 1, we don't consume more than usize elements, so this should always be non-0. + let ix = self.below(unsafe { NonZero::new(lower + consumed).unwrap_unchecked() }); let skip = if ix < lower { result = iter.nth(ix); lower - (ix + 1) @@ -189,7 +209,9 @@ pub trait Rand: Debug + Serialize + DeserializeOwned { return result; } consumed += 1; - if self.below(consumed) == 0 { + // # SAFETY + // `consumed` can never be 0 here. We just increased it by 1 above.
+ if self.below(unsafe { NonZero::new(consumed).unwrap_unchecked() }) == 0 { result = elem; } } @@ -528,15 +550,18 @@ impl XkcdRand { #[cfg(test)] mod tests { - use crate::rands::{ - Rand, RomuDuoJrRand, RomuTrioRand, Sfc64Rand, StdRand, XorShift64Rand, - Xoshiro256PlusPlusRand, + use crate::{ + nonzero, + rands::{ + Rand, RomuDuoJrRand, RomuTrioRand, Sfc64Rand, StdRand, XorShift64Rand, + Xoshiro256PlusPlusRand, + }, }; fn test_single_rand(rand: &mut R) { assert_ne!(rand.next(), rand.next()); - assert!(rand.below(100) < 100); - assert_eq!(rand.below(1), 0); + assert!(rand.below(nonzero!(100)) < 100); + assert_eq!(rand.below(nonzero!(1)), 0); assert_eq!(rand.between(10, 10), 10); assert!(rand.between(11, 20) > 10); } @@ -689,7 +714,7 @@ pub mod pybind { } } - #[derive(Serialize, Deserialize, Debug, Clone)] + #[derive(Serialize, Deserialize, Debug)] enum PythonRandWrapper { Std(Py), } @@ -697,7 +722,7 @@ pub mod pybind { /// Rand Trait binding #[pyclass(unsendable, name = "Rand")] #[allow(clippy::unsafe_derive_deserialize)] - #[derive(Serialize, Deserialize, Debug, Clone)] + #[derive(Serialize, Deserialize, Debug)] pub struct PythonRand { wrapper: PythonRandWrapper, } @@ -730,7 +755,7 @@ pub mod pybind { } /// Register the classes to the python module - pub fn register(_py: Python, m: &PyModule) -> PyResult<()> { + pub fn register(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; m.add_class::()?; Ok(()) diff --git a/libafl_bolts/src/serdeany.rs b/libafl_bolts/src/serdeany.rs index 8955cd88d8..ccc34a59cc 100644 --- a/libafl_bolts/src/serdeany.rs +++ b/libafl_bolts/src/serdeany.rs @@ -1,29 +1,32 @@ //! Poor-rust-man's downcasts for stuff we send over the wire (or shared maps) +#[cfg(feature = "stable_anymap")] +use alloc::borrow::Cow; use alloc::boxed::Box; -#[cfg(feature = "unsafe_stable_anymap")] -use alloc::string::{String, ToString}; -#[cfg(feature = "unsafe_stable_anymap")] +#[cfg(feature = "stable_anymap")] use core::any::type_name; -#[cfg(not(feature = "unsafe_stable_anymap"))] +#[cfg(not(feature = "stable_anymap"))] use core::any::TypeId; use core::{any::Any, fmt::Debug}; use serde::{de::DeserializeSeed, Deserialize, Deserializer, Serialize, Serializer}; pub use serdeany_registry::*; -#[cfg(not(feature = "unsafe_stable_anymap"))] +#[cfg(not(feature = "stable_anymap"))] use crate::anymap::unpack_type_id; /// The type of a stored type in this anymap (`u128`) -#[cfg(not(feature = "unsafe_stable_anymap"))] +#[cfg(not(feature = "stable_anymap"))] pub type TypeRepr = u128; /// The type of a stored type in this anymap (`String`) -#[cfg(feature = "unsafe_stable_anymap")] -pub type TypeRepr = String; +#[cfg(feature = "stable_anymap")] +pub type TypeRepr = Cow<'static, str>; -#[cfg(not(feature = "unsafe_stable_anymap"))] +/// Error string when no types at all have been registered yet. +pub(crate) const ERR_EMPTY_TYPES_REGISTER: &str = "Empty types registry. 
Please enable the `serdeany_autoreg` feature in libafl_bolts or register all required types manually using RegistryBuilder::register()."; + +#[cfg(not(feature = "stable_anymap"))] fn type_repr() -> TypeRepr where T: 'static, @@ -31,7 +34,7 @@ where unpack_type_id(TypeId::of::()) } -#[cfg(not(feature = "unsafe_stable_anymap"))] +#[cfg(not(feature = "stable_anymap"))] fn type_repr_owned() -> TypeRepr where T: 'static, @@ -39,24 +42,26 @@ where unpack_type_id(TypeId::of::()) } -#[cfg(feature = "unsafe_stable_anymap")] +#[cfg(feature = "stable_anymap")] fn type_repr_owned() -> TypeRepr { - type_name::().to_string() + Cow::Borrowed(type_name::()) } -#[cfg(feature = "unsafe_stable_anymap")] +#[cfg(feature = "stable_anymap")] fn type_repr() -> &'static str { type_name::() } /// A (de)serializable Any trait pub trait SerdeAny: Any + erased_serde::Serialize + Debug { - /// returns this as Any trait + /// Returns this type as [`Any`] trait. fn as_any(&self) -> &dyn Any; - /// returns this as mutable Any trait + /// Returns this as mutable [`Any`] trait. fn as_any_mut(&mut self) -> &mut dyn Any; - /// returns this as boxed Any trait + /// Returns this as boxed [`Any`] trait. fn as_any_boxed(self: Box) -> Box; + /// Returns the [`core::any::type_name`] of this type. + fn type_name(&self) -> &'static str; } /// Wrap a type for serialization @@ -119,7 +124,7 @@ pub mod serdeany_registry { hash_map::{Values, ValuesMut}, HashMap, }; - use serde::{Deserialize, Serialize}; + use serde::{de, Deserialize, Serialize}; use crate::{ serdeany::{ @@ -129,7 +134,8 @@ pub mod serdeany_registry { Error, }; - /// A [`HashMap`] that maps from [`TypeRepr`] to a deserializer and its [`TypeId`]. + /// A [`HashMap`] that maps from [`TypeRepr`] to a deserializer and its [`TypeRepr`]. + /// We store the [`TypeId`] to assert we don't have duplicate types in the case of the `stable_anymap` feature. type DeserializeCallbackMap = HashMap, TypeId)>; /// Visitor object used internally for the [`crate::serdeany::SerdeAny`] registry. @@ -148,13 +154,16 @@ pub mod serdeany_registry { V: serde::de::SeqAccess<'de>, { let id: TypeRepr = visitor.next_element()?.unwrap(); + + let registry = &raw const REGISTRY; let cb = unsafe { - REGISTRY + (*registry) .deserializers .as_ref() - .expect("Empty types registry") + .ok_or_else(|| + de::Error::custom(super::ERR_EMPTY_TYPES_REGISTER))? .get(&id) - .expect("Cannot deserialize an unregistered type") + .ok_or_else(|| de::Error::custom(format_args!("Cannot deserialize the unregistered type with id {id}. Enable the `serdeany_autoreg` feature in libafl_bolts or register all required types manually.")))? .0 }; let seed = DeserializeCallbackSeed:: { cb }; @@ -187,8 +196,10 @@ pub mod serdeany_registry { ) }); - #[cfg(feature = "unsafe_stable_anymap")] - assert_eq!(_entry.1, TypeId::of::(), "Fatal safety error: TypeId of type {} is not equals to the deserializer's TypeId for this type! Two registered types have the same type_name!", type_repr::()); + // We assert that only one element with the given TypeId is in the map. + // This is only necessary for stable_anymap where we don't directly use the TypeId, but the type_name instead. + #[cfg(feature = "stable_anymap")] + assert_eq!(_entry.1, TypeId::of::(), "Fatal safety error: TypeId of type {} is not equal to the deserializer's TypeId for this type!
Two registered types have the same type_name!", type_repr::()); } pub fn finalize(&mut self) { @@ -217,8 +228,9 @@ pub mod serdeany_registry { where T: crate::serdeany::SerdeAny + Serialize + serde::de::DeserializeOwned, { + let registry = &raw mut REGISTRY; unsafe { - REGISTRY.register::(); + (*registry).register::(); } } @@ -227,9 +239,10 @@ pub mod serdeany_registry { /// # Safety /// This may never be called concurrently or at the same time as `register`. /// It dereferences the `REGISTRY` hashmap and adds the given type to it. - pub fn finalize() { + pub unsafe fn finalize() { + let registry = &raw mut REGISTRY; unsafe { - REGISTRY.finalize(); + (*registry).finalize(); } } } @@ -251,22 +264,6 @@ pub mod serdeany_registry { } } - /* - #[cfg(feature = "anymap_debug")] - impl fmt::Debug for SerdeAnyMap { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - let json = serde_json::to_string(&self); - write!(f, "SerdeAnyMap: [{:?}]", json) - } - } - - #[cfg(not(feature = "anymap_debug"))] - impl fmt::Debug for SerdeAnyMap { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!(f, "SerdeAnymap with {} elements", self.len()) - } - }*/ - #[allow(unused_qualifications)] impl SerdeAnyMap { /// Get an element from the map. @@ -277,7 +274,7 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; self.map @@ -293,7 +290,7 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; self.map @@ -309,7 +306,7 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; self.map @@ -351,15 +348,16 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; + let registry = &raw const REGISTRY; assert!( unsafe { - REGISTRY + (*registry) .deserializers .as_ref() - .expect("Empty types registry") + .expect(super::ERR_EMPTY_TYPES_REGISTER) .get(type_repr) .is_some() }, @@ -410,7 +408,7 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; self.map.contains_key(type_repr) @@ -458,7 +456,7 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; match self.map.get(type_repr) { @@ -475,7 +473,7 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; match self.map.get_mut(type_repr) { @@ -494,7 +492,7 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; match self.map.get_mut(type_repr) { @@ -522,7 +520,7 @@ pub mod serdeany_registry { T: 
crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; #[allow(clippy::manual_map)] @@ -548,7 +546,7 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; #[allow(clippy::manual_map)] @@ -614,15 +612,15 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; - + let registry = &raw const REGISTRY; assert!( unsafe { - REGISTRY + (*registry) .deserializers .as_ref() - .expect("Empty types registry") + .expect(super::ERR_EMPTY_TYPES_REGISTER) .get(type_repr) .is_some() }, @@ -721,7 +719,7 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; self.map.contains_key(type_repr) @@ -735,7 +733,7 @@ pub mod serdeany_registry { T: crate::serdeany::SerdeAny, { let type_repr = type_repr::(); - #[cfg(not(feature = "unsafe_stable_anymap"))] + #[cfg(not(feature = "stable_anymap"))] let type_repr = &type_repr; match self.map.get(type_repr) { @@ -768,9 +766,18 @@ impl Serialize for dyn crate::serdeany::SerdeAny { { use serde::ser::SerializeSeq; - let id = crate::anymap::unpack_type_id(self.type_id()); + #[cfg(not(feature = "stable_anymap"))] + let type_id = crate::anymap::unpack_type_id(self.type_id()); + #[cfg(not(feature = "stable_anymap"))] + let type_id = &type_id; + + // For the stable anymap, we use the `type_name` as type id. + // Of course this may go wrong... :) + #[cfg(feature = "stable_anymap")] + let type_id = self.type_name(); + let mut seq = se.serialize_seq(Some(2))?; - seq.serialize_element(&id)?; + seq.serialize_element(type_id)?; seq.serialize_element(&crate::serdeany::Wrap(self))?; seq.end() } @@ -818,6 +825,26 @@ macro_rules! create_register { ($struct_type:ty) => {}; } +/// Manually register a `SerdeAny` type in the [`RegistryBuilder`] +/// +/// Do nothing with the `serdeany_autoreg` feature, as this will be previously registered by ctor. +#[cfg(all(feature = "serdeany_autoreg", not(miri)))] +#[macro_export] +macro_rules! create_manual_register { + ($struct_type:ty) => {}; +} + +/// Manually register a `SerdeAny` type in the [`RegistryBuilder`] +/// +/// Do nothing with the `serdeany_autoreg` feature, as this will be previously registered by ctor. +#[cfg(not(all(feature = "serdeany_autoreg", not(miri))))] +#[macro_export] +macro_rules! create_manual_register { + ($struct_type:ty) => { + $crate::serdeany::RegistryBuilder::register::<$struct_type>(); + }; +} + /// Implement a [`SerdeAny`], registering it in the [`RegistryBuilder`] when on std #[macro_export] macro_rules! impl_serdeany { @@ -840,9 +867,12 @@ macro_rules! impl_serdeany { ) -> $crate::alloc::boxed::Box { self } + + fn type_name(&self) -> &'static str { + core::any::type_name::() + } } - #[cfg(any(not(feature = "serdeany_autoreg"), miri))] impl< $( $lt $( : $clt $(+ $dlt )* )? ),+ > $struct_name < $( $lt ),+ > { /// Manually register this type at a later point in time @@ -850,7 +880,9 @@ macro_rules! impl_serdeany { /// # Safety /// This may never be called concurrently as it dereferences the `RegistryBuilder` without acquiring a lock. 
pub unsafe fn register() { - $crate::serdeany::RegistryBuilder::register::<$struct_name < $( $lt ),+ >>(); + $( + $crate::create_manual_register!($struct_name < $( $opt ),+ >); + )* } } @@ -877,9 +909,12 @@ macro_rules! impl_serdeany { ) -> $crate::alloc::boxed::Box { self } + + fn type_name(&self) -> &'static str { + core::any::type_name::() + } } - #[cfg(any(not(feature = "serdeany_autoreg"), miri))] impl $struct_name { /// Manually register this type at a later point in time /// @@ -887,7 +922,7 @@ macro_rules! impl_serdeany { /// This may never be called concurrently as it dereferences the `RegistryBuilder` without acquiring a lock. #[allow(unused)] pub unsafe fn register() { - $crate::serdeany::RegistryBuilder::register::<$struct_name>(); + $crate::create_manual_register!($struct_name); } } diff --git a/libafl_bolts/src/shmem.rs b/libafl_bolts/src/shmem.rs index fb06682134..4c6631e3ac 100644 --- a/libafl_bolts/src/shmem.rs +++ b/libafl_bolts/src/shmem.rs @@ -102,8 +102,10 @@ impl ShMemDescription { } } +/// The id describing shared memory for the current provider +/// /// An id associated with a given shared memory mapping ([`ShMem`]), which can be used to -/// establish shared-mappings between proccesses. +/// establish shared-mappings between processes. /// Id is a file descriptor if you use `MmapShMem` or `AshmemShMem`. /// That means you have to use shmem server to access to the shmem segment from other processes in these cases. /// On the other hand, id is a unique identifier if you use `CommonUnixShMem` or `Win32ShMem`. @@ -193,6 +195,7 @@ impl Display for ShMemId { } /// A [`ShMem`] is an interface to shared maps. +/// /// They are the backbone of [`crate::llmp`] for inter-process communication. /// All you need for scaling on a new target is to implement this interface, as well as the respective [`ShMemProvider`]. pub trait ShMem: Sized + Debug + Clone + DerefMut { @@ -239,6 +242,7 @@ pub trait ShMem: Sized + Debug + Clone + DerefMut { } /// A [`ShMemProvider`] provides access to shared maps. +/// /// They are the backbone of [`crate::llmp`] for inter-process communication. /// All you need for scaling on a new target is to implement this interface, as well as the respective [`ShMem`]. pub trait ShMemProvider: Clone + Default + Debug { @@ -317,6 +321,7 @@ pub trait ShMemProvider: Clone + Default + Debug { } /// An [`ShMemProvider`] that does not provide any [`ShMem`]. +/// /// This is mainly for testing and type magic. /// The resulting [`NopShMem`] is backed by a simple byte buffer to do some simple non-shared things with. /// Calling [`NopShMemProvider::shmem_from_id_and_size`] will return new maps for the same id every time. @@ -651,11 +656,11 @@ pub mod unix_shmem { ops::{Deref, DerefMut}, ptr, slice, }; - use std::process; + use std::{io, process}; use libc::{ - c_int, c_uchar, close, ftruncate, mmap, munmap, shm_open, shm_unlink, shmat, shmctl, - shmdt, shmget, + c_int, c_uchar, close, fcntl, ftruncate, mmap, munmap, shm_open, shm_unlink, shmat, + shmctl, shmdt, shmget, }; use crate::{ @@ -686,14 +691,21 @@ pub mod unix_shmem { impl MmapShMem { /// Create a new [`MmapShMem`] - /// This will *NOT* automatically delete the shmem files, meaning that it's user's responsibility to delete all `/dev/shm/libafl_*` after fuzzing - pub fn new(map_size: usize, rand_id: u32) -> Result { + /// + /// At most [`MAX_MMAP_FILENAME_LEN`] - 2 values from filename will be used. 
Do not include any characters that are illegal as filenames + /// + /// This will *NOT* automatically delete the shmem files, meaning that it's user's responsibility to delete them after fuzzing + pub fn new(map_size: usize, filename: &[u8]) -> Result { + // # Safety + // No user-provided potentially unsafe parameters. + // FFI Calls. unsafe { - let full_file_name = format!("/libafl_{}_{}", process::id(), rand_id); - let mut filename_path = [0_u8; MAX_MMAP_FILENAME_LEN]; - filename_path - .copy_from_slice(&full_file_name.as_bytes()[..MAX_MMAP_FILENAME_LEN]); - filename_path[MAX_MMAP_FILENAME_LEN - 1] = 0; // Null terminate! + let mut filename_path: [u8; 20] = [0_u8; MAX_MMAP_FILENAME_LEN]; + // Keep room for the leading slash and trailing NULL. + let max_copy = usize::min(filename.len(), MAX_MMAP_FILENAME_LEN - 2); + filename_path[0] = b'/'; + filename_path[1..=max_copy].copy_from_slice(&filename[..max_copy]); + log::info!( "{} Creating shmem {} {:#?}", map_size, @@ -758,6 +770,9 @@ pub mod unix_shmem { #[allow(clippy::unnecessary_wraps)] fn shmem_from_id_and_size(id: ShMemId, map_size: usize) -> Result { + // # Safety + // No user-provided potentially unsafe parameters. + // FFI Calls. unsafe { /* map the shared memory segment to the address space of the process */ #[cfg(target_vendor = "apple")] @@ -825,52 +840,42 @@ pub mod unix_shmem { pub fn filename_path(&self) -> &Option<[u8; MAX_MMAP_FILENAME_LEN]> { &self.filename_path } - } - /// A [`ShMemProvider`] which uses [`shm_open`] and [`mmap`] to provide shared memory mappings. - #[cfg(unix)] - #[derive(Clone, Debug)] - pub struct MmapShMemProvider {} + /// Makes a shared memory mapping available in other processes. + /// + /// Only available on UNIX systems at the moment. + /// + /// You likely want to pass the [`crate::shmem::ShMemDescription`] of the returned [`ShMem`] + /// and reopen the shared memory in the child process using [`crate::shmem::ShMemProvider::shmem_from_description`]. + /// + /// # Errors + /// + /// This function will return an error if the appropriate flags could not be extracted or set. + #[cfg(any(unix, doc))] + pub fn persist(self) -> Result { + let fd = self.shm_fd; - unsafe impl Send for MmapShMemProvider {} + // # Safety + // No user-provided potentially unsafe parameters. + // FFI Calls. + unsafe { + let flags = fcntl(fd, libc::F_GETFD); - #[cfg(unix)] - impl Default for MmapShMemProvider { - fn default() -> Self { - Self::new().unwrap() - } - } + if flags == -1 { + return Err(Error::os_error( + io::Error::last_os_error(), + "Failed to retrieve FD flags", + )); + } - /// Implement [`ShMemProvider`] for [`MmapShMemProvider`]. 
- #[cfg(unix)] - impl ShMemProvider for MmapShMemProvider { - type ShMem = MmapShMem; - - fn new() -> Result { - Ok(Self {}) - } - fn new_shmem(&mut self, map_size: usize) -> Result { - let mut rand = StdRand::with_seed(crate::rands::random_seed()); - let id = rand.next() as u32; - MmapShMem::new(map_size, id) - } - - fn shmem_from_id_and_size( - &mut self, - id: ShMemId, - size: usize, - ) -> Result { - MmapShMem::shmem_from_id_and_size(id, size) - } - - fn release_shmem(&mut self, shmem: &mut Self::ShMem) { - let fd = CStr::from_bytes_until_nul(shmem.id().as_array()) - .unwrap() - .to_str() - .unwrap() - .parse() - .unwrap(); - unsafe { close(fd) }; + if fcntl(fd, libc::F_SETFD, flags & !libc::FD_CLOEXEC) == -1 { + return Err(Error::os_error( + io::Error::last_os_error(), + "Failed to set FD flags", + )); + } + } + Ok(self) } } @@ -884,18 +889,25 @@ pub mod unix_shmem { type Target = [u8]; fn deref(&self) -> &[u8] { + // # Safety + // No user-provided potentially unsafe parameters. unsafe { slice::from_raw_parts(self.map, self.map_size) } } } impl DerefMut for MmapShMem { fn deref_mut(&mut self) -> &mut [u8] { + // # Safety + // No user-provided potentially unsafe parameters. unsafe { slice::from_raw_parts_mut(self.map, self.map_size) } } } impl Drop for MmapShMem { fn drop(&mut self) { + // # Safety + // No user-provided potentially unsafe parameters. + // Mutable borrow so no possible race. unsafe { assert!( !self.map.is_null(), @@ -922,6 +934,71 @@ pub mod unix_shmem { } } + /// A [`ShMemProvider`] which uses [`shm_open`] and [`mmap`] to provide shared memory mappings. + #[cfg(unix)] + #[derive(Clone, Debug)] + pub struct MmapShMemProvider {} + + impl MmapShMemProvider { + /// Create a [`MmapShMem`] with the specified size and id. + /// + /// At most [`MAX_MMAP_FILENAME_LEN`] - 2 values from filename will be used. Do not include any characters that are illegal as filenames. + #[cfg(any(unix, doc))] + pub fn new_shmem_with_id( + &mut self, + map_size: usize, + id: &[u8], + ) -> Result { + MmapShMem::new(map_size, id) + } + } + + unsafe impl Send for MmapShMemProvider {} + + #[cfg(unix)] + impl Default for MmapShMemProvider { + fn default() -> Self { + Self::new().unwrap() + } + } + + /// Implement [`ShMemProvider`] for [`MmapShMemProvider`]. + #[cfg(unix)] + impl ShMemProvider for MmapShMemProvider { + type ShMem = MmapShMem; + + fn new() -> Result { + Ok(Self {}) + } + + fn new_shmem(&mut self, map_size: usize) -> Result { + let mut rand = StdRand::with_seed(crate::rands::random_seed()); + let id = rand.next() as u32; + let mut full_file_name = format!("/libafl_{}_{}", process::id(), id); + // leave one byte space for the null byte. + full_file_name.truncate(MAX_MMAP_FILENAME_LEN - 1); + MmapShMem::new(map_size, full_file_name.as_bytes()) + } + + fn shmem_from_id_and_size( + &mut self, + id: ShMemId, + size: usize, + ) -> Result { + MmapShMem::shmem_from_id_and_size(id, size) + } + + fn release_shmem(&mut self, shmem: &mut Self::ShMem) { + let fd = CStr::from_bytes_until_nul(shmem.id().as_array()) + .unwrap() + .to_str() + .unwrap() + .parse() + .unwrap(); + unsafe { close(fd) }; + } + } + /// The default sharedmap impl for unix using shmctl & shmget #[derive(Clone, Debug)] pub struct CommonUnixShMem { @@ -1276,6 +1353,188 @@ pub mod unix_shmem { } } } + + /// Module containing `memfd` shared memory support, usable on Linux and Android. 
+ #[cfg(all( + unix, + feature = "std", + any(target_os = "linux", target_os = "android", target_os = "freebsd") + ))] + pub mod memfd { + use alloc::string::ToString; + use core::{ + ops::{Deref, DerefMut}, + ptr, slice, + }; + use std::{ffi::CString, os::fd::IntoRawFd}; + + use libc::{ + c_void, close, fstat, ftruncate, mmap, munmap, MAP_SHARED, PROT_READ, PROT_WRITE, + }; + use nix::sys::memfd::{memfd_create, MemFdCreateFlag}; + + use crate::{ + shmem::{ShMem, ShMemId, ShMemProvider}, + Error, + }; + + /// A memfd-based impl for linux/android + #[cfg(unix)] + #[derive(Clone, Debug)] + pub struct MemfdShMem { + id: ShMemId, + map: *mut u8, + map_size: usize, + } + + impl MemfdShMem { + /// Create a new shared memory mapping, using `memfd_create` and `mmap` + pub fn new(map_size: usize) -> Result { + unsafe { + let c_str = CString::new("libAFL").unwrap(); + let Ok(fd) = memfd_create(&c_str, MemFdCreateFlag::empty()) else { + return Err(Error::last_os_error("Failed to create memfd".to_string())); + }; + let fd = fd.into_raw_fd(); + + #[allow(clippy::cast_possible_wrap)] + if ftruncate(fd, map_size as i64) == -1 { + close(fd); + return Err(Error::last_os_error(format!( + "Failed to ftruncate memfd to {map_size}" + ))); + } + let map = mmap( + ptr::null_mut(), + map_size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fd, + 0, + ); + if map == usize::MAX as *mut c_void { + close(fd); + return Err(Error::unknown( + "Failed to map the memfd mapping".to_string(), + )); + } + Ok(Self { + id: ShMemId::from_int(fd), + map: map as *mut u8, + map_size, + }) + } + } + + fn shmem_from_id_and_size(id: ShMemId, map_size: usize) -> Result { + let fd = i32::from(id); + unsafe { + let mut stat = std::mem::zeroed(); + if fstat(fd, &mut stat) == -1 { + return Err(Error::unknown( + "Failed to map the memfd mapping".to_string(), + )); + } + #[allow(clippy::cast_sign_loss)] + if stat.st_size as usize != map_size { + return Err(Error::unknown( + "The mapping's size differs from the requested size".to_string(), + )); + } + let map = mmap( + ptr::null_mut(), + map_size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + fd, + 0, + ); + if map == usize::MAX as *mut c_void { + return Err(Error::last_os_error(format!( + "mmap() failed for map with fd {fd:?}" + ))); + } + Ok(Self { + id: ShMemId::from_int(fd), + map: map as *mut u8, + map_size, + }) + } + } + } + + #[cfg(unix)] + impl ShMem for MemfdShMem { + fn id(&self) -> ShMemId { + self.id + } + } + + impl Deref for MemfdShMem { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + unsafe { slice::from_raw_parts(self.map, self.map_size) } + } + } + + impl DerefMut for MemfdShMem { + fn deref_mut(&mut self) -> &mut [u8] { + unsafe { slice::from_raw_parts_mut(self.map, self.map_size) } + } + } + + /// [`Drop`] implementation for [`MemfdShMem`], which cleans up the mapping. + #[cfg(unix)] + impl Drop for MemfdShMem { + #[allow(trivial_numeric_casts)] + fn drop(&mut self) { + let fd = i32::from(self.id); + + unsafe { + munmap(self.map as *mut _, self.map_size); + close(fd); + } + } + } + + /// A [`ShMemProvider`] which uses memfd to provide shared memory mappings.
+ #[cfg(unix)] + #[derive(Clone, Debug)] + pub struct MemfdShMemProvider {} + + unsafe impl Send for MemfdShMemProvider {} + + #[cfg(unix)] + impl Default for MemfdShMemProvider { + fn default() -> Self { + Self::new().unwrap() + } + } + + /// Implement [`ShMemProvider`] for [`MemfdShMemProvider`] + #[cfg(unix)] + impl ShMemProvider for MemfdShMemProvider { + type ShMem = MemfdShMem; + + fn new() -> Result { + Ok(Self {}) + } + + fn new_shmem(&mut self, map_size: usize) -> Result { + let mapping = MemfdShMem::new(map_size)?; + Ok(mapping) + } + + fn shmem_from_id_and_size( + &mut self, + id: ShMemId, + size: usize, + ) -> Result { + MemfdShMem::shmem_from_id_and_size(id, size) + } + } + } } /// Then `win32` implementation for shared memory. @@ -1306,7 +1565,7 @@ pub mod win32_shmem { Error, }; - const INVALID_HANDLE_VALUE: isize = -1; + const INVALID_HANDLE_VALUE: *mut c_void = -1isize as *mut c_void; /// The default [`ShMem`] impl for Windows using `shmctl` & `shmget` #[derive(Clone)] @@ -1369,7 +1628,7 @@ pub mod win32_shmem { let handle = OpenFileMappingA( FILE_MAP_ALL_ACCESS.0, BOOL(0), - PCSTR(map_str_bytes.as_ptr() as *mut _), + PCSTR(map_str_bytes.as_ptr().cast_mut()), )?; let map = @@ -1570,16 +1829,42 @@ mod tests { use crate::{ shmem::{ShMemProvider, StdShMemProvider}, - AsSlice, AsSliceMut, + AsSlice, AsSliceMut, Error, }; #[test] #[serial] #[cfg_attr(miri, ignore)] - fn test_shmem_service() { - let mut provider = StdShMemProvider::new().unwrap(); - let mut map = provider.new_shmem(1024).unwrap(); + fn test_shmem_service() -> Result<(), Error> { + let mut provider = StdShMemProvider::new()?; + let mut map = provider.new_shmem(1024)?; map.as_slice_mut()[0] = 1; - assert!(map.as_slice()[0] == 1); + assert_eq!(1, map.as_slice()[0]); + Ok(()) + } + + #[test] + #[cfg(all(unix, not(miri)))] + #[cfg_attr(miri, ignore)] + fn test_persist_shmem() -> Result<(), Error> { + use std::thread; + + use crate::shmem::{MmapShMemProvider, ShMem as _}; + + let mut provider = MmapShMemProvider::new()?; + let mut shmem = provider.new_shmem(1)?.persist()?; + shmem.fill(0); + + let description = shmem.description(); + + let handle = thread::spawn(move || -> Result<(), Error> { + let mut provider = MmapShMemProvider::new()?; + let mut shmem = provider.shmem_from_description(description)?; + shmem.as_slice_mut()[0] = 1; + Ok(()) + }); + handle.join().unwrap()?; + assert_eq!(1, shmem.as_slice()[0]); + Ok(()) } } diff --git a/libafl_bolts/src/staterestore.rs b/libafl_bolts/src/staterestore.rs index bbebe44dff..1542c3b089 100644 --- a/libafl_bolts/src/staterestore.rs +++ b/libafl_bolts/src/staterestore.rs @@ -60,6 +60,7 @@ impl StateShMemContent { } /// A [`StateRestorer`] saves and restores bytes to a shared map. +/// /// If the state gets larger than the preallocated [`ShMem`] shared map, /// it will instead write to disk, and store the file name into the map. /// Writing to [`StateRestorer`] multiple times is not allowed. 
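// A minimal usage sketch of the `MmapShMem::persist()` flow introduced in the shmem.rs
// changes above; this sketch is not part of the patch itself. It mirrors the new
// `test_persist_shmem` test: `persist()` clears FD_CLOEXEC so the backing fd survives
// into another process, which then reopens the mapping from its `ShMemDescription`.
// Using a thread instead of a child process, and the buffer size, are assumptions made
// only to keep the sketch self-contained (Unix-only).
use libafl_bolts::{
    shmem::{MmapShMemProvider, ShMem, ShMemProvider},
    AsSliceMut, Error,
};

fn persist_roundtrip() -> Result<(), Error> {
    let mut provider = MmapShMemProvider::new()?;
    // `persist()` consumes the mapping and returns it with FD_CLOEXEC cleared.
    let mut shmem = provider.new_shmem(16)?.persist()?;
    shmem.as_slice_mut()[0] = 0;

    // Hand the description to the other side; in practice it would be serialized and
    // passed to a child process (a thread stands in for it here).
    let description = shmem.description();
    std::thread::spawn(move || -> Result<(), Error> {
        let mut provider = MmapShMemProvider::new()?;
        let mut reopened = provider.shmem_from_description(description)?;
        reopened.as_slice_mut()[0] = 1;
        Ok(())
    })
    .join()
    .unwrap()?;

    // Both mappings alias the same shared memory, so the write is visible here.
    assert_eq!(shmem.as_slice_mut()[0], 1);
    Ok(())
}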
@@ -194,11 +195,7 @@ where let shmem_content = self.content_mut(); unsafe { - ptr::copy_nonoverlapping( - EXITING_MAGIC as *const u8, - shmem_content.buf.as_mut_ptr(), - len, - ); + ptr::copy_nonoverlapping(EXITING_MAGIC.as_ptr(), shmem_content.buf.as_mut_ptr(), len); } shmem_content.buf_len = EXITING_MAGIC.len(); } @@ -285,22 +282,24 @@ where #[cfg(test)] mod tests { - use alloc::{ - string::{String, ToString}, - vec::Vec, - }; - + #[cfg(not(target_os = "haiku"))] use serial_test::serial; - use crate::{ - shmem::{ShMemProvider, StdShMemProvider}, - staterestore::StateRestorer, - }; - #[test] #[serial] #[cfg_attr(miri, ignore)] + #[cfg(not(target_os = "haiku"))] fn test_state_restore() { + use alloc::{ + string::{String, ToString}, + vec::Vec, + }; + + use crate::{ + shmem::{ShMemProvider, StdShMemProvider}, + staterestore::StateRestorer, + }; + const TESTMAP_SIZE: usize = 1024; let mut shmem_provider = StdShMemProvider::new().unwrap(); diff --git a/libafl_bolts/src/subrange.rs b/libafl_bolts/src/subrange.rs new file mode 100644 index 0000000000..6b9ddf12b8 --- /dev/null +++ b/libafl_bolts/src/subrange.rs @@ -0,0 +1,375 @@ +//! Subrange of things. +//! Convenient wrappers to handle sub-slices efficiently. + +use core::{ + cmp::min, + ops::{Bound, Range, RangeBounds}, +}; + +use crate::{ + ownedref::{OwnedMutSlice, OwnedSlice}, + HasLen, +}; + +/// An immutable contiguous subslice of a byte slice. +/// It is mostly useful to cheaply wrap a subslice of a given input. +/// +/// A mutable version is available: [`SubRangeMutSlice`]. +#[derive(Debug)] +pub struct SubRangeSlice<'a, T> { + /// The (complete) parent input we will work on + parent_slice: OwnedSlice<'a, T>, + /// The range inside the parent input we will work on + range: Range, +} + +/// A mutable contiguous subslice of a byte slice. +/// It is mostly useful to cheaply wrap a subslice of a given input. +/// +/// An immutable version is available: [`SubRangeSlice`]. +#[derive(Debug)] +pub struct SubRangeMutSlice<'a, T> { + /// The (complete) parent input we will work on + parent_slice: OwnedMutSlice<'a, T>, + /// The range inside the parent input we will work on + range: Range, +} + +/// Slice wrapper keeping track of the current read position. +/// Convenient wrapper when the slice must be split in multiple sub-slices and read sequentially. +#[derive(Debug)] +pub struct SliceReader<'a, T> { + parent_slice: &'a [T], + pos: usize, +} + +impl<'a, T> SliceReader<'a, T> { + /// Create a new [`SliceReader`]. + /// The position of the reader is initialized to 0. + #[must_use] + pub fn new(parent_slice: &'a [T]) -> Self { + Self { + parent_slice, + pos: 0, + } + } + + /// Read an immutable sub-slice from the parent slice, from the current cursor position up to `limit` elements. + /// If the resulting slice would go beyond the end of the parent slice, it will be truncated to the length of the parent slice. + /// This function does not provide any feedback on whether the slice was cropped or not. + #[must_use] + pub fn next_sub_slice_truncated(&mut self, limit: usize) -> SubRangeSlice<'a, T> { + let sub_slice = SubRangeSlice::with_slice(self.parent_slice, self.pos..(self.pos + limit)); + + self.pos += sub_slice.len(); + + sub_slice + } + + /// Read an immutable sub-slice from the parent slice, from the current cursor position up to `limit` bytes. + /// If the resulting slice would go beyond the end of the parent slice, it will be limited to the length of the parent slice. 
+ /// The function returns + /// - `Ok(Slice)` if the returned slice has `limit` elements. + /// - `Err(Partial(slice))` if the returned slice has strictly less than `limit` elements and is not empty. + /// - `Err(Empty)` if the reader was already at the end or `limit` equals zero. + pub fn next_sub_input( + &mut self, + limit: usize, + ) -> Result, PartialSubRangeSlice<'a, T>> { + let slice_to_return = self.next_sub_slice_truncated(limit); + + let real_len = slice_to_return.len(); + + if real_len == 0 { + Err(PartialSubRangeSlice::Empty) + } else if real_len < limit { + Err(PartialSubRangeSlice::Partial(slice_to_return)) + } else { + Ok(slice_to_return) + } + } +} + +impl<'a, T> From<&'a [T]> for SliceReader<'a, T> { + fn from(input: &'a [T]) -> Self { + Self::new(input) + } +} + +impl HasLen for SubRangeSlice<'_, T> { + #[inline] + fn len(&self) -> usize { + self.range.len() + } +} + +impl HasLen for SubRangeMutSlice<'_, T> { + #[inline] + fn len(&self) -> usize { + self.range.len() + } +} + +/// Gets the relevant concrete start index from [`RangeBounds`] (inclusive) +pub fn start_index(range: &R) -> usize +where + R: RangeBounds, +{ + match range.start_bound() { + Bound::Unbounded => 0, + Bound::Included(start) => *start, + Bound::Excluded(start) => start + 1, + } +} + +/// Gets the relevant concrete end index from [`RangeBounds`] (exclusive) +pub fn end_index(range: &R, max_len: usize) -> usize +where + R: RangeBounds, +{ + let end = match range.end_bound() { + Bound::Unbounded => max_len, + Bound::Included(end) => end + 1, + Bound::Excluded(end) => *end, + }; + + min(end, max_len) +} + +/// Gets the relevant subrange of a [`Range`] from [`RangeBounds`]. +pub fn sub_range(outer_range: &Range, inner_range: R) -> (Bound, Bound) +where + R: RangeBounds, +{ + let start = + match (outer_range.start_bound(), inner_range.start_bound()) { + (Bound::Unbounded, Bound::Unbounded) => Bound::Unbounded, + (Bound::Excluded(bound), Bound::Unbounded) + | (Bound::Unbounded, Bound::Excluded(bound)) => Bound::Excluded(*bound), + (Bound::Included(bound), Bound::Unbounded) + | (Bound::Unbounded, Bound::Included(bound)) => Bound::Included(*bound), + (Bound::Included(own), Bound::Included(other)) => Bound::Included(own + other), + (Bound::Included(own), Bound::Excluded(other)) + | (Bound::Excluded(own), Bound::Included(other)) => Bound::Excluded(own + other), + (Bound::Excluded(own), Bound::Excluded(other)) => Bound::Excluded(own + other + 1), + }; + + let end = match (outer_range.end_bound(), inner_range.end_bound()) { + (Bound::Unbounded, Bound::Unbounded) => Bound::Unbounded, + (Bound::Excluded(bound), Bound::Unbounded) => Bound::Excluded(*bound), + (Bound::Unbounded, Bound::Excluded(bound)) => Bound::Excluded(outer_range.end - *bound), + (Bound::Included(bound), Bound::Unbounded) => Bound::Included(*bound), + (Bound::Unbounded, Bound::Included(bound)) => Bound::Included(outer_range.end - *bound), + (Bound::Included(own), Bound::Included(other)) => { + Bound::Included(min(*own, outer_range.start + other)) + } + (Bound::Included(own), Bound::Excluded(other)) => { + Bound::Included(min(*own, outer_range.start + other - 1)) + } + (Bound::Excluded(own), Bound::Included(other)) => { + Bound::Included(min(*own - 1, outer_range.start + other)) + } + (Bound::Excluded(own), Bound::Excluded(other)) => { + Bound::Excluded(min(*own, outer_range.start + other)) + } + }; + + (start, end) +} + +/// Representation of a partial slice +/// This is used when providing a slice smaller than the expected one. 
+/// It notably happens when trying to read the end of an input. +#[derive(Debug)] +pub enum PartialSubRangeSlice<'a, T> { + /// The slice is empty, and thus not kept + Empty, + /// The slice is strictly smaller than the expected one. + Partial(SubRangeSlice<'a, T>), +} + +impl<'a, T> PartialSubRangeSlice<'a, T> { + /// Consumes this `PartialSubRangeSlice` and returns true if it was empty, false otherwise. + #[must_use] + pub fn empty(self) -> bool { + matches!(self, PartialSubRangeSlice::Empty) + } + + /// Consumes this `PartialSubRangeSlice` and returns the partial slice if it was a partial slice, None otherwise. + #[must_use] + pub fn partial(self) -> Option> { + #[allow(clippy::match_wildcard_for_single_variants)] + match self { + PartialSubRangeSlice::Partial(partial_slice) => Some(partial_slice), + _ => None, + } + } +} + +impl<'a, T> SubRangeSlice<'a, T> { + /// Creates a new [`SubRangeSlice`], a sub-slice representation of a byte array. + pub fn new(parent_slice: OwnedSlice<'a, T>, range: R) -> Self + where + R: RangeBounds, + { + let parent_len = parent_slice.len(); + + SubRangeSlice { + parent_slice, + range: Range { + start: start_index(&range), + end: end_index(&range, parent_len), + }, + } + } + + /// Get the sub slice as bytes. + #[must_use] + pub fn as_slice(&self) -> &[T] { + &self.parent_slice[self.range.clone()] + } + + /// Creates a new [`SubRangeSlice`] that's a sliced view on a bytes slice. + pub fn with_slice(parent_slice: &'a [T], range: R) -> Self + where + R: RangeBounds, + { + Self::new(parent_slice.into(), range) + } + + /// The parent input + #[must_use] + pub fn parent_slice(self) -> OwnedSlice<'a, T> { + self.parent_slice + } + + /// The inclusive start index in the parent buffer + #[must_use] + pub fn start_index(&self) -> usize { + self.range.start + } + + /// The exclusive end index in the parent buffer + #[must_use] + pub fn end_index(&self) -> usize { + self.range.end + } + + /// Creates a sub range in the current own range + pub fn sub_range(&self, range: R) -> (Bound, Bound) + where + R: RangeBounds, + { + sub_range(&self.range, range) + } +} + +impl<'a, T> SubRangeMutSlice<'a, T> { + /// Creates a new [`SubRangeMutSlice`], a sub-slice representation of a byte array. + pub fn new(parent_slice: OwnedMutSlice<'a, T>, range: R) -> Self + where + R: RangeBounds, + { + let parent_len = parent_slice.len(); + + SubRangeMutSlice { + parent_slice, + range: Range { + start: start_index(&range), + end: end_index(&range, parent_len), + }, + } + } + + /// Get the sub slice as bytes. + #[must_use] + pub fn as_slice(&self) -> &[T] { + &self.parent_slice[self.range.clone()] + } + + /// Get the sub slice as bytes. + #[must_use] + pub fn as_slice_mut(&mut self) -> &mut [T] { + &mut self.parent_slice[self.range.clone()] + } + + /// Creates a new [`SubRangeMutSlice`] that's a view on a bytes slice. + /// The sub-slice can then be used to mutate parts of the original bytes.
+ pub fn with_slice(parent_slice: &'a mut [T], range: R) -> Self + where + R: RangeBounds, + { + Self::new(parent_slice.into(), range) + } + + /// The parent input + #[must_use] + pub fn parent_slice(self) -> OwnedMutSlice<'a, T> { + self.parent_slice + } + + /// The inclusive start index in the parent buffer + #[must_use] + pub fn start_index(&self) -> usize { + self.range.start + } + + /// The exclusive end index in the parent buffer + #[must_use] + pub fn end_index(&self) -> usize { + self.range.end + } + + /// Creates a sub range in the current own range + pub fn sub_range(&self, range: R) -> (Bound, Bound) + where + R: RangeBounds, + { + sub_range(&self.range, range) + } +} + +#[cfg(test)] +mod tests { + use super::SliceReader; + + #[test] + fn test_bytesreader_toslice_unchecked() { + let bytes_input = vec![1, 2, 3, 4, 5, 6, 7]; + let mut bytes_reader = SliceReader::new(&bytes_input); + + let bytes_read = bytes_reader.next_sub_slice_truncated(2); + assert_eq!(*bytes_read.as_slice(), [1, 2]); + + let bytes_read = bytes_reader.next_sub_slice_truncated(3); + assert_eq!(*bytes_read.as_slice(), [3, 4, 5]); + + let bytes_read = bytes_reader.next_sub_slice_truncated(8); + assert_eq!(*bytes_read.as_slice(), [6, 7]); + + let bytes_read = bytes_reader.next_sub_slice_truncated(8); + let bytes_read_ref: &[u8] = &[]; + assert_eq!(bytes_read.as_slice(), bytes_read_ref); + } + + #[test] + fn test_bytesreader_toslice() { + let bytes_input = vec![1, 2, 3, 4, 5, 6, 7]; + let mut bytes_reader = SliceReader::new(&bytes_input); + + let bytes_read = bytes_reader.next_sub_input(2); + assert_eq!(*bytes_read.unwrap().as_slice(), [1, 2]); + + let bytes_read = bytes_reader.next_sub_input(3); + assert_eq!(*bytes_read.unwrap().as_slice(), [3, 4, 5]); + + let bytes_read = bytes_reader.next_sub_input(8); + assert_eq!( + *bytes_read.unwrap_err().partial().unwrap().as_slice(), + [6, 7] + ); + + let bytes_read = bytes_reader.next_sub_input(8); + assert!(bytes_read.unwrap_err().empty()); + } +} diff --git a/libafl_bolts/src/tuples.rs b/libafl_bolts/src/tuples.rs index 5f304ffe39..05280f3298 100644 --- a/libafl_bolts/src/tuples.rs +++ b/libafl_bolts/src/tuples.rs @@ -11,7 +11,6 @@ use core::{ marker::PhantomData, mem::transmute, ops::{Index, IndexMut}, - ptr::{addr_of, addr_of_mut}, }; #[cfg(feature = "alloc")] @@ -33,7 +32,7 @@ pub fn type_eq() -> bool { struct W<'a, T: ?Sized, U: ?Sized>(&'a Cell, PhantomData (&'a T, &'a U)>); // default implementation: if the types are unequal, we will use the clone implementation - impl<'a, T: ?Sized, U: ?Sized> Clone for W<'a, T, U> { + impl Clone for W<'_, T, U> { #[inline] fn clone(&self) -> Self { // indicate that the types are unequal @@ -46,7 +45,7 @@ pub fn type_eq() -> bool { // specialized implementation: Copy is only implemented if the types are the same #[allow(clippy::mismatching_type_param_order)] - impl<'a, T: ?Sized> Copy for W<'a, T, T> {} + impl Copy for W<'_, T, T> {} let detected = Cell::new(true); // [].clone() is *specialized* in core. 
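// A small usage sketch of `type_eq` from the tuples.rs hunk above; not part of the
// patch itself. The results follow from the clone/copy specialization trick: the
// `Copy` impl (and thus the specialized clone) only applies when both type parameters
// are the same type, and no `Sized` or `'static` bound is required. The
// `libafl_bolts::tuples::type_eq` import path is an assumption.
fn type_eq_demo() {
    use libafl_bolts::tuples::type_eq;

    assert!(type_eq::<u64, u64>());
    assert!(!type_eq::<u64, i64>());
    // Unsized types work too, since the check needs no `TypeId`.
    assert!(type_eq::<str, str>());
    assert!(!type_eq::<str, [u8]>());
}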
@@ -243,7 +242,7 @@ where { fn match_first_type(&self) -> Option<&T> { if TypeId::of::() == TypeId::of::() { - unsafe { (addr_of!(self.0) as *const T).as_ref() } + unsafe { (&raw const self.0 as *const T).as_ref() } } else { self.1.match_first_type::() } @@ -251,7 +250,7 @@ where fn match_first_type_mut(&mut self) -> Option<&mut T> { if TypeId::of::() == TypeId::of::() { - unsafe { (addr_of_mut!(self.0) as *mut T).as_mut() } + unsafe { (&raw mut self.0 as *mut T).as_mut() } } else { self.1.match_first_type_mut::() } @@ -398,7 +397,7 @@ where fn match_type(&self, f: &mut FN) { // Switch this check to https://stackoverflow.com/a/60138532/7658998 when in stable and remove 'static if TypeId::of::() == TypeId::of::() { - f(unsafe { (addr_of!(self.0) as *const T).as_ref() }.unwrap()); + f(unsafe { (&raw const self.0 as *const T).as_ref() }.unwrap()); } self.1.match_type::(f); } @@ -406,7 +405,7 @@ where fn match_type_mut(&mut self, f: &mut FN) { // Switch this check to https://stackoverflow.com/a/60138532/7658998 when in stable and remove 'static if TypeId::of::() == TypeId::of::() { - f(unsafe { (addr_of_mut!(self.0) as *mut T).as_mut() }.unwrap()); + f(unsafe { (&raw mut self.0 as *mut T).as_mut() }.unwrap()); } self.1.match_type_mut::(f); } @@ -417,6 +416,9 @@ where pub trait NamedTuple: HasConstLen { /// Gets the name of this tuple fn name(&self, index: usize) -> Option<&Cow<'static, str>>; + + /// Gets all the names + fn names(&self) -> Vec>; } #[cfg(feature = "alloc")] @@ -424,6 +426,10 @@ impl NamedTuple for () { fn name(&self, _index: usize) -> Option<&Cow<'static, str>> { None } + + fn names(&self) -> Vec> { + Vec::new() + } } #[cfg(feature = "alloc")] @@ -448,6 +454,13 @@ where self.1.name(index - 1) } } + + fn names(&self) -> Vec> { + let first = self.0.name().clone(); + let mut last = self.1.names(); + last.insert(0, first); + last + } } /// Match for a name and return the value @@ -480,7 +493,7 @@ where { fn match_name(&self, name: &str) -> Option<&T> { if type_eq::() && name == self.0.name() { - unsafe { (addr_of!(self.0) as *const T).as_ref() } + unsafe { (&raw const self.0 as *const T).as_ref() } } else { self.1.match_name::(name) } @@ -488,7 +501,7 @@ where fn match_name_mut(&mut self, name: &str) -> Option<&mut T> { if type_eq::() && name == self.0.name() { - unsafe { (addr_of_mut!(self.0) as *mut T).as_mut() } + unsafe { (&raw mut self.0 as *mut T).as_mut() } } else { self.1.match_name_mut::(name) } @@ -596,7 +609,6 @@ pub struct RefIndexable(RM, PhantomData); impl From for RefIndexable where RM: Deref, - M: MatchName, { fn from(value: RM) -> Self { RefIndexable(value, PhantomData) @@ -657,54 +669,32 @@ where /// Allows prepending of values to a tuple pub trait Prepend { - /// The Resulting [`TupleList`], of an [`Prepend::prepend()`] call, - /// including the prepended entry. - type PreprendResult; - /// Prepend a value to this tuple, returning a new tuple with prepended value. #[must_use] - fn prepend(self, value: T) -> (T, Self::PreprendResult); + fn prepend(self, value: T) -> (T, Self); } /// Implement prepend for tuple list. impl Prepend for Tail { - type PreprendResult = Self; - - fn prepend(self, value: T) -> (T, Self::PreprendResult) { + fn prepend(self, value: T) -> (T, Self) { (value, self) } } /// Append to a tuple -pub trait Append { - /// The Resulting [`TupleList`], of an [`Append::append()`] call, - /// including the appended entry. 
- type AppendResult; - +pub trait Append +where + Self: Sized, +{ /// Append Value and return the tuple #[must_use] - fn append(self, value: T) -> Self::AppendResult; + fn append(self, value: T) -> (Self, T); } -/// Implement append for an empty tuple list. -impl Append for () { - type AppendResult = (T, ()); - - fn append(self, value: T) -> Self::AppendResult { - (value, ()) - } -} - -/// Implement append for non-empty tuple list. -impl Append for (Head, Tail) -where - Tail: Append, -{ - type AppendResult = (Head, Tail::AppendResult); - - fn append(self, value: T) -> Self::AppendResult { - let (head, tail) = self; - (head, tail.append(value)) +/// Implement append for tuple list. +impl Append for Head { + fn append(self, value: T) -> (Self, T) { + (self, value) } } @@ -846,22 +836,6 @@ macro_rules! tuple_for_each_mut { }; } -#[cfg(test)] -#[cfg(feature = "std")] -#[test] -#[allow(clippy::items_after_statements)] -pub fn test_macros() { - let mut t = tuple_list!(1, "a"); - - tuple_for_each!(f1, std::fmt::Display, t, |x| { - log::info!("{x}"); - }); - - tuple_for_each_mut!(f2, std::fmt::Display, t, |x| { - log::info!("{x}"); - }); -} - /* // Define trait and implement it for several primitive types. @@ -966,4 +940,20 @@ mod test { #[allow(clippy::no_effect_underscore_binding)] let _type_assert: tuple_list_type!(W, W, W) = mapped; } + + /// Function that tests the tuple macros + #[test] + #[cfg(feature = "std")] + #[allow(clippy::items_after_statements)] + fn test_macros() { + let mut t = tuple_list!(1, "a"); + + tuple_for_each!(f1, std::fmt::Display, t, |x| { + log::info!("{x}"); + }); + + tuple_for_each_mut!(f2, std::fmt::Display, t, |x| { + log::info!("{x}"); + }); + } } diff --git a/libafl_cc/Cargo.toml b/libafl_cc/Cargo.toml index 94e18d5ad3..5c89b8c39d 100644 --- a/libafl_cc/Cargo.toml +++ b/libafl_cc/Cargo.toml @@ -9,16 +9,53 @@ readme = "README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "testing", "compiler"] edition = "2021" -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +rust-version = "1.78" +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[features] +default = [ + "ddg-instr", + "function-logging", + "cmplog-routines", + "autotokens", + "coverage-accounting", + "cmplog-instructions", + "ctx", + "dump-cfg", + "profiling", +] + +# llvm passes +ddg-instr = [] +function-logging = [] +cmplog-routines = [] +autotokens = [] +coverage-accounting = [] +cmplog-instructions = [] +ctx = [] +dump-cfg = [] +profiling = [] + [build-dependencies] -cc = { version = "1.0", features = ["parallel"] } -which = "6.0" +cc = { workspace = true, features = ["parallel"] } +which = { workspace = true } [target.'cfg(target_vendor = "apple")'.build-dependencies] -glob = "0.3" +glob = "0.3.1" [dependencies] -serde = { version = "1.0", default-features = false, features = ["alloc", "derive"] } # serialization lib \ No newline at end of file +serde = { workspace = true, default-features = false, features = [ + "alloc", + "derive", +] } # serialization lib + +[lints] +workspace = true diff --git a/libafl_cc/build.rs b/libafl_cc/build.rs index 1b44657b13..15f1403ec0 100644 --- a/libafl_cc/build.rs +++ b/libafl_cc/build.rs @@ -107,12 +107,12 @@ fn find_llvm_config() -> Result { fn exec_llvm_config(args: &[&str]) -> String { let llvm_config = find_llvm_config().expect("Unexpected error"); - match 
Command::new(llvm_config).args(args).output() { + match Command::new(&llvm_config).args(args).output() { Ok(output) => String::from_utf8(output.stdout) .expect("Unexpected llvm-config output") .trim() .to_string(), - Err(e) => panic!("Could not execute llvm-config: {e}"), + Err(e) => panic!("Could not execute {llvm_config}: {e}"), } } @@ -144,6 +144,7 @@ fn find_llvm_version() -> Option { } #[allow(clippy::too_many_arguments)] +#[allow(unused)] fn build_pass( bindir_path: &Path, out_dir: &Path, @@ -152,7 +153,7 @@ fn build_pass( src_dir: &Path, src_file: &str, additional_srcfiles: Option<&Vec<&str>>, - optional: bool, + required: bool, ) { let dot_offset = src_file.rfind('.').unwrap(); let src_stub = &src_file[..dot_offset]; @@ -164,7 +165,7 @@ fn build_pass( }; println!("cargo:rerun-if-changed=src/{src_file}"); - let r = if cfg!(unix) { + let command_result = if cfg!(unix) { let r = Command::new(bindir_path.join("clang++")) .arg("-v") .arg(format!("--target={}", env::var("HOST").unwrap())) @@ -198,27 +199,27 @@ fn build_pass( None }; - match r { - Some(r) => match r { + match command_result { + Some(res) => match res { Ok(s) => { if !s.success() { - if optional { - println!("cargo:warning=Skipping src/{src_file} - Exit status: {s}"); + if required { + panic!("Failed to compile required compiler pass src/{src_file} - Exit status: {s}"); } else { - panic!("Failed to compile {src_file} - Exit status: {s}"); + println!("cargo:warning=Skipping non-required compiler pass src/{src_file} - Reason: Exit status {s}. You can ignore this error unless you want this compiler pass."); } } } Err(err) => { - if optional { - println!("cargo:warning=Skipping src/{src_file} - {err}"); + if required { + panic!("Failed to compile required compiler pass src/{src_file} - Exit status: {err}"); } else { - panic!("Failed to compile {src_file} - {err}"); + println!("cargo:warning=Skipping non-required compiler pass src/{src_file} - Reason: Exit status {err}. 
You can ignore this error unless you want this compiler pass."); } } }, None => { - println!("cargo:warning=Skipping src/{src_file} - Only supported on Windows or *nix."); + println!("cargo:warning=Skipping compiler pass src/{src_file} - Only supported on Windows or *nix."); } } } @@ -238,7 +239,7 @@ fn main() { println!("cargo:rerun-if-env-changed=LLVM_CXXFLAGS"); println!("cargo:rerun-if-env-changed=LLVM_LDFLAGS"); println!("cargo:rerun-if-env-changed=LLVM_VERSION"); - println!("cargo:rerun-if-env-changed=LIBAFL_EDGES_MAP_SIZE_IN_USE"); + println!("cargo:rerun-if-env-changed=LIBAFL_EDGES_MAP_DEFAULT_SIZE"); println!("cargo:rerun-if-env-changed=LIBAFL_ACCOUNTING_MAP_SIZE"); println!("cargo:rerun-if-env-changed=LIBAFL_DDG_MAP_SIZE"); println!("cargo:rerun-if-changed=src/common-llvm.h"); @@ -311,13 +312,13 @@ pub const LIBAFL_CC_LLVM_VERSION: Option = None; }; let mut cxxflags: Vec = cxxflags.split_whitespace().map(String::from).collect(); - let edges_map_size_in_use: usize = option_env!("LIBAFL_EDGES_MAP_SIZE_IN_USE") + let edge_map_default_size: usize = option_env!("LIBAFL_EDGES_MAP_DEFAULT_SIZE") .map_or(Ok(65_536), str::parse) - .expect("Could not parse LIBAFL_EDGES_MAP_SIZE_IN_USE"); - let edges_map_size_max: usize = option_env!("LIBAFL_EDGES_MAP_SIZE_MAX") + .expect("Could not parse LIBAFL_EDGES_MAP_DEFAULT_SIZE"); + let edge_map_allocated_size: usize = option_env!("LIBAFL_EDGES_MAP_ALLOCATED_SIZE") .map_or(Ok(2_621_440), str::parse) - .expect("Could not parse LIBAFL_EDGES_MAP_SIZE_IN_USE"); - cxxflags.push(format!("-DEDGES_MAP_SIZE_IN_USE={edges_map_size_in_use}")); + .expect("Could not parse LIBAFL_EDGES_MAP_DEFAULT_SIZE"); + cxxflags.push(format!("-DEDGES_MAP_DEFAULT_SIZE={edge_map_default_size}")); let acc_map_size: usize = option_env!("LIBAFL_ACCOUNTING_MAP_SIZE") .map_or(Ok(65_536), str::parse) @@ -347,9 +348,9 @@ pub const LIBAFL_CC_LLVM_VERSION: Option = None; pub const CLANGXX_PATH: &str = {clangcpp:?}; /// The default size of the edges map the fuzzer uses - pub const EDGES_MAP_SIZE_IN_USE: usize = {edges_map_size_in_use}; + pub const EDGES_MAP_DEFAULT_SIZE: usize = {edge_map_default_size}; /// The real allocated size of the edges map - pub const EDGES_MAP_SIZE_MAX: usize = {edges_map_size_max}; + pub const EDGES_MAP_ALLOCATED_SIZE: usize = {edge_map_allocated_size}; /// The size of the accounting maps pub const ACCOUNTING_MAP_SIZE: usize = {acc_map_size}; @@ -416,6 +417,7 @@ pub const LIBAFL_CC_LLVM_VERSION: Option = None; ldflags.push(&sdk_path); }; + #[cfg(feature = "ddg-instr")] build_pass( bindir_path, out_dir, @@ -424,42 +426,104 @@ pub const LIBAFL_CC_LLVM_VERSION: Option = None; src_dir, "ddg-instr.cc", Some(&vec!["ddg-utils.cc"]), + true, + ); + + #[cfg(feature = "function-logging")] + build_pass( + bindir_path, + out_dir, + &cxxflags, + &ldflags, + src_dir, + "function-logging.cc", + None, + true, + ); + + #[cfg(feature = "cmplog-routines")] + build_pass( + bindir_path, + out_dir, + &cxxflags, + &ldflags, + src_dir, + "cmplog-routines-pass.cc", + None, + true, + ); + + #[cfg(feature = "autotokens")] + build_pass( + bindir_path, + out_dir, + &cxxflags, + &ldflags, + src_dir, + "autotokens-pass.cc", + None, + true, + ); + + #[cfg(feature = "coverage-accounting")] + build_pass( + bindir_path, + out_dir, + &cxxflags, + &ldflags, + src_dir, + "coverage-accounting-pass.cc", + None, + true, + ); + + #[cfg(feature = "cmplog-instructions")] + build_pass( + bindir_path, + out_dir, + &cxxflags, + &ldflags, + src_dir, + "cmplog-instructions-pass.cc", + None, + true, + ); + + 
#[cfg(feature = "ctx")] + build_pass( + bindir_path, + out_dir, + &cxxflags, + &ldflags, + src_dir, + "ctx-pass.cc", + None, + true, + ); + + #[cfg(feature = "dump-cfg")] + build_pass( + bindir_path, + out_dir, + &cxxflags, + &ldflags, + src_dir, + "dump-cfg-pass.cc", + None, false, ); - for pass in &[ - "function-logging.cc", - "cmplog-routines-pass.cc", - "autotokens-pass.cc", - "coverage-accounting-pass.cc", - "cmplog-instructions-pass.cc", - "ctx-pass.cc", - ] { - build_pass( - bindir_path, - out_dir, - &cxxflags, - &ldflags, - src_dir, - pass, - None, - false, - ); - } - - // Optional pass - for pass in &["dump-cfg-pass.cc", "profiling.cc"] { - build_pass( - bindir_path, - out_dir, - &cxxflags, - &ldflags, - src_dir, - pass, - None, - true, - ); - } + #[cfg(feature = "profiling")] + build_pass( + bindir_path, + out_dir, + &cxxflags, + &ldflags, + src_dir, + "profiling-pass.cc", + None, + false, + ); cc::Build::new() .file(src_dir.join("no-link-rt.c")) diff --git a/libafl_cc/src/autotokens-pass.cc b/libafl_cc/src/autotokens-pass.cc index af0b094144..74224c0271 100644 --- a/libafl_cc/src/autotokens-pass.cc +++ b/libafl_cc/src/autotokens-pass.cc @@ -498,6 +498,11 @@ bool AutoTokensPass::runOnModule(Module &M) { Value *op2 = callInst->getArgOperand(2); ConstantInt *ilen = dyn_cast(op2); + if (!ilen) { + op2 = callInst->getArgOperand(1); + ilen = dyn_cast(op2); + } + if (ilen) { uint64_t literalLength = optLen; optLen = ilen->getZExtValue(); diff --git a/libafl_cc/src/cfg.rs b/libafl_cc/src/cfg.rs index 9c353ffa52..0551bb8b97 100644 --- a/libafl_cc/src/cfg.rs +++ b/libafl_cc/src/cfg.rs @@ -95,9 +95,9 @@ where /// Inserts an edge into CFG. #[must_use] pub fn new() -> Self { - let map_size = option_env!("LIBAFL_EDGES_MAP_SIZE_IN_USE") + let map_size = option_env!("LIBAFL_EDGES_MAP_DEFAULT_SIZE") .map_or(Ok(65536), str::parse) - .expect("Could not parse LIBAFL_EDGES_MAP_SIZE_IN_USE"); + .expect("Could not parse LIBAFL_EDGES_MAP_DEFAULT_SIZE"); Self { edges: (0..map_size).map(|_| None).collect(), func_to_entry_bb: HashMap::default(), diff --git a/libafl_cc/src/clang.rs b/libafl_cc/src/clang.rs index 56bad90685..42d5b137b8 100644 --- a/libafl_cc/src/clang.rs +++ b/libafl_cc/src/clang.rs @@ -171,7 +171,7 @@ impl ToolWrapper for ClangWrapper { if arg_as_path .extension() - .map_or(false, |ext| ext.eq_ignore_ascii_case("s")) + .is_some_and(|ext| ext.eq_ignore_ascii_case("s")) { self.is_asm = true; } diff --git a/libafl_cc/src/ctx-pass.cc b/libafl_cc/src/ctx-pass.cc index 9f70445e2f..527c0cddac 100644 --- a/libafl_cc/src/ctx-pass.cc +++ b/libafl_cc/src/ctx-pass.cc @@ -64,7 +64,7 @@ using namespace llvm; -#define MAP_SIZE EDGES_MAP_SIZE_IN_USE +#define MAP_SIZE EDGES_MAP_DEFAULT_SIZE namespace { diff --git a/libafl_cc/src/dump-cfg-pass.cc b/libafl_cc/src/dump-cfg-pass.cc index 8fda3e0a9c..12affe2cf0 100644 --- a/libafl_cc/src/dump-cfg-pass.cc +++ b/libafl_cc/src/dump-cfg-pass.cc @@ -106,13 +106,11 @@ class DumpCfgPass : public ModulePass { #else if (n.startswith("llvm.")) { #endif + return true; + } else { + return false; } - return true; } - else { - return false; - } -} }; } // namespace diff --git a/libafl_cc/src/function-logging.cc b/libafl_cc/src/function-logging.cc index b67641f5b6..f96c2f4b2f 100644 --- a/libafl_cc/src/function-logging.cc +++ b/libafl_cc/src/function-logging.cc @@ -64,7 +64,7 @@ using namespace llvm; -#define MAP_SIZE EDGES_MAP_SIZE_IN_USE +#define MAP_SIZE EDGES_MAP_DEFAULT_SIZE namespace { diff --git a/libafl_cc/src/lib.rs b/libafl_cc/src/lib.rs index 7a13081a74..8d71910c03 
100644 --- a/libafl_cc/src/lib.rs +++ b/libafl_cc/src/lib.rs @@ -1,21 +1,5 @@ //! Compiler Wrapper from `LibAFL` -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(clippy::all)] -#![deny(clippy::pedantic)] -#![forbid(unexpected_cfgs)] -#![allow( - clippy::unreadable_literal, - clippy::type_repetition_in_bounds, - clippy::missing_errors_doc, - clippy::cast_possible_truncation, - clippy::used_underscore_binding, - clippy::ptr_as_ptr, - clippy::missing_panics_doc, - clippy::missing_docs_in_private_items, - clippy::module_name_repetitions, - clippy::unreadable_literal -)] #![cfg_attr(not(test), warn( missing_debug_implementations, missing_docs, @@ -56,6 +40,7 @@ ) )] +use core::str; use std::{path::Path, process::Command}; pub mod ar; @@ -339,4 +324,34 @@ pub trait CompilerWrapper: ToolWrapper { fn link_staticlib(&mut self, dir: &Path, name: S) -> &'_ mut Self where S: AsRef; + + /// Finds the current `python3` version and adds `-lpython3.` as linker argument. + /// Useful for fuzzers that need libpython, such as `nautilus`-based fuzzers. + fn link_libpython(&mut self) -> Result<&'_ mut Self, String> { + Ok(self.add_link_arg(format!("-l{}", find_python3_version()?))) + } +} + +/// Helper function to find the current python3 version, if you need this information at link time. +/// Example output: `python3.11` +/// Example use: `.add_link_arg(format!("-l{}", find_python3_version()?))` +/// Hint: you can use `link_libpython()` directly. +fn find_python3_version() -> Result { + match Command::new("python3").arg("--version").output() { + Ok(output) => { + let python_version = str::from_utf8(&output.stdout).unwrap_or_default().trim(); + if python_version.is_empty() { + return Err("Empty return from python3 --version".to_string()); + } + let version = python_version.split("Python 3.").nth(1).ok_or_else(|| { + format!("Could not find Python 3 in version string: {python_version}") + })?; + let mut version = version.split('.'); + let version = version.next().ok_or_else(|| { + format!("Could not split python3 version string {python_version}") + })?; + Ok(format!("python3.{version}")) + } + Err(err) => Err(format!("Could not execute python3 --version: {err:?}")), + } } diff --git a/libafl_cc/src/profiling.cc b/libafl_cc/src/profiling-pass.cc similarity index 100% rename from libafl_cc/src/profiling.cc rename to libafl_cc/src/profiling-pass.cc diff --git a/libafl_concolic/symcc_libafl/Cargo.toml b/libafl_concolic/symcc_libafl/Cargo.toml index 4fed903701..61ba14d768 100644 --- a/libafl_concolic/symcc_libafl/Cargo.toml +++ b/libafl_concolic/symcc_libafl/Cargo.toml @@ -2,14 +2,24 @@ name = "symcc_libafl" version.workspace = true edition = "2021" -authors = ["Julius Hohnerlein ", "Andrea Fioraldi ", "Dominik Maier "] +authors = [ + "Julius Hohnerlein ", + "Andrea Fioraldi ", + "Dominik Maier ", +] description = "Meta package for symcc_runtime" documentation = "https://docs.rs/symcc_libafl" repository = "https://github.com/AFLplusplus/LibAFL/" readme = "README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "testing", "security"] -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -24,6 +34,9 @@ build = ["which", "cmake"] clone = ["which"] [dependencies] -which = { version = "6.0", optional = true } -cmake = { version = "0.1", optional = true } -log = "0.4.20" +which = { 
workspace = true, optional = true } +cmake = { workspace = true, optional = true } +log = { workspace = true } + +[lints] +workspace = true diff --git a/libafl_concolic/symcc_libafl/src/lib.rs b/libafl_concolic/symcc_libafl/src/lib.rs index dfe3df9569..a0f5507718 100644 --- a/libafl_concolic/symcc_libafl/src/lib.rs +++ b/libafl_concolic/symcc_libafl/src/lib.rs @@ -1,7 +1,5 @@ //! This is a 'meta-package' for libafl that exposes a consistent URL and commit hash for the //! [`SymCC` fork](https://github.com/AFLplusplus/symcc). -#![allow(clippy::module_name_repetitions)] -#![forbid(unexpected_cfgs)] /// The URL of the `LibAFL` `SymCC` fork. pub const SYMCC_REPO_URL: &str = "https://github.com/AFLplusplus/symcc.git"; @@ -65,6 +63,8 @@ pub use clone::clone_symcc; #[cfg(feature = "build")] mod build { + #![allow(clippy::module_name_repetitions)] + use std::path::{Path, PathBuf}; /// Builds `SymCC` at the given directory using [`cmake`](https://crates.io/crates/cmake). diff --git a/libafl_concolic/symcc_runtime/Cargo.toml b/libafl_concolic/symcc_runtime/Cargo.toml index d12813ef2c..8cccd60633 100644 --- a/libafl_concolic/symcc_runtime/Cargo.toml +++ b/libafl_concolic/symcc_runtime/Cargo.toml @@ -2,7 +2,11 @@ name = "symcc_runtime" version.workspace = true edition = "2021" -authors = ["Julius Hohnerlein ", "Andrea Fioraldi ", "Dominik Maier "] +authors = [ + "Julius Hohnerlein ", + "Andrea Fioraldi ", + "Dominik Maier ", +] description = "Build Concolic Tracing tools based on SymCC in Rust" documentation = "https://docs.rs/symcc_runtime" repository = "https://github.com/AFLplusplus/LibAFL/" @@ -10,7 +14,13 @@ readme = "README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "testing", "security"] build = "build.rs" -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -22,15 +32,19 @@ all-features = true no-cpp-runtime = [] [dependencies] -unchecked_unwrap = "4" -ctor = "0.2" -libc = "0.2" -libafl = { path = "../../libafl", version = "0.13.0", default-features=false, features=["std", "serdeany_autoreg"] } -libafl_bolts = { path = "../../libafl_bolts", version = "0.13.0", default-features=false, features=["std", "serdeany_autoreg"] } +libafl = { workspace = true, features = ["std", "serdeany_autoreg"] } +libafl_bolts = { workspace = true, features = ["std", "serdeany_autoreg"] } + +unchecked_unwrap = "4.0.0" +ctor = "0.2.9" +libc = { workspace = true } [build-dependencies] -cmake = "0.1" -bindgen = "0.69.4" -regex = "1" -which = "6.0" -symcc_libafl = { path = "../symcc_libafl", version = "0.13.0" } +cmake = { workspace = true } +bindgen = { workspace = true } +regex = { workspace = true } +which = { workspace = true } +symcc_libafl = { workspace = true, default-features = true, version = "0.14.1" } + +[lints] +workspace = true diff --git a/libafl_concolic/symcc_runtime/src/filter.rs b/libafl_concolic/symcc_runtime/src/filter.rs index 067ae8e9b1..6059c93c8d 100644 --- a/libafl_concolic/symcc_runtime/src/filter.rs +++ b/libafl_concolic/symcc_runtime/src/filter.rs @@ -35,7 +35,9 @@ macro_rules! rust_filter_function_declaration { } /// A [`Filter`] can decide for each expression whether the expression should be traced symbolically or be -/// concretized. This allows to implement filtering mechanisms that reduce the amount of traced expressions by +/// concretized. 
+/// +/// This allows us to implement filtering mechanisms that reduce the amount of traced expressions by /// concretizing uninteresting expressions. /// If a filter concretizes an expression that would have later been used as part of another expression that /// is still symbolic, a concrete instead of a symbolic value is received. @@ -78,8 +80,9 @@ pub trait Filter { invoke_macro_with_rust_runtime_exports!(rust_filter_function_declaration;); } -/// A `FilterRuntime` wraps a [`Runtime`] with a [`Filter`], applying the filter before passing expressions to the inner -/// runtime. +/// A `FilterRuntime` wraps a [`Runtime`] with a [`Filter`]. +/// +/// It applies the filter before passing expressions to the inner runtime. /// It also implements [`Runtime`], allowing for composing multiple [`Filter`]'s in a chain. #[allow(clippy::module_name_repetitions)] pub struct FilterRuntime { diff --git a/libafl_concolic/symcc_runtime/src/lib.rs b/libafl_concolic/symcc_runtime/src/lib.rs index f5baf640da..7caea28316 100644 --- a/libafl_concolic/symcc_runtime/src/lib.rs +++ b/libafl_concolic/symcc_runtime/src/lib.rs @@ -27,12 +27,6 @@ //! # SymCC and SymQEMU expect to runtime file to be called `libSymRuntime.so`. Setting the name to `SymRuntime` achieves this. //! name = "SymRuntime" //! ``` -#![allow( - clippy::module_name_repetitions, - clippy::missing_panics_doc, - clippy::pub_underscore_fields -)] -#![forbid(unexpected_cfgs)] pub mod filter; pub mod tracing; @@ -40,8 +34,9 @@ pub mod tracing; // The following exports are used by the `export_runtime` macro. They are therefore exported, but hidden from docs, as they are not supposed to be used directly by the user. #[doc(hidden)] #[cfg(target_os = "linux")] -#[allow(clippy::mixed_attributes_style)] pub mod cpp_runtime { + #![allow(clippy::mixed_attributes_style)] + #![allow(clippy::pub_underscore_fields)] #![allow(non_upper_case_globals)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] @@ -164,7 +159,7 @@ macro_rules! export_rust_runtime_fn { // special case for build_integer_from_buffer cuz the next one just doesn't work!!!!!!! (pub fn build_integer_from_buffer( buffer: *mut ::std::os::raw::c_void, - num_bits: ::std::os::raw::c_uint,) -> RSymExpr,$c_name:ident; $rt_cb:path) => { + num_bits: ::std::os::raw::c_uint$(,)?) -> RSymExpr,$c_name:ident; $rt_cb:path) => { #[allow(clippy::missing_safety_doc)] #[no_mangle] pub unsafe extern "C" fn _rsym_build_integer_from_buffer(buffer: *mut ::std::os::raw::c_void, num_bits: ::std::os::raw::c_uint) { @@ -207,7 +202,9 @@ impl Runtime for NopRuntime { invoke_macro_with_rust_runtime_exports!(impl_nop_runtime_fn;); } -/// This runtime can be constructed from an [`Option`] of a runtime, concretizing all expressions in the `None` case and forwarding expressions to the respective runtime in the `Some` case. +/// This runtime can be constructed from an [`Option`] of a runtime. +/// +/// It concretizes all expressions in the `None` case and forwards expressions to the respective runtime in the `Some` case. /// This is especially useful for parts of the processing pipeline that should be activated based on a runtime configuration, such as an environment variable. 
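+///
+/// For example, tracing could be toggled by an environment variable. The sketch below
+/// is illustrative only and not part of this change: it assumes a constructor along the
+/// lines of `OptionalRuntime::new(Option<RT>)` and some inner runtime type `MyRuntime`.
+///
+/// ```ignore
+/// let inner = std::env::var_os("TRACE_ENABLED").map(|_| MyRuntime::new());
+/// let runtime = OptionalRuntime::new(inner);
+/// ```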
pub struct OptionalRuntime { inner: Option, diff --git a/libafl_concolic/symcc_runtime/src/tracing.rs b/libafl_concolic/symcc_runtime/src/tracing.rs index ade5da9d75..428b194d12 100644 --- a/libafl_concolic/symcc_runtime/src/tracing.rs +++ b/libafl_concolic/symcc_runtime/src/tracing.rs @@ -6,6 +6,7 @@ use libafl::observers::concolic::SymExpr; use crate::{RSymExpr, Runtime}; /// Traces the expressions according to the format described in [`libafl::observers::concolic::serialization_format`]. +/// /// The format can be read from elsewhere to perform processing of the expressions outside of the runtime. pub struct TracingRuntime { writer: StdShMemMessageFileWriter, diff --git a/libafl_concolic/test/dump_constraints/Cargo.toml b/libafl_concolic/test/dump_constraints/Cargo.toml index 5b441b6074..5b5c0854ae 100644 --- a/libafl_concolic/test/dump_constraints/Cargo.toml +++ b/libafl_concolic/test/dump_constraints/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dump_constraints" -version = "0.1.0" +version.workspace = true edition = "2021" authors = ["Julius Hohnerlein "] description = "Dump Constraints, a lib to see the constraints oof a run" @@ -9,11 +9,20 @@ repository = "https://github.com/AFLplusplus/LibAFL/" readme = "../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "libafl", "ldpreload"] -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -libafl = {path = "../../../libafl"} -libafl_bolts = {path = "../../../libafl_bolts"} -clap = { version = "4.5", features = ["derive"] } +libafl = { workspace = true, default-features = true } +libafl_bolts = { workspace = true, default-features = true } +clap = { workspace = true, features = ["derive"] } + +[lints] +workspace = true diff --git a/libafl_concolic/test/dump_constraints/src/main.rs b/libafl_concolic/test/dump_constraints/src/main.rs index 51b02ae988..34d12b6322 100644 --- a/libafl_concolic/test/dump_constraints/src/main.rs +++ b/libafl_concolic/test/dump_constraints/src/main.rs @@ -1,7 +1,6 @@ //! This is a straight-forward command line utility that can dump constraints written by a tracing runtime. //! It achieves this by running an instrumented target program with the necessary environment variables set. //! When the program has finished executing, it dumps the traced constraints to a file. 
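+//!
+//! The constraints are typically written in the format described in
+//! [`libafl::observers::concolic::serialization_format`] (see `TracingRuntime`),
+//! so the resulting file can be parsed elsewhere for offline processing.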
-#![forbid(unexpected_cfgs)] use std::{ ffi::OsString, diff --git a/libafl_concolic/test/runtime_test/Cargo.toml b/libafl_concolic/test/runtime_test/Cargo.toml index 6ef0bbb516..ae700eece3 100644 --- a/libafl_concolic/test/runtime_test/Cargo.toml +++ b/libafl_concolic/test/runtime_test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "runtime_test" -version = "0.1.0" +version.workspace = true edition = "2021" authors = ["Julius Hohnerlein "] description = "Runtime test of LibAFL fuzzing with symbolicc execution" @@ -8,14 +8,23 @@ documentation = "https://docs.rs/libafl" repository = "https://github.com/AFLplusplus/LibAFL/" readme = "../README.md" license = "MIT OR Apache-2.0" -keywords = ["fuzzing", "libafl", "symbolic", "symcc", "symqemu", "fuzzer"] -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +keywords = ["fuzzing", "libafl", "symbolic", "symcc", "symqemu"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] [lib] -crate-type = ["cdylib"] +crate-type = ["cdylib"] name = "SymRuntime" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] symcc_runtime = { path = "../../symcc_runtime" } + +[lints] +workspace = true diff --git a/libafl_derive/Cargo.toml b/libafl_derive/Cargo.toml index 0494221603..2cc461b66c 100644 --- a/libafl_derive/Cargo.toml +++ b/libafl_derive/Cargo.toml @@ -9,11 +9,22 @@ readme = "../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "testing"] edition = "2021" -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +rust-version = "1.78" +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] [lib] proc-macro = true [dependencies] -syn = { version = "2", features = ["full", "extra-traits"] } -quote = "1" +syn = { version = "2.0.77", features = ["full", "extra-traits"] } +quote = "1.0.37" +proc-macro2 = "1.0.86" + +[lints] +workspace = true diff --git a/libafl_derive/src/lib.rs b/libafl_derive/src/lib.rs index a6f1522085..354bbc9ca4 100644 --- a/libafl_derive/src/lib.rs +++ b/libafl_derive/src/lib.rs @@ -1,22 +1,6 @@ //! Derives for `LibAFL` #![no_std] -#![forbid(unexpected_cfgs)] -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(clippy::all)] -#![deny(clippy::pedantic)] -#![allow( - clippy::unreadable_literal, - clippy::type_repetition_in_bounds, - clippy::missing_errors_doc, - clippy::cast_possible_truncation, - clippy::used_underscore_binding, - clippy::ptr_as_ptr, - clippy::missing_panics_doc, - clippy::missing_docs_in_private_items, - clippy::module_name_repetitions, - clippy::unreadable_literal -)] #![cfg_attr(not(test), warn( missing_debug_implementations, missing_docs, @@ -59,7 +43,7 @@ use proc_macro::TokenStream; use quote::quote; -use syn::{parse_macro_input, DeriveInput}; +use syn::{parse_macro_input, Data::Struct, DeriveInput, Field, Fields::Named, Type}; /// Derive macro to implement `SerdeAny`, to use a type in a `SerdeAnyMap` #[proc_macro_derive(SerdeAny)] @@ -69,3 +53,94 @@ pub fn libafl_serdeany_derive(input: TokenStream) -> TokenStream { libafl_bolts::impl_serdeany!(#name); }) } + +/// A derive macro to implement `Display` +/// +/// Derive macro to implement [`core::fmt::Display`] for a struct where all fields implement `Display`. +/// The result is the space separated concatenation of all fields' display. +/// Order of declaration is preserved. 
+/// Specifically handled cases: +/// Options: Some => inner type display None => "". +/// Vec: inner type display space separated concatenation. +/// Generics and other more or less exotic stuff are not supported. +/// +/// # Examples +/// +/// ```rust +/// use libafl_derive; +/// +/// #[derive(libafl_derive::Display)] +/// struct MyStruct { +/// foo: String, +/// bar: Option, +/// } +/// ``` +/// +/// The above code will expand to: +/// +/// ```rust +/// struct MyStruct { +/// foo: String, +/// bar: Option, +/// } +/// +/// impl core::fmt::Display for MyStruct { +/// fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { +/// f.write_fmt(format_args!(" {0}", self.foo))?; +/// if let Some(opt) = &self.bar { +/// f.write_fmt(format_args!(" {0}", opt))?; +/// } +/// Ok(()) +/// } +/// } +/// ``` +/// +/// # Panics +/// Panics for any non-structs. +#[proc_macro_derive(Display)] +pub fn libafl_display(input: TokenStream) -> TokenStream { + let DeriveInput { ident, data, .. } = parse_macro_input!(input as DeriveInput); + + if let Struct(s) = data { + if let Named(fields) = s.fields { + let fields_fmt = fields.named.iter().map(libafl_display_field_by_type); + + return quote! { + impl core::fmt::Display for #ident { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + #(#fields_fmt)* + Ok(()) + } + } + } + .into(); + } + } + panic!("Only structs are supported"); +} + +fn libafl_display_field_by_type(it: &Field) -> proc_macro2::TokenStream { + let fmt = " {}"; + let ident = &it.ident; + if let Type::Path(type_path) = &it.ty { + if type_path.qself.is_none() && type_path.path.segments.len() == 1 { + let segment = &type_path.path.segments[0]; + if segment.ident == "Option" { + return quote! { + if let Some(opt) = &self.#ident { + write!(f, #fmt, opt)?; + } + }; + } else if segment.ident == "Vec" { + return quote! { + for e in &self.#ident { + write!(f, #fmt, e)?; + } + }; + } + } + } + quote! 
{ + write!(f, #fmt, self.#ident)?; + } +} diff --git a/libafl_frida/Cargo.toml b/libafl_frida/Cargo.toml index 46efae8805..8e7e0e2832 100644 --- a/libafl_frida/Cargo.toml +++ b/libafl_frida/Cargo.toml @@ -10,17 +10,22 @@ license = "MIT OR Apache-2.0" keywords = ["fuzzing", "frida", "instrumentation"] edition = "2021" categories = [ - "development-tools::testing", - "emulators", - "embedded", - "os", - "no-std" + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", ] [package.metadata.docs.rs] no-default-features = true # We can't use auto-download inside docs.rs (no internet) all-features = false -features = ["cmplog", "serdeany_autoreg", "track_hit_feedbacks", "document-features"] +features = [ + "cmplog", + "serdeany_autoreg", + "track_hit_feedbacks", + "document-features", +] [features] default = ["serdeany_autoreg", "auto-download"] @@ -39,70 +44,70 @@ track_hit_feedbacks = ["libafl/track_hit_feedbacks"] auto-download = ["frida-gum-sys/auto-download", "frida-gum/auto-download"] [build-dependencies] -cc = { version = "1.0", features = ["parallel"] } - -[target.'cfg(target_arch = "aarch64")'.dependencies] -yaxpeax-arm = "0.2.4" - -[target.'cfg(target_arch = "x86_64")'.dependencies] -yaxpeax-x86 = "1.2.2" -iced-x86 = { version = "1.20.0", features = ["code_asm"], optional = true } +cc = { workspace = true, features = ["parallel"] } [dependencies] -libafl = { path = "../libafl", default-features = false, version = "0.13.0", features = [ - "std", - "derive", - "frida_cli", -] } -libafl_bolts = { path = "../libafl_bolts", version = "0.13.0", default-features = false, features = [ - "std", - "derive", - "frida_cli" -] } -libafl_targets = { path = "../libafl_targets", version = "0.13.0", features = [ - "std", - "sancov_cmplog", +libafl = { workspace = true, features = ["std", "derive", "frida_cli"] } +libafl_bolts = { workspace = true, features = ["std", "derive", "frida_cli"] } +libafl_targets = { workspace = true, default-features = true, features = [ + "std", + "sancov_cmplog", ] } -nix = { version = "0.29", features = ["mman"] } -libc = "0.2" -hashbrown = "0.14" -rangemap = "1.3" -frida-gum-sys = { version = "0.13.6", features = [ - "event-sink", - "invocation-listener", +nix = { workspace = true, default-features = true, features = ["mman"] } +libc = { workspace = true } +hashbrown = { workspace = true, default-features = true } +rangemap = { workspace = true } +frida-gum-sys = { version = "0.15.1", features = [ + "event-sink", + "invocation-listener", ] } -frida-gum = { version = "0.13.6", features = [ - "event-sink", - "invocation-listener", - "module-names", +frida-gum = { version = "0.15.1", features = [ + "event-sink", + "invocation-listener", + "module-names", + "script", ] } -dynasmrt = "2" +dynasmrt = "3.0.1" -color-backtrace = { version = "0.6", features = ["resolve-modules"] } -termcolor = "1.1.3" -serde = "1.0" -backtrace = { version = "0.3", default-features = false, features = [ - "std", - "serde", +color-backtrace = { version = "0.6.1", features = ["resolve-modules"] } +termcolor = "1.4.1" +serde = { workspace = true, default-features = true } +backtrace = { workspace = true, default-features = false, features = [ + "std", + "serde", ] } -num-traits = "0.2" -ahash = "^0.8" # fetch the latest -paste = "1.0" -log = "0.4.20" -mmap-rs = "0.6.0" +num-traits = { workspace = true, default-features = true } +ahash = { workspace = true, default-features = true } +paste = { workspace = true } +log = { workspace = true } +mmap-rs = "0.6.1" bit_reverse = 
"0.1.8" -yaxpeax-arch = "0.2.7" - -document-features = { version = "0.2", optional = true } # Document all features of this crate (for `cargo doc`) - -[target.'cfg(windows)'.dependencies] -winsafe = {version = "0.0.21", features = ["kernel"]} +yaxpeax-arch = "0.3.2" +document-features = { workspace = true, optional = true } # Document all features of this crate (for `cargo doc`) [dev-dependencies] -serial_test = { version = "3", default-features = false, features = ["logging"] } -clap = {version = "4.5", features = ["derive"]} -libloading = "0.8" -mimalloc = { version = "*", default-features = false } -dlmalloc ={version = "0.2.6", features = ["global"]} \ No newline at end of file +serial_test = { workspace = true, default-features = false, features = [ + "logging", +] } +clap = { workspace = true, features = ["derive"] } +libloading = "0.8.5" +mimalloc = { workspace = true, default-features = false } +dlmalloc = { version = "0.2.6", features = ["global"] } + +[lints] +workspace = true + +[target.'cfg(target_arch = "aarch64")'.dependencies] +yaxpeax-arm = "0.3.0" + +[target.'cfg(target_arch = "x86_64")'.dependencies] +yaxpeax-x86 = "2.0.0" +iced-x86 = { version = "1.21.0", features = ["code_asm"], optional = true } + +[target.'cfg(windows)'.dependencies] +winsafe = { version = "0.0.22", features = ["kernel"] } + +[target.'cfg(target_vendor="apple")'.dependencies] +mach-sys = { version = "0.5.4" } diff --git a/libafl_frida/build.rs b/libafl_frida/build.rs index 76669ec895..f7e10787fd 100644 --- a/libafl_frida/build.rs +++ b/libafl_frida/build.rs @@ -1,7 +1,11 @@ // build.rs #![forbid(unexpected_cfgs)] +use std::{env, path::Path}; + fn main() { + let out_dir = env::var_os("OUT_DIR").unwrap(); + let out_dir = out_dir.to_string_lossy().to_string(); let target_os = std::env::var("CARGO_CFG_TARGET_OS").unwrap(); if target_os != "ios" { cc::Build::new().file("src/gettls.c").compile("libgettls.a"); @@ -46,7 +50,13 @@ fn main() { "/libpath:{}/.cache/cargo-xwin/xwin/sdk/lib/um/x86_64/", std::env::var("HOME").unwrap() )); - cmd.arg("/dll").arg("/OUT:test_harness.dll"); + cmd.arg("/dll").arg(format!( + "/OUT:{}", + Path::new(&out_dir) + .join("test_harness.so") + .to_str() + .unwrap() + )); let output = cmd.output().expect("Failed to link test_harness.dll"); let output_str = format!( "{:?}\nstatus: {}\nstdout: {}\nstderr: {}", @@ -73,7 +83,7 @@ fn main() { cmd.args(compiler.args()) .arg("test_harness.cpp") .arg("-o") - .arg("test_harness.so") + .arg(Path::new(&out_dir).join("test_harness.so")) .status() .expect("Failed to link test_harness"); } diff --git a/libafl_frida/src/alloc.rs b/libafl_frida/src/alloc.rs index e12bd4013a..c8b69e47b9 100644 --- a/libafl_frida/src/alloc.rs +++ b/libafl_frida/src/alloc.rs @@ -13,6 +13,16 @@ use backtrace::Backtrace; use frida_gum::{PageProtection, RangeDetails}; use hashbrown::HashMap; use libafl_bolts::cli::FuzzerOptions; +#[cfg(target_vendor = "apple")] +use mach_sys::{ + kern_return::KERN_SUCCESS, + message::mach_msg_type_number_t, + traps::mach_task_self, + vm::mach_vm_region_recurse, + vm_prot::VM_PROT_READ, + vm_region::{vm_region_recurse_info_t, vm_region_submap_info_64}, + vm_types::{mach_vm_address_t, mach_vm_size_t, natural_t}, +}; #[cfg(any( windows, target_os = "linux", @@ -28,6 +38,9 @@ use serde::{Deserialize, Serialize}; use crate::asan::errors::{AsanError, AsanErrors}; +#[cfg(target_vendor = "apple")] +const VM_REGION_SUBMAP_INFO_COUNT_64: mach_msg_type_number_t = 19; + /// An allocator wrapper with binary-only address sanitization #[derive(Debug)] 
pub struct Allocator { @@ -236,7 +249,11 @@ impl Allocator { let address = (metadata.address + self.page_size) as *mut c_void; self.allocations.insert(address as usize, metadata); - log::trace!("serving address: {:?}, size: {:x}", address, size); + log::trace!( + "serving address: {:#x}, size: {:#x}", + address as usize, + size + ); address } @@ -305,7 +322,9 @@ impl Allocator { continue; } // First poison the memory. - Self::poison(map_to_shadow!(self, address), allocation.size); + unsafe { + Self::poison(map_to_shadow!(self, address), allocation.size); + } // Reset the allocaiton metadata object allocation.size = 0; @@ -340,7 +359,11 @@ impl Allocator { } } - fn unpoison(start: usize, size: usize) { + /// Unpoison an area in memory + /// + /// # Safety + /// start needs to be a valid address, We need to be able to fill `size / 8` bytes. + unsafe fn unpoison(start: usize, size: usize) { unsafe { std::slice::from_raw_parts_mut(start as *mut u8, size / 8).fill(0xff); @@ -353,8 +376,11 @@ impl Allocator { } } - /// Poisonn an area in memory - pub fn poison(start: usize, size: usize) { + /// Poison an area in memory + /// + /// # Safety + /// start needs to be a valid address, We need to be able to fill `size / 8` bytes. + pub unsafe fn poison(start: usize, size: usize) { unsafe { std::slice::from_raw_parts_mut(start as *mut u8, size / 8).fill(0x0); @@ -426,7 +452,9 @@ impl Allocator { } if unpoison { - Self::unpoison(shadow_mapping_start, end - start); + unsafe { + Self::unpoison(shadow_mapping_start, end - start); + } } (shadow_mapping_start, (end - start) / 8 + 1) @@ -478,14 +506,11 @@ impl Allocator { //4. The aligned check is where the address and the size is 8 byte aligned. Use check_shadow_aligned to check it //5. The post-alignment is the same as pre-alignment except it is the qword following the aligned portion. Use a specialized check to ensure that [end & ~7, end) is valid. - if size == 0 - /*|| !self.is_managed(address as *mut c_void)*/ - { + if size == 0 { return true; } - if !self.is_managed(address as *mut c_void) { - log::trace!("unmanaged address to check_shadow: {:?}, {size:x}", address); + if !self.is_managed(address.cast_mut()) { return true; } @@ -544,11 +569,11 @@ impl Allocator { map_to_shadow!(self, start) } - /// Checks if the currennt address is one of ours + /// Checks if the current address is one of ours - is this address in the allocator region #[inline] pub fn is_managed(&self, ptr: *mut c_void) -> bool { //self.allocations.contains_key(&(ptr as usize)) - self.shadow_offset <= ptr as usize && (ptr as usize) < self.current_mapping_addr + self.base_mapping_addr <= ptr as usize && (ptr as usize) < self.current_mapping_addr } /// Checks if any of the allocations has not been freed @@ -562,14 +587,72 @@ impl Allocator { } /// Unpoison all the memory that is currently mapped with read permissions. 
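+    ///
+    /// On Apple targets this walks the task's memory map via `mach_vm_region_recurse`,
+    /// descending into submaps, and maps shadow memory for every readable region that
+    /// is not itself part of the shadow/allocator range.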
+ #[cfg(target_vendor = "apple")] + pub fn unpoison_all_existing_memory(&mut self) { + let task = unsafe { mach_task_self() }; + let mut address: mach_vm_address_t = 0; + let mut size: mach_vm_size_t = 0; + let mut depth: natural_t = 0; + + loop { + let mut kr; + let mut info_count: mach_msg_type_number_t = VM_REGION_SUBMAP_INFO_COUNT_64; + let mut info = vm_region_submap_info_64::default(); + loop { + kr = unsafe { + mach_vm_region_recurse( + task, + &raw mut address, + &raw mut size, + &raw mut depth, + &raw mut info as vm_region_recurse_info_t, + &raw mut info_count, + ) + }; + + if kr != KERN_SUCCESS { + break; + } + + if info.is_submap != 0 { + depth += 1; + continue; + } + + break; + } + + if kr != KERN_SUCCESS { + break; + } + + let start = address as usize; + let end = (address + size) as usize; + + if info.protection & VM_PROT_READ == VM_PROT_READ { + //if its at least readable + if self.shadow_offset <= start && end <= self.current_mapping_addr { + log::trace!("Reached the shadow/allocator region - skipping"); + } else { + log::trace!("Unpoisoning: {:#x}:{:#x}", address, address + size); + self.map_shadow_for_region(start, end, true); + } + } + address += size; + size = 0; + } + } + + /// Unpoisons all memory + #[cfg(not(target_vendor = "apple"))] pub fn unpoison_all_existing_memory(&mut self) { RangeDetails::enumerate_with_prot( PageProtection::Read, &mut |range: &RangeDetails| -> bool { let start = range.memory_range().base_address().0 as usize; let end = start + range.memory_range().size(); - if self.is_managed(start as *mut c_void) { - log::trace!("Not unpoisoning: {:#x}-{:#x}, is_managed", start, end); + if self.shadow_offset <= start && end <= self.current_mapping_addr { + log::trace!("Reached the shadow/allocator region - skipping"); } else { log::trace!("Unpoisoning: {:#x}-{:#x}", start, end); self.map_shadow_for_region(start, end, true); diff --git a/libafl_frida/src/asan/asan_rt.rs b/libafl_frida/src/asan/asan_rt.rs index aaaf4c5472..94ac0e0bf6 100644 --- a/libafl_frida/src/asan/asan_rt.rs +++ b/libafl_frida/src/asan/asan_rt.rs @@ -6,11 +6,9 @@ even if the target would not have crashed under normal conditions. this helps finding mem errors early. */ -use core::{ - fmt::{self, Debug, Formatter}, - ptr::addr_of_mut, -}; +use core::fmt::{self, Debug, Formatter}; use std::{ + cell::Cell, ffi::{c_char, c_void}, ptr::write_volatile, rc::Rc, @@ -59,17 +57,18 @@ extern "C" { fn __register_frame(begin: *mut c_void); } -#[cfg(not(target_os = "ios"))] +#[cfg(not(target_vendor = "apple"))] extern "C" { fn tls_ptr() -> *const c_void; } -/// The count of registers that need to be saved by the asan runtime -/// sixteen general purpose registers are put in this order, rax, rbx, rcx, rdx, rbp, rsp, rsi, rdi, r8-r15, plus instrumented rip, accessed memory addr and true rip +/// The count of registers that need to be saved by the `ASan` runtime. +/// +/// Sixteen general purpose registers are put in this order, `rax`, `rbx`, `rcx`, `rdx`, `rbp`, `rsp`, `rsi`, `rdi`, `r8-r15`, plus instrumented `rip`, accessed memory addr and true `rip` #[cfg(target_arch = "x86_64")] pub const ASAN_SAVE_REGISTER_COUNT: usize = 19; -/// The registers that need to be saved by the asan runtime, as names +/// The registers that need to be saved by the `ASan` runtime, as names #[cfg(target_arch = "x86_64")] pub const ASAN_SAVE_REGISTER_NAMES: [&str; ASAN_SAVE_REGISTER_COUNT] = [ "rax", @@ -93,6 +92,10 @@ pub const ASAN_SAVE_REGISTER_NAMES: [&str; ASAN_SAVE_REGISTER_COUNT] = [ "actual rip", ]; +thread_local! 
{ + static ASAN_IN_HOOK: Cell = const { Cell::new(false) }; +} + /// The count of registers that need to be saved by the asan runtime #[cfg(target_arch = "aarch64")] pub const ASAN_SAVE_REGISTER_COUNT: usize = 32; @@ -104,8 +107,9 @@ const ASAN_EH_FRAME_FDE_OFFSET: u32 = 20; #[cfg(target_arch = "aarch64")] const ASAN_EH_FRAME_FDE_ADDRESS_OFFSET: u32 = 28; -/// The frida address sanitizer runtime, providing address sanitization. -/// When executing in `ASAN`, each memory access will get checked, using frida stalker under the hood. +/// The `FRIDA` address sanitizer runtime, providing address sanitization. +/// +/// When executing in `ASan`, each memory access will get checked, using `FRIDA` stalker under the hood. /// The runtime can report memory errors that occurred during execution, /// even if the target would not have crashed under normal conditions. /// this helps finding mem errors early. @@ -158,7 +162,7 @@ impl FridaRuntime for AsanRuntime { fn init( &mut self, gum: &Gum, - _ranges: &RangeMap, + _ranges: &RangeMap, module_map: &Rc, ) { self.allocator.init(); @@ -179,7 +183,6 @@ impl FridaRuntime for AsanRuntime { self.register_hooks(gum); self.generate_instrumentation_blobs(); self.unpoison_all_existing_memory(); - self.register_thread(); } @@ -210,7 +213,11 @@ impl FridaRuntime for AsanRuntime { let target_bytes = input.target_bytes(); let slice = target_bytes.as_slice(); - self.poison(slice.as_ptr() as usize, slice.len()); + // # Safety + // The ptr and length are correct. + unsafe { + self.poison(slice.as_ptr() as usize, slice.len()); + } self.reset_allocations(); Ok(()) @@ -276,7 +283,11 @@ impl AsanRuntime { } /// Make sure the specified memory is poisoned - pub fn poison(&mut self, address: usize, size: usize) { + /// + /// # Safety + /// The address needs to be a valid address, the size needs to be correct. + /// This will dereference at the address. + pub unsafe fn poison(&mut self, address: usize, size: usize) { Allocator::poison(self.allocator.map_to_shadow(address), size); } @@ -313,7 +324,7 @@ impl AsanRuntime { /// Register the current thread with the runtime, implementing shadow memory for its stack and /// tls mappings. #[allow(clippy::unused_self)] - #[cfg(not(target_os = "ios"))] + #[cfg(not(target_vendor = "apple"))] pub fn register_thread(&mut self) { let (stack_start, stack_end) = Self::current_stack(); let (tls_start, tls_end) = Self::current_tls(); @@ -330,7 +341,7 @@ impl AsanRuntime { /// Register the current thread with the runtime, implementing shadow memory for its stack mapping. 
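+    ///
+    /// On Apple targets only the stack is shadow-mapped here; the TLS mapping is only
+    /// registered on the other platforms (see the non-Apple `register_thread` above).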
#[allow(clippy::unused_self)] - #[cfg(target_os = "ios")] + #[cfg(target_vendor = "apple")] pub fn register_thread(&mut self) { let (stack_start, stack_end) = Self::current_stack(); self.allocator @@ -339,7 +350,7 @@ impl AsanRuntime { log::info!("registering thread with stack {stack_start:x}:{stack_end:x}"); } - /// Get the maximum stack size for the current stack + // /// Get the maximum stack size for the current stack // #[must_use] // #[cfg(target_vendor = "apple")] // fn max_stack_size() -> usize { @@ -347,7 +358,7 @@ impl AsanRuntime { // rlim_cur: 0, // rlim_max: 0, // }; - // assert!(unsafe { getrlimit(RLIMIT_STACK, addr_of_mut!(stack_rlimit)) } == 0); + // assert!(unsafe { getrlimit(RLIMIT_STACK, &raw mut stack_rlimit) } == 0); // // stack_rlimit.rlim_cur as usize // } @@ -360,7 +371,7 @@ impl AsanRuntime { // rlim_cur: 0, // rlim_max: 0, // }; - // assert!(unsafe { getrlimit64(RLIMIT_STACK, addr_of_mut!(stack_rlimit)) } == 0); + // assert!(unsafe { getrlimit64(RLIMIT_STACK, &raw mut stack_rlimit) } == 0); // // stack_rlimit.rlim_cur as usize // } @@ -372,13 +383,17 @@ impl AsanRuntime { fn range_for_address(address: usize) -> (usize, usize) { let mut start = 0; let mut end = 0; - RangeDetails::enumerate_with_prot(PageProtection::NoAccess, &mut |range: &RangeDetails| { + + RangeDetails::enumerate_with_prot(PageProtection::Read, &mut |range: &RangeDetails| { let range_start = range.memory_range().base_address().0 as usize; let range_end = range_start + range.memory_range().size(); if range_start <= address && range_end >= address { start = range_start; end = range_end; - // I want to stop iteration here + return false; + } + if address < start { + //if the address is less than the start then we cannot find it return false; } true @@ -400,54 +415,24 @@ impl AsanRuntime { #[must_use] pub fn current_stack() -> (usize, usize) { let mut stack_var = 0xeadbeef; - let stack_address = addr_of_mut!(stack_var) as usize; + let stack_address = &raw mut stack_var as usize; // let range_details = RangeDetails::with_address(stack_address as u64).unwrap(); // Write something to (hopefully) make sure the val isn't optimized out + unsafe { write_volatile(&mut stack_var, 0xfadbeef); } - let mut range = None; - for area in mmap_rs::MemoryAreas::open(None).unwrap() { - let area_ref = area.as_ref().unwrap(); - if area_ref.start() <= stack_address && stack_address <= area_ref.end() { - range = Some((area_ref.end() - 1024 * 1024, area_ref.end())); - break; - } - } - if let Some((start, end)) = range { - // #[cfg(unix)] - // { - // let max_start = end - Self::max_stack_size(); - // - // let flags = ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE; - // #[cfg(not(target_vendor = "apple"))] - // let flags = flags | MapFlags::MAP_STACK; - // - // if start != max_start { - // let mapping = unsafe { - // mmap( - // NonZeroUsize::new(max_start), - // NonZeroUsize::new(start - max_start).unwrap(), - // ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, - // flags, - // -1, - // 0, - // ) - // }; - // assert!(mapping.unwrap() as usize == max_start); - // } - // (max_start, end) - // } - // #[cfg(windows)] - (start, end) - } else { - panic!("Couldn't find stack mapping!"); - } + + let range = Self::range_for_address(stack_address); + + assert_ne!(range.0, 0, "Couldn't find stack mapping!"); + + (range.1 - 1024 * 1024, range.1) } /// Determine the tls start, end for the currently running thread #[must_use] - #[cfg(not(target_os = "ios"))] + #[cfg(not(target_vendor = "apple"))] fn current_tls() -> (usize, 
usize) { let tls_address = unsafe { tls_ptr() } as usize; @@ -488,13 +473,14 @@ impl AsanRuntime { #[allow(clippy::too_many_lines)] pub fn register_hooks(&mut self, gum: &Gum) { let mut interceptor = Interceptor::obtain(gum); + let module = Module::obtain(gum); macro_rules! hook_func { //No library case ($name:ident, ($($param:ident : $param_type:ty),*), $return_type:ty) => { paste::paste! { log::trace!("Hooking {}", stringify!($name)); - let target_function = frida_gum::Module::find_export_by_name(None, stringify!($name)).expect("Failed to find function"); + let target_function = module.find_export_by_name(None, stringify!($name)).expect("Failed to find function"); static [<$name:snake:upper _PTR>]: std::sync::OnceLock $return_type> = std::sync::OnceLock::new(); @@ -506,22 +492,18 @@ impl AsanRuntime { let this = &mut *(invocation.replacement_data().unwrap().0 as *mut AsanRuntime); //is this necessary? The stalked return address will always be the real return address // let real_address = this.real_address_for_stalked(invocation.return_addr()); - let original = [<$name:snake:upper _PTR>].get().unwrap(); - if this.hooks_enabled { - let previous_hook_state = this.hooks_enabled; - this.hooks_enabled = false; - let ret = this.[](*original, $($param),*); - this.hooks_enabled = previous_hook_state; - ret - } else { + let original = [<$name:snake:upper _PTR>].get().unwrap(); - let previous_hook_state = this.hooks_enabled; - this.hooks_enabled = false; - let ret = (original)($($param),*); - this.hooks_enabled = previous_hook_state; - ret - } + if !ASAN_IN_HOOK.get() && this.hooks_enabled { + ASAN_IN_HOOK.set(true); + let ret = this.[](*original, $($param),*); + ASAN_IN_HOOK.set(false); + ret + } else { + let ret = (original)($($param),*); + ret } + } let self_ptr = core::ptr::from_ref(self) as usize; let _ = interceptor.replace( @@ -538,7 +520,7 @@ impl AsanRuntime { paste::paste! { log::trace!("Hooking {}:{}", $lib, stringify!($name)); - let target_function = frida_gum::Module::find_export_by_name(Some($lib), stringify!($name)).expect("Failed to find function"); + let target_function = module.find_export_by_name(Some($lib), stringify!($name)).expect("Failed to find function"); static [<$lib_ident:snake:upper _ $name:snake:upper _PTR>]: std::sync::OnceLock $return_type> = std::sync::OnceLock::new(); @@ -551,18 +533,13 @@ impl AsanRuntime { //is this necessary? The stalked return address will always be the real return address // let real_address = this.real_address_for_stalked(invocation.return_addr()); let original = [<$lib_ident:snake:upper _ $name:snake:upper _PTR>].get().unwrap(); - if this.hooks_enabled { - let previous_hook_state = this.hooks_enabled; - this.hooks_enabled = false; + if !ASAN_IN_HOOK.get() && this.hooks_enabled { + ASAN_IN_HOOK.set(true); let ret = this.[](*original, $($param),*); - this.hooks_enabled = previous_hook_state; + ASAN_IN_HOOK.set(false); ret } else { - - let previous_hook_state = this.hooks_enabled; - this.hooks_enabled = false; let ret = (original)($($param),*); - this.hooks_enabled = previous_hook_state; ret } } @@ -585,7 +562,7 @@ impl AsanRuntime { ($name:ident, ($($param:ident : $param_type:ty),*), $return_type:ty) => { paste::paste! 
{ log::trace!("Hooking {}", stringify!($name)); - let target_function = frida_gum::Module::find_export_by_name(None, stringify!($name)).expect("Failed to find function"); + let target_function = module.find_export_by_name(None, stringify!($name)).expect("Failed to find function"); static [<$name:snake:upper _PTR>]: std::sync::OnceLock $return_type> = std::sync::OnceLock::new(); @@ -598,18 +575,15 @@ impl AsanRuntime { let mut invocation = Interceptor::current_invocation(); let this = &mut *(invocation.replacement_data().unwrap().0 as *mut AsanRuntime); let original = [<$name:snake:upper _PTR>].get().unwrap(); - - if this.hooks_enabled && this.[]($($param),*){ - let previous_hook_state = this.hooks_enabled; - this.hooks_enabled = false; + //don't check if hooks are enabled as there are certain cases where we want to run the hook even if we are out of the program + //For example, sometimes libafl will allocate certain things during the run and free them after the run. This results in a bug where a buffer will come from libafl-frida alloc and be freed in the normal allocator. + if !ASAN_IN_HOOK.get() && this.[]($($param),*){ + ASAN_IN_HOOK.set(true); let ret = this.[](*original, $($param),*); - this.hooks_enabled = previous_hook_state; + ASAN_IN_HOOK.set(false); ret } else { - let previous_hook_state = this.hooks_enabled; - this.hooks_enabled = false; let ret = (original)($($param),*); - this.hooks_enabled = previous_hook_state; ret } @@ -628,7 +602,7 @@ impl AsanRuntime { ($lib:literal, $lib_ident:ident, $name:ident, ($($param:ident : $param_type:ty),*), $return_type:ty) => { paste::paste! { log::trace!("Hooking {}:{}", $lib, stringify!($name)); - let target_function = frida_gum::Module::find_export_by_name(Some($lib), stringify!($name)).expect("Failed to find function"); + let target_function = module.find_export_by_name(Some($lib), stringify!($name)).expect("Failed to find function"); static [<$lib_ident:snake:upper _ $name:snake:upper _PTR>]: std::sync::OnceLock $return_type> = std::sync::OnceLock::new(); @@ -641,18 +615,15 @@ impl AsanRuntime { let mut invocation = Interceptor::current_invocation(); let this = &mut *(invocation.replacement_data().unwrap().0 as *mut AsanRuntime); let original = [<$lib_ident:snake:upper _ $name:snake:upper _PTR>].get().unwrap(); - - if this.hooks_enabled && this.[]($($param),*){ - let previous_hook_state = this.hooks_enabled; - this.hooks_enabled = false; + //don't check if hooks are enabled as there are certain cases where we want to run the hook even if we are out of the program + //For example, sometimes libafl will allocate certain things during the run and free them after the run. This results in a bug where a buffer will come from libafl-frida alloc and be freed in the normal allocator. + if !ASAN_IN_HOOK.get() && this.[]($($param),*){ + ASAN_IN_HOOK.set(true); let ret = this.[](*original, $($param),*); - this.hooks_enabled = previous_hook_state; + ASAN_IN_HOOK.set(false); ret } else { - let previous_hook_state = this.hooks_enabled; - this.hooks_enabled = false; let ret = (original)($($param),*); - this.hooks_enabled = previous_hook_state; ret } @@ -720,7 +691,7 @@ impl AsanRuntime { macro_rules! hook_heap_windows { ($libname:literal, $lib_ident:ident) => { log::info!("Hooking allocator functions in {}", $libname); - for export in Module::enumerate_exports($libname) { + for export in module.enumerate_exports($libname) { // log::trace!("- {}", export.name); match &export.name[..] 
{ "NtGdiCreateCompatibleDC" => { @@ -946,7 +917,7 @@ impl AsanRuntime { macro_rules! hook_cpp { ($libname:literal, $lib_ident:ident) => { log::info!("Hooking c++ functions in {}", $libname); - for export in Module::enumerate_exports($libname) { + for export in module.enumerate_exports($libname) { match &export.name[..] { "_Znam" => { hook_func!($libname, $lib_ident, _Znam, (size: usize), *mut c_void); @@ -1766,14 +1737,14 @@ impl AsanRuntime { macro_rules! shadow_check { ($ops:ident, $width:expr) => {dynasm!($ops ; .arch aarch64 -// ; brk #0xe + //; brk #0xe ; stp x2, x3, [sp, #-0x10]! ; mov x1, xzr // ; add x1, xzr, x1, lsl #shadow_bit ; add x1, x1, x0, lsr #3 ; ubfx x1, x1, #0, #(shadow_bit + 1) ; mov x2, #1 - ; add x1, x1, x2, lsl #shadow_bit + ; add x1, x1, x2, lsl #shadow_bit //x1 contains the offset of the shadow byte ; ldr w1, [x1, #0] //w1 contains our shadow check ; and x0, x0, #7 //x0 is the offset for unaligned accesses ; rev32 x1, x1 @@ -1917,13 +1888,13 @@ impl AsanRuntime { // Ignore eh_frame_cie for amd64 // See discussions https://github.com/AFLplusplus/LibAFL/pull/331 ;->accessed_address: - ; .dword 0x0 + ; .i32 0x0 ; self_addr: - ; .qword core::ptr::from_mut(self) as *mut c_void as i64 + ; .i64 core::ptr::from_mut(self) as *mut c_void as i64 ; self_regs_addr: - ; .qword addr_of_mut!(self.regs) as i64 + ; .i64 &raw mut self.regs as i64 ; trap_func: - ; .qword AsanRuntime::handle_trap as *mut c_void as i64 + ; .i64 AsanRuntime::handle_trap as *mut c_void as i64 ); self.blob_report = Some(ops_report.finalize().unwrap().into_boxed_slice()); @@ -1972,7 +1943,7 @@ impl AsanRuntime { ; mov x25, x1 // address of instrumented instruction. ; str x25, [x28, 0xf8] - ; .dword 0xd53b4218u32 as i32 // mrs x24, nzcv + ; .i32 0xd53b4218u32 as i32 // mrs x24, nzcv ; ldp x0, x1, [sp, 0x20] ; stp x0, x1, [x28] @@ -1994,7 +1965,7 @@ impl AsanRuntime { ; ldr x1, >trap_func ; blr x1 - ; .dword 0xd51b4218u32 as i32 // msr nzcv, x24 + ; .i32 0xd51b4218u32 as i32 // msr nzcv, x24 ; ldr x0, >self_regs_addr ; ldp x2, x3, [x0, #0x10] ; ldp x4, x5, [x0, #0x20] @@ -2018,15 +1989,15 @@ impl AsanRuntime { ; br x1 // go back to the 'return address' ; self_addr: - ; .qword core::ptr::from_mut(self) as *mut c_void as i64 + ; .i64 core::ptr::from_mut(self) as *mut c_void as i64 ; self_regs_addr: - ; .qword addr_of_mut!(self.regs) as i64 + ; .i64 &raw mut self.regs as i64 ; trap_func: - ; .qword AsanRuntime::handle_trap as *mut c_void as i64 + ; .i64 AsanRuntime::handle_trap as *mut c_void as i64 ; register_frame_func: - ; .qword __register_frame as *mut c_void as i64 + ; .i64 __register_frame as *mut c_void as i64 ; eh_frame_cie_addr: - ; .qword addr_of_mut!(self.eh_frame) as i64 + ; .i64 &raw mut self.eh_frame as i64 ); self.eh_frame = [ 0x14, 0, 0x00527a01, 0x011e7c01, 0x001f0c1b, // diff --git a/libafl_frida/src/asan/errors.rs b/libafl_frida/src/asan/errors.rs index 2e8de86622..4e7078054f 100644 --- a/libafl_frida/src/asan/errors.rs +++ b/libafl_frida/src/asan/errors.rs @@ -14,11 +14,10 @@ use frida_gum::interceptor::Interceptor; use frida_gum::ModuleDetails; use libafl::{ corpus::Testcase, - events::EventFirer, executors::ExitKind, - feedbacks::Feedback, - inputs::{HasTargetBytes, UsesInput}, - observers::{Observer, ObserversTuple}, + feedbacks::{Feedback, StateInitializer}, + inputs::HasTargetBytes, + observers::Observer, state::State, Error, HasMetadata, }; @@ -579,11 +578,8 @@ pub enum AsanErrorsObserver { Static, } -impl Observer for AsanErrorsObserver -where - S: UsesInput, -{ - fn pre_exec(&mut 
self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { +impl Observer for AsanErrorsObserver { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { AsanErrors::get_mut_blocking().clear(); Ok(()) @@ -609,6 +605,7 @@ impl AsanErrorsObserver { /// /// # Safety /// The field should not be accessed multiple times at the same time (i.e., from different threads)! + #[must_use] pub fn from_static_asan_errors() -> Self { Self::Static } @@ -650,24 +647,23 @@ pub struct AsanErrorsFeedback { phantom: PhantomData, } -impl Feedback for AsanErrorsFeedback +impl StateInitializer for AsanErrorsFeedback {} + +impl Feedback for AsanErrorsFeedback where S: State + Debug, S::Input: HasTargetBytes, + OT: MatchNameRef, { #[allow(clippy::wrong_self_convention)] - fn is_interesting( + fn is_interesting( &mut self, _state: &mut S, _manager: &mut EM, _input: &S::Input, observers: &OT, _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + ) -> Result { let observer = observers .get(&self.observer_handle) .expect("An AsanErrorsFeedback needs an AsanErrorsObserver"); @@ -680,16 +676,13 @@ where } } - fn append_metadata( + fn append_metadata( &mut self, _state: &mut S, _manager: &mut EM, _observers: &OT, testcase: &mut Testcase, - ) -> Result<(), Error> - where - OT: ObserversTuple, - { + ) -> Result<(), Error> { if let Some(errors) = &self.errors { testcase.add_metadata(errors.clone()); } diff --git a/libafl_frida/src/asan/hook_funcs.rs b/libafl_frida/src/asan/hook_funcs.rs index c5b1f2efee..3d5a122f61 100644 --- a/libafl_frida/src/asan/hook_funcs.rs +++ b/libafl_frida/src/asan/hook_funcs.rs @@ -1,3 +1,5 @@ +#![allow(clippy::used_underscore_items)] + //! The allocator hooks for address sanitizer. use std::ffi::c_void; @@ -1235,8 +1237,10 @@ impl AsanRuntime { res } + /// # Safety + /// `addr` will get dereferenced. #[inline] - pub fn hook_munmap( + pub unsafe fn hook_munmap( &mut self, original: extern "C" fn(addr: *const c_void, length: usize) -> i32, addr: *const c_void, diff --git a/libafl_frida/src/cmplog_rt.rs b/libafl_frida/src/cmplog_rt.rs index c6f1c2c283..b80a7a4f56 100644 --- a/libafl_frida/src/cmplog_rt.rs +++ b/libafl_frida/src/cmplog_rt.rs @@ -1,3 +1,5 @@ +//! The [`FRIDA`](https://frida.re) `CmpLog` runtime +//! //! Functionality for [`frida`](https://frida.re)-based binary-only `CmpLog`. //! With it, a fuzzer can collect feedback about each compare that happened in the target //! This allows the fuzzer to potentially solve the compares, if a compare value is directly @@ -6,8 +8,6 @@ #[cfg(target_arch = "aarch64")] use core::ffi::c_void; -#[cfg(all(feature = "cmplog", target_arch = "x86_64"))] -use std::collections::HashMap; use std::rc::Rc; use dynasmrt::dynasm; @@ -23,6 +23,8 @@ use frida_gum::{ }; use frida_gum_sys::Insn; #[cfg(all(feature = "cmplog", target_arch = "x86_64"))] +use hashbrown::HashMap; +#[cfg(all(feature = "cmplog", target_arch = "x86_64"))] use iced_x86::{ BlockEncoder, Code, DecoderOptions, Instruction, InstructionBlock, MemoryOperand, MemorySize, OpKind, Register, @@ -122,7 +124,7 @@ impl FridaRuntime for CmpLogRuntime { fn init( &mut self, _gum: &frida_gum::Gum, - _ranges: &RangeMap, + _ranges: &RangeMap, _module_map: &Rc, ) { self.generate_instrumentation_blobs(); @@ -216,7 +218,7 @@ impl CmpLogRuntime { ; stp x26, x27, [sp, #-0x10]! ; stp x28, x29, [sp, #-0x10]! ; stp x30, xzr, [sp, #-0x10]! 
- ; .dword 0xd53b4218u32 as i32 // mrs x24, nzcv + ; .u32 0xd53b4218_u32 // mrs x24, nzcv // jump to rust based population of the lists ; mov x2, x0 ; adr x3, >done @@ -224,7 +226,7 @@ impl CmpLogRuntime { ; ldr x0, >self_addr ; blr x4 // restore the reg state before returning to the caller - ; .dword 0xd51b4218u32 as i32 // msr nzcv, x24 + ; .u32 0xd51b4218_u32 // msr nzcv, x24 ; ldp x30, xzr, [sp], #0x10 ; ldp x28, x29, [sp], #0x10 ; ldp x26, x27, [sp], #0x10 @@ -242,9 +244,9 @@ impl CmpLogRuntime { ; ldp x2, x3, [sp], #0x10 ; b >done ; self_addr: - ; .qword core::ptr::from_mut(self) as *mut c_void as i64 + ; .u64 core::ptr::from_mut(self) as *mut c_void as u64 ; populate_lists: - ; .qword CmpLogRuntime::populate_lists as *mut c_void as i64 + ; .u64 CmpLogRuntime::populate_lists as *mut c_void as u64 ; done: );}; } diff --git a/libafl_frida/src/coverage_rt.rs b/libafl_frida/src/coverage_rt.rs index 971d2ee64d..4f1e17a031 100644 --- a/libafl_frida/src/coverage_rt.rs +++ b/libafl_frida/src/coverage_rt.rs @@ -1,5 +1,5 @@ //! Functionality regarding binary-only coverage collection. -use core::ptr::addr_of_mut; + use std::{cell::RefCell, marker::PhantomPinned, pin::Pin, rc::Rc}; #[cfg(target_arch = "aarch64")] @@ -37,7 +37,7 @@ impl FridaRuntime for CoverageRuntime { fn init( &mut self, _gum: &frida_gum::Gum, - _ranges: &RangeMap, + _ranges: &RangeMap, _module_map: &Rc, ) { } @@ -62,6 +62,7 @@ impl FridaRuntime for CoverageRuntime { impl CoverageRuntime { /// Create a new coverage runtime #[must_use] + #[allow(clippy::large_stack_arrays)] pub fn new() -> Self { Self(Rc::pin(RefCell::new(CoverageRuntimeInner { map: [0_u8; MAP_SIZE], @@ -82,8 +83,8 @@ impl CoverageRuntime { #[allow(clippy::cast_possible_wrap)] pub fn generate_inline_code(&mut self, h64: u64) -> Box<[u8]> { let mut borrow = self.0.borrow_mut(); - let prev_loc_ptr = addr_of_mut!(borrow.previous_pc); - let map_addr_ptr = addr_of_mut!(borrow.map); + let prev_loc_ptr = &raw mut borrow.previous_pc; + let map_addr_ptr = &raw mut borrow.map; let mut ops = dynasmrt::VecAssembler::::new(0); dynasm!(ops ; .arch aarch64 @@ -123,13 +124,13 @@ impl CoverageRuntime { ; b >end ;map_addr: - ;.qword map_addr_ptr as i64 + ;.i64 map_addr_ptr as i64 ;previous_loc: - ;.qword prev_loc_ptr as i64 + ;.i64 prev_loc_ptr as i64 ;loc: - ;.qword h64 as i64 + ;.i64 h64 as i64 ;loc_shr: - ;.qword (h64 >> 1) as i64 + ;.i64 (h64 >> 1) as i64 ;end: ); let ops_vec = ops.finalize().unwrap(); @@ -140,8 +141,8 @@ impl CoverageRuntime { #[cfg(target_arch = "x86_64")] pub fn generate_inline_code(&mut self, h64: u64) -> Box<[u8]> { let mut borrow = self.0.borrow_mut(); - let prev_loc_ptr = addr_of_mut!(borrow.previous_pc); - let map_addr_ptr = addr_of_mut!(borrow.map); + let prev_loc_ptr = &raw mut borrow.previous_pc; + let map_addr_ptr = &raw mut borrow.map; let mut ops = dynasmrt::VecAssembler::::new(0); dynasm!(ops ; .arch x64 @@ -202,7 +203,7 @@ impl CoverageRuntime { // // Since we also need to spill some registers in order to update our // coverage map, in the event of a long branch, we can simply re-use - // these spilt registers. This, however, means we need to retard the + // these spilt registers. This, however, means we need to reset the // code writer so that we can overwrite the so-called "restoration // prologue". 
#[cfg(target_arch = "aarch64")] diff --git a/libafl_frida/src/drcov_rt.rs b/libafl_frida/src/drcov_rt.rs index 4b250cc3c9..5da459b314 100644 --- a/libafl_frida/src/drcov_rt.rs +++ b/libafl_frida/src/drcov_rt.rs @@ -23,7 +23,7 @@ pub struct DrCovRuntime { /// The basic blocks of this execution pub drcov_basic_blocks: Vec, /// The memory ranges of this target - ranges: RangeMap, + ranges: RangeMap, coverage_directory: PathBuf, } @@ -32,7 +32,7 @@ impl FridaRuntime for DrCovRuntime { fn init( &mut self, _gum: &frida_gum::Gum, - ranges: &RangeMap, + ranges: &RangeMap, _module_map: &Rc, ) { self.ranges = ranges.clone(); @@ -61,8 +61,8 @@ impl FridaRuntime for DrCovRuntime { let mut coverage_hasher = RandomState::with_seeds(0, 0, 0, 0).build_hasher(); for bb in &self.drcov_basic_blocks { - coverage_hasher.write_usize(bb.start); - coverage_hasher.write_usize(bb.end); + coverage_hasher.write_u64(bb.start); + coverage_hasher.write_u64(bb.end); } let coverage_hash = coverage_hasher.finish(); diff --git a/libafl_frida/src/executor.rs b/libafl_frida/src/executor.rs index 57629fa060..0a113e83a0 100644 --- a/libafl_frida/src/executor.rs +++ b/libafl_frida/src/executor.rs @@ -9,13 +9,14 @@ use frida_gum::{ }; #[cfg(windows)] use libafl::{ + corpus::Corpus, executors::{hooks::inprocess::InProcessHooks, inprocess::HasInProcessHooks}, state::{HasCorpus, HasSolutions}, }; use libafl::{ executors::{Executor, ExitKind, HasObservers, InProcessExecutor}, inputs::HasTargetBytes, - observers::{ObserversTuple, UsesObservers}, + observers::ObserversTuple, state::{HasExecutions, State, UsesState}, Error, }; @@ -33,26 +34,26 @@ where H: FnMut(&S::Input) -> ExitKind, S::Input: HasTargetBytes, S: State, - OT: ObserversTuple, + OT: ObserversTuple, 'b: 'a, { base: InProcessExecutor<'a, H, OT, S>, // thread_id for the Stalker thread_id: Option, /// Frida's dynamic rewriting engine - stalker: Stalker<'a>, + stalker: Stalker, /// User provided callback for instrumentation helper: &'c mut FridaInstrumentationHelper<'b, RT>, followed: bool, _phantom: PhantomData<&'b u8>, } -impl<'a, 'b, 'c, H, OT, RT, S> Debug for FridaInProcessExecutor<'a, 'b, 'c, H, OT, RT, S> +impl Debug for FridaInProcessExecutor<'_, '_, '_, H, OT, RT, S> where H: FnMut(&S::Input) -> ExitKind, S: State, S::Input: HasTargetBytes, - OT: ObserversTuple + Debug, + OT: ObserversTuple + Debug, { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { f.debug_struct("FridaInProcessExecutor") @@ -63,14 +64,13 @@ where } } -impl<'a, 'b, 'c, EM, H, OT, RT, S, Z> Executor - for FridaInProcessExecutor<'a, 'b, 'c, H, OT, RT, S> +impl Executor for FridaInProcessExecutor<'_, '_, '_, H, OT, RT, S> where EM: UsesState, H: FnMut(&S::Input) -> ExitKind, S: State + HasExecutions, S::Input: HasTargetBytes, - OT: ObserversTuple, + OT: ObserversTuple, RT: FridaRuntimeTuple, Z: UsesState, { @@ -120,33 +120,24 @@ where } } -impl<'a, 'b, 'c, H, OT, RT, S> UsesObservers for FridaInProcessExecutor<'a, 'b, 'c, H, OT, RT, S> +impl UsesState for FridaInProcessExecutor<'_, '_, '_, H, OT, RT, S> where H: FnMut(&S::Input) -> ExitKind, - OT: ObserversTuple, - S: State, - S::Input: HasTargetBytes, -{ - type Observers = OT; -} - -impl<'a, 'b, 'c, H, OT, RT, S> UsesState for FridaInProcessExecutor<'a, 'b, 'c, H, OT, RT, S> -where - H: FnMut(&S::Input) -> ExitKind, - OT: ObserversTuple, + OT: ObserversTuple, S: State, S::Input: HasTargetBytes, { type State = S; } -impl<'a, 'b, 'c, H, OT, RT, S> HasObservers for FridaInProcessExecutor<'a, 'b, 'c, H, OT, RT, S> +impl HasObservers for 
FridaInProcessExecutor<'_, '_, '_, H, OT, RT, S> where H: FnMut(&S::Input) -> ExitKind, S::Input: HasTargetBytes, S: State, - OT: ObserversTuple, + OT: ObserversTuple, { + type Observers = OT; #[inline] fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { self.base.observers() @@ -163,7 +154,7 @@ where H: FnMut(&S::Input) -> ExitKind, S: State, S::Input: HasTargetBytes, - OT: ObserversTuple, + OT: ObserversTuple, RT: FridaRuntimeTuple, { /// Creates a new [`FridaInProcessExecutor`]. @@ -196,12 +187,12 @@ where // Include the current module (the fuzzer) in stalked ranges. We clone the ranges so that // we don't add it to the INSTRUMENTED ranges. let mut ranges = helper.ranges().clone(); - for module in frida_gum::Module::enumerate_modules() { + for module in frida_gum::Module::obtain(gum).enumerate_modules() { if module.base_address < Self::new as usize - && (Self::new as usize) < module.base_address + module.size + && (Self::new as usize as u64) < module.base_address as u64 + module.size as u64 { ranges.insert( - module.base_address..(module.base_address + module.size), + module.base_address as u64..(module.base_address as u64 + module.size as u64), (0xffff, "fuzzer".to_string()), ); break; @@ -210,11 +201,13 @@ where log::info!("disable_excludes: {:}", helper.disable_excludes); if !helper.disable_excludes { - for range in ranges.gaps(&(0..usize::MAX)) { + for range in ranges.gaps(&(0..u64::MAX)) { log::info!("excluding range: {:x}-{:x}", range.start, range.end); stalker.exclude(&MemoryRange::new( NativePointer(range.start as *mut c_void), - range.end - range.start, + usize::try_from(range.end - range.start).unwrap_or_else(|err| { + panic!("Address out of usize range: {range:?} - {err}") + }), )); } } @@ -240,8 +233,10 @@ where H: FnMut(&S::Input) -> ExitKind, S: State + HasSolutions + HasCorpus + HasExecutions, S::Input: HasTargetBytes, - OT: ObserversTuple, + OT: ObserversTuple, RT: FridaRuntimeTuple, + ::Solutions: Corpus, //delete me + <::Corpus as Corpus>::Input: Clone, //delete me { /// the timeout handler #[inline] diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index 6d82732745..f0e6ae64d1 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -1,7 +1,8 @@ use core::fmt::{self, Debug, Formatter}; use std::{ cell::{Ref, RefCell, RefMut}, - fs, + ffi::CStr, + fs::{self, read_to_string}, path::{Path, PathBuf}, rc::Rc, }; @@ -9,13 +10,17 @@ use std::{ use frida_gum::{ instruction_writer::InstructionWriter, stalker::{StalkerIterator, StalkerOutput, Transformer}, - Gum, Module, ModuleDetails, ModuleMap, PageProtection, + Backend, Gum, ModuleDetails, ModuleMap, Script, }; +use frida_gum_sys::gchar; use libafl::{ inputs::{HasTargetBytes, Input}, Error, }; -use libafl_bolts::{cli::FuzzerOptions, tuples::MatchFirstType}; +use libafl_bolts::{ + cli::{FridaScriptBackend, FuzzerOptions}, + tuples::MatchFirstType, +}; use libafl_targets::drcov::DrCovBasicBlock; #[cfg(unix)] use nix::sys::mman::{mmap_anonymous, MapFlags, ProtFlags}; @@ -37,7 +42,7 @@ pub trait FridaRuntime: 'static + Debug { fn init( &mut self, gum: &Gum, - ranges: &RangeMap, + ranges: &RangeMap, module_map: &Rc, ); /// Deinitialization @@ -56,7 +61,7 @@ pub trait FridaRuntimeTuple: MatchFirstType + Debug { fn init_all( &mut self, gum: &Gum, - ranges: &RangeMap, + ranges: &RangeMap, module_map: &Rc, ); @@ -74,7 +79,7 @@ impl FridaRuntimeTuple for () { fn init_all( &mut self, _gum: &Gum, - _ranges: &RangeMap, + _ranges: &RangeMap, _module_map: &Rc, ) { } @@ -96,7 +101,7 
@@ where fn init_all( &mut self, gum: &Gum, - ranges: &RangeMap, + ranges: &RangeMap, module_map: &Rc, ) { self.0.init(gum, ranges, module_map); @@ -147,10 +152,37 @@ pub struct FridaInstrumentationHelperBuilder { impl FridaInstrumentationHelperBuilder { /// Create a new [`FridaInstrumentationHelperBuilder`] + #[must_use] pub fn new() -> Self { Self::default() } + /// Load a script + /// + /// See [`Script::new`] for details + #[must_use] + pub fn load_script( + self, + backend: FridaScriptBackend, + path: &Path, + callback: Option, + ) -> Self { + let name = path + .file_name() + .and_then(|name| name.to_str()) + .expect("Failed to get script file name from path: {path:}"); + let script_prefix = include_str!("script.js"); + let file_contents = read_to_string(path).expect("Failed to read script: {path:}"); + let payload = script_prefix.to_string() + &file_contents; + let gum = Gum::obtain(); + let backend = match backend { + FridaScriptBackend::V8 => Backend::obtain_v8(&gum), + FridaScriptBackend::QuickJS => Backend::obtain_qjs(&gum), + }; + Script::load(&backend, name, payload, callback).unwrap(); + self + } + /// Enable or disable the [`Stalker`](https://frida.re/docs/stalker/) /// /// Required for all instrumentation, such as coverage collection, `ASan`, and `CmpLog`. @@ -285,20 +317,23 @@ impl FridaInstrumentationHelperBuilder { module.range().base_address().0 as usize ); let range = module.range(); - let start = range.base_address().0 as usize; - ranges - .borrow_mut() - .insert(start..(start + range.size()), (i as u16, module.path())); + let start = range.base_address().0 as u64; + ranges.borrow_mut().insert( + start..(start + range.size() as u64), + (i as u16, module.path()), + ); } for skip in skip_ranges { match skip { - SkipRange::Absolute(range) => ranges.borrow_mut().remove(range), + SkipRange::Absolute(range) => ranges + .borrow_mut() + .remove(range.start as u64..range.end as u64), SkipRange::ModuleRelative { name, range } => { let module_details = ModuleDetails::with_name(name).unwrap(); - let lib_start = module_details.range().base_address().0 as usize; - ranges - .borrow_mut() - .remove((lib_start + range.start)..(lib_start + range.end)); + let lib_start = module_details.range().base_address().0 as u64; + ranges.borrow_mut().remove( + (lib_start + range.start as u64)..(lib_start + range.end as u64), + ); } } } @@ -356,7 +391,7 @@ impl Default for FridaInstrumentationHelperBuilder { /// An helper that feeds `FridaInProcessExecutor` with edge-coverage instrumentation pub struct FridaInstrumentationHelper<'a, RT: 'a> { transformer: Transformer<'a>, - ranges: Rc>>, + ranges: Rc>>, runtimes: Rc>, stalker_enabled: bool, pub(crate) disable_excludes: bool, @@ -373,17 +408,14 @@ impl Debug for FridaInstrumentationHelper<'_, RT> { } } -/// Helper function to get the size of a module's CODE section from frida -#[must_use] -pub fn get_module_size(module_name: &str) -> usize { - let mut code_size = 0; - let code_size_ref = &mut code_size; - Module::enumerate_ranges(module_name, PageProtection::ReadExecute, move |details| { - *code_size_ref = details.memory_range().size(); - true - }); - - code_size +/// A callback function to test calling back from FRIDA's JavaScript scripting support +/// # Safety +/// This function receives a raw pointer to a C string +#[no_mangle] +pub unsafe extern "C" fn test_function(message: *const gchar) { + if let Ok(msg) = CStr::from_ptr(message).to_str() { + println!("{msg}"); + } } fn pathlist_contains_module(list: I, module: &ModuleDetails) -> bool @@ 
-404,11 +436,12 @@ where }) } -impl<'a> FridaInstrumentationHelper<'a, ()> { +impl FridaInstrumentationHelper<'_, ()> { /// Create a builder to initialize a [`FridaInstrumentationHelper`]. /// /// See the documentation of [`FridaInstrumentationHelperBuilder`] /// for more details. + #[must_use] pub fn builder() -> FridaInstrumentationHelperBuilder { FridaInstrumentationHelperBuilder::default() } @@ -428,7 +461,7 @@ where .iter() .map(PathBuf::from) .collect::>(); - FridaInstrumentationHelper::builder() + let builder = FridaInstrumentationHelper::builder() .enable_stalker(options.cmplog || options.asan || !options.disable_coverage) .disable_excludes(options.disable_excludes) .instrument_module_if(move |module| pathlist_contains_module(&harness, module)) @@ -440,14 +473,28 @@ where name: name.clone(), range: *offset..*offset + 4, } - })) - .build(gum, runtimes) + })); + + let builder = if let Some(script) = &options.script { + builder.load_script( + options.backend.unwrap_or_default(), + script, + Some(FridaInstrumentationHelper::::script_callback), + ) + } else { + builder + }; + builder.build(gum, runtimes) + } + + fn script_callback(msg: &str, bytes: &[u8]) { + println!("msg: {msg:}, bytes: {bytes:x?}"); } #[allow(clippy::too_many_lines)] fn build_transformer( gum: &'a Gum, - ranges: &Rc>>, + ranges: &Rc>>, runtimes: &Rc>, ) -> Transformer<'a> { let ranges = Rc::clone(ranges); @@ -468,7 +515,7 @@ where fn transform( basic_block: StalkerIterator, output: &StalkerOutput, - ranges: &Rc>>, + ranges: &Rc>>, runtimes_unborrowed: &Rc>, decoder: InstDecoder, ) { @@ -481,7 +528,7 @@ where let address = instr.address(); // log::trace!("x - block @ {:x} transformed to {:x}", address, output.writer().pc()); //the ASAN check needs to be done before the hook_rt check due to x86 insns such as call [mem] - if ranges.borrow().contains_key(&(address as usize)) { + if ranges.borrow().contains_key(&address) { let mut runtimes = (*runtimes_unborrowed).borrow_mut(); if first { first = false; @@ -590,8 +637,8 @@ where { log::trace!("{basic_block_start:#016X}:{basic_block_size:X}"); rt.drcov_basic_blocks.push(DrCovBasicBlock::new( - basic_block_start as usize, - basic_block_start as usize + basic_block_size, + basic_block_start, + basic_block_start + basic_block_size as u64, )); } } @@ -644,6 +691,7 @@ where } /// Returns ref to the Transformer + #[must_use] pub fn transformer(&self) -> &Transformer<'a> { &self.transformer } @@ -652,7 +700,7 @@ where pub fn init( &mut self, gum: &'a Gum, - ranges: &RangeMap, + ranges: &RangeMap, module_map: &Rc, ) { (*self.runtimes) @@ -671,6 +719,7 @@ where } /// If stalker is enabled + #[must_use] pub fn stalker_enabled(&self) -> bool { self.stalker_enabled } @@ -684,12 +733,13 @@ where } /// Ranges - pub fn ranges(&self) -> Ref> { + #[must_use] + pub fn ranges(&self) -> Ref> { self.ranges.borrow() } /// Mutable ranges - pub fn ranges_mut(&mut self) -> RefMut> { + pub fn ranges_mut(&mut self) -> RefMut> { (*self.ranges).borrow_mut() } } diff --git a/libafl_frida/src/lib.rs b/libafl_frida/src/lib.rs index 90888dcd5c..acc6a148be 100644 --- a/libafl_frida/src/lib.rs +++ b/libafl_frida/src/lib.rs @@ -1,30 +1,12 @@ /*! The [`Frida`](https://frida.re) executor is a binary-only mode for `LibAFL`. + It can report coverage and, on supported architectures, even reports memory access errors. Additional documentation is available in [the `LibAFL` book](https://aflplus.plus/libafl-book/advanced_features/frida.html). 
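Throughout the helper above, the instrumented-range bookkeeping moves from `usize` to `u64` keys. A minimal sketch of how that `RangeMap` is used (module ranges map to an `(id, path)` tuple; the addresses and names here are made up for illustration):

```rust
use rangemap::RangeMap;

fn ranges_demo() {
    // Instrumented module ranges, keyed by u64 addresses as in the helper.
    let mut ranges: RangeMap<u64, (u16, String)> = RangeMap::new();
    ranges.insert(0x1000..0x5000, (0, "target.so".to_string()));
    ranges.insert(0x7000..0x9000, (0xffff, "fuzzer".to_string()));

    // The stalker transform callback checks membership with the raw instruction address.
    let address: u64 = 0x1234;
    assert!(ranges.contains_key(&address));

    // Everything that is not instrumented is excluded from stalking via the gaps,
    // converting lengths back to usize for `MemoryRange::new`.
    for gap in ranges.gaps(&(0..u64::MAX)) {
        let _len = usize::try_from(gap.end - gap.start).expect("range too large");
    }
}
```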
*/ #![cfg_attr(feature = "document-features", doc = document_features::document_features!())] -#![forbid(unexpected_cfgs)] -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(clippy::all)] -#![deny(clippy::pedantic)] -#![allow( - clippy::unreadable_literal, - clippy::type_repetition_in_bounds, - clippy::missing_errors_doc, - clippy::cast_possible_truncation, - clippy::used_underscore_binding, - clippy::ptr_as_ptr, - clippy::missing_panics_doc, - clippy::missing_docs_in_private_items, - clippy::module_name_repetitions, - clippy::unreadable_literal, - clippy::ptr_cast_constness, - clippy::must_use_candidate, - clippy::too_many_arguments -)] #![cfg_attr(not(test), warn( missing_debug_implementations, missing_docs, @@ -81,7 +63,6 @@ pub mod coverage_rt; pub mod pthread_hook; #[cfg(feature = "cmplog")] -/// The frida cmplog runtime pub mod cmplog_rt; /// The `LibAFL` firda helper @@ -96,8 +77,8 @@ pub mod executor; pub mod utils; // for parsing asan and cmplog cores -use libafl_bolts::core_affinity::{get_core_ids, CoreId, Cores}; +use libafl_bolts::core_affinity::{get_core_ids, CoreId, Cores}; /// A representation of the various Frida options #[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] #[allow(clippy::struct_excessive_bools)] @@ -345,6 +326,7 @@ impl Default for FridaOptions { #[cfg(test)] mod tests { + use core::num::NonZero; use std::sync::OnceLock; use clap::Parser; @@ -523,7 +505,10 @@ mod tests { ); let mutator = StdScheduledMutator::new(tuple_list!(BitFlipMutator::new())); - let mut stages = tuple_list!(StdMutationalStage::with_max_iterations(mutator, 1)); + let mut stages = tuple_list!(StdMutationalStage::with_max_iterations( + mutator, + NonZero::new(1).unwrap() + )); log::info!("Starting fuzzing!"); fuzzer @@ -564,17 +549,23 @@ mod tests { SimpleStdoutLogger::set_logger().unwrap(); + let out_dir = std::env::var_os("OUT_DIR").unwrap(); + let out_dir = out_dir.to_string_lossy().to_string(); // Check if the harness dynamic library is present, if not - skip the test #[cfg(unix)] - let test_harness = "./test_harness.so"; + let test_harness_name = "test_harness.so"; #[cfg(windows)] - let test_harness = ".\\test_harness.dll"; + let test_harness_name = "test_harness.dll"; + + let test_harness = std::path::Path::new(&out_dir).join(test_harness_name); + assert!( - std::path::Path::new(test_harness).exists(), - "Skipping test, {test_harness} not found" + test_harness.exists(), + "Skipping test, {} not found", + test_harness.to_str().unwrap() ); - GUM.set(unsafe { Gum::obtain() }) + GUM.set(Gum::obtain()) .unwrap_or_else(|_| panic!("Failed to initialize Gum")); let simulated_args = vec![ "libafl_frida_test", @@ -582,7 +573,7 @@ mod tests { "--disable-excludes", "--continue-on-error", "-H", - test_harness, + test_harness.to_str().unwrap(), ]; let options: FuzzerOptions = FuzzerOptions::try_parse_from(simulated_args).unwrap(); unsafe { test_asan(&options) } diff --git a/libafl_frida/src/pthread_hook.rs b/libafl_frida/src/pthread_hook.rs index c436251e8b..284a7bcdaa 100644 --- a/libafl_frida/src/pthread_hook.rs +++ b/libafl_frida/src/pthread_hook.rs @@ -63,10 +63,17 @@ impl PreviousHook { unsafe impl Sync for PreviousHook {} // TODO: This could use a RwLock as well +/// The previous hook static mut PREVIOUS_HOOK: PreviousHook = PreviousHook(std::ptr::null()); +/// The currently set hook static CURRENT_HOOK: RwLock> = RwLock::new(None); +/// Get the pointer to the previous hook, mut +fn previous_hook_ptr_mut() -> *mut PreviousHook { + &raw mut PREVIOUS_HOOK +} + extern "C" fn 
pthread_introspection_hook( event: libc::c_uint, thread: libc::pthread_t, @@ -76,7 +83,7 @@ extern "C" fn pthread_introspection_hook( if let Some(ref hook) = *CURRENT_HOOK.read().unwrap() { hook(event.try_into().unwrap(), thread, addr, size); } - unsafe { PREVIOUS_HOOK.dispatch(event, thread, addr, size) }; + unsafe { (*previous_hook_ptr_mut()).dispatch(event, thread, addr, size) }; } /// Closure type for `pthread_introspection` hooks. @@ -159,7 +166,7 @@ where // Allow because we're sure this isn't from a different code generation unit. if !(prev).is_null() && prev != pthread_introspection_hook as _ { unsafe { - PREVIOUS_HOOK.set(prev as *const pthread_introspection_hook_t); + (*previous_hook_ptr_mut()).set(prev as *const pthread_introspection_hook_t); } } } @@ -176,7 +183,9 @@ where /// # Safety /// Potential data race when if called at the same time as `install` or `reset` from another thread pub unsafe fn reset() { - unsafe { PREVIOUS_HOOK.reset() }; + unsafe { + (*previous_hook_ptr_mut()).reset(); + }; } /// The following tests fail if they are not run sequentially. diff --git a/libafl_frida/src/script.js b/libafl_frida/src/script.js new file mode 100644 index 0000000000..6b4be28a3e --- /dev/null +++ b/libafl_frida/src/script.js @@ -0,0 +1,13 @@ +"use strict"; +class LibAfl { + static testFunction(message) { + const buf = Memory.allocUtf8String(message); + LibAfl.jsApiTestFunction(buf); + } + + static jsApiGetFunction(name, retType, argTypes) { + const addr = Module.getExportByName(null, name); + return new NativeFunction(addr, retType, argTypes); + } +}; +LibAfl.jsApiTestFunction = LibAfl.jsApiGetFunction("test_function", "void", ["pointer"]); diff --git a/libafl_frida/src/utils.rs b/libafl_frida/src/utils.rs index 5f5000c220..364b32a527 100644 --- a/libafl_frida/src/utils.rs +++ b/libafl_frida/src/utils.rs @@ -162,6 +162,7 @@ const X86_64_REGS: [(RegSpec, X86Register); 34] = [ /// Get the value of a register given a context #[cfg(target_arch = "x86_64")] +#[must_use] pub fn get_register(context: &CpuContext, reg: X86Register) -> u64 { match reg { X86Register::Rax => context.rax(), @@ -184,9 +185,10 @@ pub fn get_register(context: &CpuContext, reg: X86Register) -> u64 { } } -/// The writer registers -/// frida registers: -/// capstone registers: +/// The writer registers. +/// +/// `FRIDA` registers: +/// `capstone` registers: #[cfg(target_arch = "x86_64")] #[must_use] #[inline] @@ -201,7 +203,7 @@ pub fn writer_register(reg: RegSpec) -> X86Register { X86Register::None } -/// Translates a frida instruction to a disassembled instruction. +/// Translates a `FRIDA` instruction to a disassembled instruction. 
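The `previous_hook_ptr_mut` indirection in `pthread_hook.rs` above exists so that the `static mut` is never turned into a reference (which newer toolchains warn about via `static_mut_refs`); all accesses go through a raw pointer instead. A minimal illustration of the pattern, with hypothetical names:

```rust
static mut PREVIOUS: usize = 0;

/// Get a raw pointer to the static without creating a reference to it.
fn previous_ptr_mut() -> *mut usize {
    &raw mut PREVIOUS
}

fn update(value: usize) {
    // SAFETY: illustration only; real code must rule out concurrent access.
    unsafe { *previous_ptr_mut() = value };
}
```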
#[cfg(target_arch = "x86_64")] pub(crate) fn frida_to_cs( decoder: InstDecoder, @@ -223,42 +225,38 @@ pub(crate) fn frida_to_cs( } } +/// Get the `base`, `idx`, `scale`, `disp` for each operand #[cfg(target_arch = "x86_64")] -/// Get the base, idx, scale, disp for each operand +#[must_use] pub fn operand_details(operand: &Operand) -> Option<(X86Register, X86Register, u8, i32)> { match operand { - Operand::RegDeref(base) => { + Operand::MemDeref { base } => { let base = writer_register(*base); Some((base, X86Register::None, 0, 0)) } - Operand::RegDisp(base, disp) => { + Operand::Disp { base, disp } => { let base = writer_register(*base); Some((base, X86Register::None, 0, *disp)) } - Operand::RegScale(base, scale) => { - let base = writer_register(*base); - Some((base, X86Register::None, *scale, 0)) - } - Operand::RegIndexBase(base, index) => { - let base = writer_register(*base); + Operand::MemIndexScale { index, scale } => { let index = writer_register(*index); - Some((base, index, 0, 0)) + Some((X86Register::None, index, *scale, 0)) } - Operand::RegIndexBaseDisp(base, index, disp) => { - let base = writer_register(*base); + Operand::MemIndexScaleDisp { index, scale, disp } => { let index = writer_register(*index); - Some((base, index, 0, *disp)) + Some((X86Register::None, index, *scale, *disp)) } - Operand::RegScaleDisp(base, scale, disp) => { - let base = writer_register(*base); - Some((base, X86Register::None, *scale, *disp)) - } - Operand::RegIndexBaseScale(base, index, scale) => { + Operand::MemBaseIndexScale { base, index, scale } => { let base = writer_register(*base); let index = writer_register(*index); Some((base, index, *scale, 0)) } - Operand::RegIndexBaseScaleDisp(base, index, scale, disp) => { + Operand::MemBaseIndexScaleDisp { + base, + index, + scale, + disp, + } => { let base = writer_register(*base); let index = writer_register(*index); Some((base, index, *scale, *disp)) @@ -267,19 +265,20 @@ pub fn operand_details(operand: &Operand) -> Option<(X86Register, X86Register, u } } -#[cfg(target_arch = "x86_64")] /// Get the immediate value of the operand +#[cfg(target_arch = "x86_64")] +#[must_use] pub fn immediate_value(operand: &Operand) -> Option { match operand { - Operand::ImmediateI8(v) => Some(i64::from(*v)), - Operand::ImmediateU8(v) => Some(i64::from(*v)), - Operand::ImmediateI16(v) => Some(i64::from(*v)), - Operand::ImmediateI32(v) => Some(i64::from(*v)), - Operand::ImmediateU16(v) => Some(i64::from(*v)), - Operand::ImmediateU32(v) => Some(i64::from(*v)), - Operand::ImmediateI64(v) => Some(*v), + Operand::ImmediateI8 { imm } => Some(i64::from(*imm)), + Operand::ImmediateU8 { imm } => Some(i64::from(*imm)), + Operand::ImmediateI16 { imm } => Some(i64::from(*imm)), + Operand::ImmediateU16 { imm } => Some(i64::from(*imm)), + Operand::ImmediateI32 { imm } => Some(i64::from(*imm)), + Operand::ImmediateU32 { imm } => Some(i64::from(*imm)), + Operand::ImmediateI64 { imm } => Some(*imm), #[allow(clippy::cast_possible_wrap)] - Operand::ImmediateU64(v) => Some(*v as i64), + Operand::ImmediateU64 { imm } => Some(*imm as i64), _ => None, } } @@ -294,8 +293,9 @@ pub enum AccessType { Write, } -#[cfg(target_arch = "x86_64")] /// Disassemble "count" number of instructions +#[cfg(target_arch = "x86_64")] +#[must_use] pub fn disas_count(decoder: &InstDecoder, data: &[u8], count: usize) -> Vec { let mut counter = count; let mut ret = vec![]; @@ -316,6 +316,7 @@ pub fn disas_count(decoder: &InstDecoder, data: &[u8], count: usize) -> Vec Vec { let mut ret = vec![]; diff --git 
a/libafl_frida/src/windows_hooks.rs b/libafl_frida/src/windows_hooks.rs index 9ceb2b06b7..f02e2f2f12 100644 --- a/libafl_frida/src/windows_hooks.rs +++ b/libafl_frida/src/windows_hooks.rs @@ -21,15 +21,16 @@ unsafe extern "C" fn unhandled_exception_filter_detour( } /// Initialize the hooks pub fn initialize(gum: &Gum) { + let module = Module::obtain(gum); let is_processor_feature_present = - Module::find_export_by_name(Some("kernel32.dll"), "IsProcessorFeaturePresent"); + module.find_export_by_name(Some("kernel32.dll"), "IsProcessorFeaturePresent"); let is_processor_feature_present = is_processor_feature_present.unwrap(); assert!( !is_processor_feature_present.is_null(), "IsProcessorFeaturePresent not found" ); let unhandled_exception_filter = - Module::find_export_by_name(Some("kernel32.dll"), "UnhandledExceptionFilter"); + module.find_export_by_name(Some("kernel32.dll"), "UnhandledExceptionFilter"); let unhandled_exception_filter = unhandled_exception_filter.unwrap(); assert!( !unhandled_exception_filter.is_null(), diff --git a/libafl_frida/test_harness.cpp b/libafl_frida/test_harness.cpp index fd85df219f..0344727942 100644 --- a/libafl_frida/test_harness.cpp +++ b/libafl_frida/test_harness.cpp @@ -4,7 +4,8 @@ #ifdef _MSC_VER #include - + #include + #include BOOL APIENTRY DllMain(HANDLE hModule, DWORD ul_reason_for_call, LPVOID lpReserved) { (void)hModule; @@ -13,7 +14,7 @@ BOOL APIENTRY DllMain(HANDLE hModule, DWORD ul_reason_for_call, return TRUE; } - #define EXTERN extern "C" __declspec(dllexport) + #define EXTERN extern "C" __declspec(dllexport) #else #define EXTERN extern "C" { @@ -37,9 +38,6 @@ EXTERN int heap_uaf_write(const uint8_t *_data, size_t _size) { return 0; } -#include -#include - static volatile bool stop = false; EXTERN int heap_oob_read(const uint8_t *_data, size_t _size) { diff --git a/libafl_intelpt/Cargo.toml b/libafl_intelpt/Cargo.toml new file mode 100644 index 0000000000..200882507f --- /dev/null +++ b/libafl_intelpt/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "libafl_intelpt" +version.workspace = true +authors = ["Marco Cavenati "] +description = "Intel Processor Trace wrapper for libafl" +repository = "https://github.com/AFLplusplus/LibAFL/" +edition = "2021" +license.workspace = true +readme = "./README.md" +keywords = ["fuzzing", "testing", "security", "intelpt"] +categories = ["development-tools::testing", "no-std"] + +[features] +default = ["std", "libipt"] +std = ["libafl_bolts/std"] + +libipt = ["std", "dep:libipt"] + +[dev-dependencies] +static_assertions = { workspace = true } + +[target.'cfg(target_os = "linux" )'.dev-dependencies] +nix = { workspace = true } +proc-maps = "0.4.0" + +[dependencies] +arbitrary-int = { workspace = true } +bitbybit = { workspace = true } +libafl_bolts = { workspace = true } +libc = { workspace = true } +libipt = { workspace = true, optional = true } +log = { workspace = true } +num_enum = { workspace = true, default-features = false } +num-traits = { workspace = true, default-features = false } +raw-cpuid = { version = "11.1.0" } + +[target.'cfg(target_os = "linux" )'.dependencies] +caps = { version = "0.5.5" } +perf-event-open-sys = { version = "4.0.0" } + +[lints] +workspace = true diff --git a/libafl_intelpt/README.md b/libafl_intelpt/README.md new file mode 100644 index 0000000000..237d591a8b --- /dev/null +++ b/libafl_intelpt/README.md @@ -0,0 +1,5 @@ +# Intel Processor Trace (PT) low level code + +This module is a wrapper around the IntelPT kernel driver, exposing functionalities specifically crafted for libafl. 
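A minimal usage sketch (Linux only; error handling abbreviated; the image name and map size are arbitrary):

```rust
use libafl_bolts::Error;
use libafl_intelpt::{error_from_pt_error, IntelPT};
use libipt::Image;

fn trace_current_process() -> Result<(), Error> {
    // Trace the current process on all CPUs (the builder defaults).
    let mut pt = IntelPT::builder().pid(None).build()?;

    pt.enable_tracing()?;
    // ... run the code to be traced ...
    pt.disable_tracing()?;

    // Decode the collected trace into a coverage map. In a real setup the Image
    // must first be populated with the executable mappings of the traced process.
    let mut image = Image::new(Some("example")).map_err(error_from_pt_error)?;
    let mut map = vec![0u16; 0x1000];
    pt.decode_traces_into_map(&mut image, &mut map)?;
    Ok(())
}
```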
+ +At the moment only linux hosts are supported. diff --git a/libafl_intelpt/src/lib.rs b/libafl_intelpt/src/lib.rs new file mode 100644 index 0000000000..c6a0a140c9 --- /dev/null +++ b/libafl_intelpt/src/lib.rs @@ -0,0 +1,1028 @@ +//! Intel Processor Trace (PT) low level code +//! +//! This crate interacts with the linux kernel (specifically with perf) and therefore it only works +//! on linux hosts + +// Just in case this crate will have real no_std support in the future +#![no_std] +#![cfg(target_arch = "x86_64")] +#![cfg(feature = "std")] +#![cfg(feature = "libipt")] + +#[macro_use] +extern crate std; + +use std::{ + borrow::ToOwned, + string::{String, ToString}, + vec::Vec, +}; +#[cfg(target_os = "linux")] +use std::{ + ffi::{CStr, CString}, + fmt::Debug, + format, fs, + ops::RangeInclusive, + os::{ + fd::{AsRawFd, FromRawFd, OwnedFd}, + raw::c_void, + }, + path::Path, + ptr, slice, + sync::LazyLock, +}; + +#[cfg(target_os = "linux")] +use arbitrary_int::u4; +#[cfg(target_os = "linux")] +use bitbybit::bitfield; +#[cfg(target_os = "linux")] +use caps::{CapSet, Capability}; +#[cfg(target_os = "linux")] +use libafl_bolts::ownedref::OwnedRefMut; +use libafl_bolts::Error; +use libipt::PtError; +#[cfg(target_os = "linux")] +use libipt::{ + block::BlockDecoder, AddrConfig, AddrFilter, AddrFilterBuilder, AddrRange, BlockFlags, + ConfigBuilder, Cpu, Image, PtErrorCode, Status, +}; +#[cfg(target_os = "linux")] +use num_enum::TryFromPrimitive; +#[cfg(target_os = "linux")] +use num_traits::{Euclid, SaturatingAdd}; +#[cfg(target_os = "linux")] +use perf_event_open_sys::{ + bindings::{perf_event_attr, perf_event_mmap_page, PERF_FLAG_FD_CLOEXEC}, + ioctls::{DISABLE, ENABLE, SET_FILTER}, + perf_event_open, +}; +use raw_cpuid::CpuId; + +/// Size of a memory page +pub const PAGE_SIZE: usize = 4096; + +#[cfg(target_os = "linux")] +const PT_EVENT_PATH: &str = "/sys/bus/event_source/devices/intel_pt"; + +#[cfg(target_os = "linux")] +static NR_ADDR_FILTERS: LazyLock> = LazyLock::new(|| { + // This info is available in two different files, use the second path as fail-over + let path = format!("{PT_EVENT_PATH}/nr_addr_filters"); + let path2 = format!("{PT_EVENT_PATH}/caps/num_address_ranges"); + let err = format!("Failed to read Intel PT number of address filters from {path} and {path2}"); + + let s = fs::read_to_string(&path); + if let Ok(s) = s { + let n = s.trim().parse::(); + if let Ok(n) = n { + return Ok(n); + } + } + + let s2 = fs::read_to_string(&path2).map_err(|_| err.clone())?; + s2.trim().parse::().map_err(|_| err) +}); + +#[cfg(target_os = "linux")] +static CURRENT_CPU: LazyLock> = LazyLock::new(|| { + let cpuid = CpuId::new(); + cpuid + .get_feature_info() + .map(|fi| Cpu::intel(fi.family_id().into(), fi.model_id(), fi.stepping_id())) +}); + +#[cfg(target_os = "linux")] +static PERF_EVENT_TYPE: LazyLock> = LazyLock::new(|| { + let path = format!("{PT_EVENT_PATH}/type"); + let s = fs::read_to_string(&path) + .map_err(|_| format!("Failed to read Intel PT perf event type from {path}"))?; + s.trim() + .parse::() + .map_err(|_| format!("Failed to parse Intel PT perf event type in {path}")) +}); + +/// Intel PT mode of operation with KVM +/// +/// Check out +/// for more details +#[cfg(target_os = "linux")] +#[derive(TryFromPrimitive, Debug)] +#[repr(i32)] +pub enum KvmPTMode { + /// trace both host/guest and output to host buffer + System = 0, + /// trace host and guest simultaneously and output to their respective buffer + HostGuest = 1, +} + +/// Intel Processor Trace (PT) +#[cfg(target_os = 
"linux")] +#[derive(Debug)] +pub struct IntelPT { + fd: OwnedFd, + perf_buffer: *mut c_void, + perf_aux_buffer: *mut c_void, + perf_buffer_size: usize, + perf_aux_buffer_size: usize, + aux_head: *mut u64, + aux_tail: *mut u64, + previous_decode_head: u64, + ip_filters: Vec>, +} + +#[cfg(target_os = "linux")] +impl IntelPT { + /// Create a default builder + /// + /// Checkout [`IntelPTBuilder::default()`] for more details + #[must_use] + pub fn builder() -> IntelPTBuilder { + IntelPTBuilder::default() + } + + /// Set filters based on Instruction Pointer (IP) + /// + /// Only instructions in `filters` ranges will be traced. + pub fn set_ip_filters(&mut self, filters: &[RangeInclusive]) -> Result<(), Error> { + let str_filter = filters + .iter() + .map(|filter| { + let size = filter.end() - filter.start(); + format!("filter {:#016x}/{:#016x} ", filter.start(), size) + }) + .reduce(|acc, s| acc + &s) + .unwrap_or_default(); + + // SAFETY: CString::from_vec_unchecked is safe because no null bytes are added to str_filter + let c_str_filter = unsafe { CString::from_vec_unchecked(str_filter.into_bytes()) }; + match unsafe { SET_FILTER(self.fd.as_raw_fd(), c_str_filter.into_raw()) } { + -1 => { + let availability = match availability() { + Ok(()) => String::new(), + Err(reasons) => format!(" Possible reasons: {reasons}"), + }; + Err(Error::last_os_error(format!( + "Failed to set IP filters.{availability}" + ))) + } + 0 => { + self.ip_filters = filters.to_vec(); + Ok(()) + } + ret => Err(Error::unsupported(format!( + "Failed to set IP filter, ioctl returned unexpected value {ret}" + ))), + } + } + + fn ip_filters_to_addr_filter(&self) -> AddrFilter { + let mut builder = AddrFilterBuilder::new(); + let mut iter = self + .ip_filters + .iter() + .map(|f| AddrRange::new(*f.start() as u64, *f.end() as u64, AddrConfig::FILTER)); + if let Some(f) = iter.next() { + builder.addr0(f); + if let Some(f) = iter.next() { + builder.addr1(f); + if let Some(f) = iter.next() { + builder.addr2(f); + if let Some(f) = iter.next() { + builder.addr3(f); + } + } + } + } + builder.finish() + } + + /// Start tracing + /// + /// Be aware that the tracing is not started on [`IntelPT`] construction. + pub fn enable_tracing(&mut self) -> Result<(), Error> { + match unsafe { ENABLE(self.fd.as_raw_fd(), 0) } { + -1 => { + let availability = match availability() { + Ok(()) => String::new(), + Err(reasons) => format!(" Possible reasons: {reasons}"), + }; + Err(Error::last_os_error(format!( + "Failed to enable tracing.{availability}" + ))) + } + 0 => Ok(()), + ret => Err(Error::unsupported(format!( + "Failed to enable tracing, ioctl returned unexpected value {ret}" + ))), + } + } + + /// Stop tracing + /// + /// This doesn't drop [`IntelPT`], the configuration will be preserved. 
+ pub fn disable_tracing(&mut self) -> Result<(), Error> { + match unsafe { DISABLE(self.fd.as_raw_fd(), 0) } { + -1 => Err(Error::last_os_error("Failed to disable tracing")), + 0 => Ok(()), + ret => Err(Error::unsupported(format!( + "Failed to disable tracing, ioctl returned unexpected value {ret}" + ))), + } + } + + // // let read_mem = |buf: &mut [u8], addr: u64| { + // // let src = addr as *const u8; + // // let dst = buf.as_mut_ptr(); + // // let size = buf.len(); + // // unsafe { + // // ptr::copy_nonoverlapping(src, dst, size); + // // } + // // }; + // #[allow(clippy::cast_possible_wrap)] + // fn decode_with_callback( + // &mut self, + // read_memory: F, + // copy_buffer: Option<&mut Vec>, + // ) -> Result, Error> { + // self.decode( + // Some(|buff: &mut [u8], addr: u64, _: Asid| { + // debug_assert!(i32::try_from(buff.len()).is_ok()); + // read_memory(buff, addr); + // buff.len() as i32 + // }), + // None, + // copy_buffer, + // ) + // } + + /// Fill the coverage map by decoding the PT traces + /// + /// This function consumes the traces. + pub fn decode_traces_into_map( + &mut self, + image: &mut Image, + map: &mut [T], + ) -> Result<(), Error> + where + T: SaturatingAdd + From + Debug, + { + let head = unsafe { self.aux_head.read_volatile() }; + let tail = unsafe { self.aux_tail.read_volatile() }; + if head < tail { + return Err(Error::unknown( + "Intel PT: aux buffer head is behind aux tail.", + )); + }; + if self.previous_decode_head < tail { + return Err(Error::unknown( + "Intel PT: aux previous head is behind aux tail.", + )); + }; + let len = (head - tail) as usize; + if len >= self.perf_aux_buffer_size { + log::warn!( + "The fuzzer run filled the entire PT buffer. Consider increasing the aux buffer \ + size or refining the IP filters." + ); + } + let skip = self.previous_decode_head - tail; + + let head_wrap = wrap_aux_pointer(head, self.perf_aux_buffer_size); + let tail_wrap = wrap_aux_pointer(tail, self.perf_aux_buffer_size); + + // after reading the data_head value, user space should issue an rmb() + // https://manpages.debian.org/bookworm/manpages-dev/perf_event_open.2.en.html#data_head + smp_rmb(); + + let mut data = if head_wrap >= tail_wrap { + unsafe { + let ptr = self.perf_aux_buffer.add(tail_wrap as usize) as *mut u8; + OwnedRefMut::Ref(slice::from_raw_parts_mut(ptr, len)) + } + } else { + // Head pointer wrapped, the trace is split + unsafe { self.join_split_trace(head_wrap, tail_wrap) } + }; + + let mut config = ConfigBuilder::new(data.as_mut()).map_err(error_from_pt_error)?; + config.filter(self.ip_filters_to_addr_filter()); + if let Some(cpu) = &*CURRENT_CPU { + config.cpu(*cpu); + } + let flags = BlockFlags::END_ON_CALL.union(BlockFlags::END_ON_JUMP); + config.flags(flags); + let mut decoder = BlockDecoder::new(&config.finish()).map_err(error_from_pt_error)?; + decoder + .set_image(Some(image)) + .map_err(error_from_pt_error)?; + + let mut previous_block_end_ip = 0; + let mut status; + 'sync: loop { + match decoder.sync_forward() { + Ok(s) => { + status = s; + Self::decode_blocks( + &mut decoder, + &mut status, + &mut previous_block_end_ip, + skip, + map, + )?; + } + Err(e) => { + if e.code() != PtErrorCode::Eos { + log::trace!("PT error in sync forward {e:?}"); + } + break 'sync; + } + }; + } + + // Advance the trace pointer up to the latest sync point, otherwise next execution's trace + // might not contain a PSB packet. 
+ decoder.sync_backward().map_err(error_from_pt_error)?; + let offset = decoder.sync_offset().map_err(error_from_pt_error)?; + unsafe { self.aux_tail.write_volatile(tail + offset) }; + self.previous_decode_head = head; + Ok(()) + } + + #[inline] + #[must_use] + unsafe fn join_split_trace(&self, head_wrap: u64, tail_wrap: u64) -> OwnedRefMut<[u8]> { + let first_ptr = self.perf_aux_buffer.add(tail_wrap as usize) as *mut u8; + let first_len = self.perf_aux_buffer_size - tail_wrap as usize; + let second_ptr = self.perf_aux_buffer as *mut u8; + let second_len = head_wrap as usize; + OwnedRefMut::Owned( + [ + slice::from_raw_parts(first_ptr, first_len), + slice::from_raw_parts(second_ptr, second_len), + ] + .concat() + .into_boxed_slice(), + ) + } + + #[inline] + fn decode_blocks( + decoder: &mut BlockDecoder<()>, + status: &mut Status, + previous_block_end_ip: &mut u64, + skip: u64, + map: &mut [T], + ) -> Result<(), Error> + where + T: SaturatingAdd + From + Debug, + { + 'block: loop { + while status.event_pending() { + match decoder.event() { + Ok((_, s)) => { + *status = s; + } + Err(e) => { + log::trace!("PT error in event {e:?}"); + break 'block; + } + }; + } + + match decoder.next() { + Ok((b, s)) => { + *status = s; + let offset = decoder.offset().map_err(error_from_pt_error)?; + + if !b.speculative() && skip < offset { + let id = hash_me(*previous_block_end_ip) ^ hash_me(b.ip()); + // SAFETY: the index is < map.len() since the modulo operation is applied + let map_loc = unsafe { map.get_unchecked_mut(id as usize % map.len()) }; + *map_loc = (*map_loc).saturating_add(&1u8.into()); + + *previous_block_end_ip = b.end_ip(); + } + } + Err(e) => { + if e.code() != PtErrorCode::Eos { + log::trace!("PT error in block next {e:?}"); + } + } + } + if status.eos() { + break 'block; + } + } + Ok(()) + } +} + +#[cfg(target_os = "linux")] +impl Drop for IntelPT { + fn drop(&mut self) { + unsafe { + let ret = libc::munmap(self.perf_aux_buffer, self.perf_aux_buffer_size); + assert_eq!(ret, 0, "Intel PT: Failed to unmap perf aux buffer"); + let ret = libc::munmap(self.perf_buffer, self.perf_buffer_size); + assert_eq!(ret, 0, "Intel PT: Failed to unmap perf buffer"); + } + } +} + +/// Builder for [`IntelPT`] +#[cfg(target_os = "linux")] +#[derive(Debug, Clone, PartialEq)] +pub struct IntelPTBuilder { + pid: Option, + cpu: i32, + exclude_kernel: bool, + exclude_hv: bool, + inherit: bool, + perf_buffer_size: usize, + perf_aux_buffer_size: usize, +} + +#[cfg(target_os = "linux")] +impl Default for IntelPTBuilder { + /// Create a default builder for [`IntelPT`] + /// + /// The default configuration corresponds to: + /// ```rust + /// use libafl_intelpt::{IntelPTBuilder, PAGE_SIZE}; + /// let builder = unsafe { std::mem::zeroed::() } + /// .pid(None) + /// .all_cpus() + /// .exclude_kernel(true) + /// .exclude_hv(true) + /// .inherit(false) + /// .perf_buffer_size(128 * PAGE_SIZE + PAGE_SIZE).unwrap() + /// .perf_aux_buffer_size(2 * 1024 * 1024).unwrap(); + /// assert_eq!(builder, IntelPTBuilder::default()); + /// ``` + fn default() -> Self { + Self { + pid: None, + cpu: -1, + exclude_kernel: true, + exclude_hv: true, + inherit: false, + perf_buffer_size: 128 * PAGE_SIZE + PAGE_SIZE, + perf_aux_buffer_size: 2 * 1024 * 1024, + } + } +} + +#[cfg(target_os = "linux")] +impl IntelPTBuilder { + /// Build the [`IntelPT`] struct + pub fn build(&self) -> Result { + self.check_config(); + let mut perf_event_attr = new_perf_event_attr_intel_pt()?; + perf_event_attr.set_exclude_kernel(self.exclude_kernel.into()); + 
perf_event_attr.set_exclude_hv(self.exclude_hv.into()); + perf_event_attr.set_inherit(self.inherit.into()); + + // SAFETY: perf_event_attr is properly initialized + let fd = match unsafe { + perf_event_open( + ptr::from_mut(&mut perf_event_attr), + self.pid.unwrap_or(0), + self.cpu, + -1, + PERF_FLAG_FD_CLOEXEC.into(), + ) + } { + -1 => { + let availability = match availability() { + Ok(()) => String::new(), + Err(reasons) => format!(" Possible reasons: {reasons}"), + }; + return Err(Error::last_os_error(format!( + "Failed to open Intel PT perf event.{availability}" + ))); + } + fd => { + // SAFETY: On success, perf_event_open() returns a new file descriptor. + // On error, -1 is returned, and it is checked above + unsafe { OwnedFd::from_raw_fd(fd) } + } + }; + + let perf_buffer = setup_perf_buffer(&fd, self.perf_buffer_size)?; + + // the first perf_buff page is a metadata page + let buff_metadata = perf_buffer.cast::(); + let aux_offset = unsafe { &raw mut (*buff_metadata).aux_offset }; + let aux_size = unsafe { &raw mut (*buff_metadata).aux_size }; + let data_offset = unsafe { &raw mut (*buff_metadata).data_offset }; + let data_size = unsafe { &raw mut (*buff_metadata).data_size }; + + unsafe { + aux_offset.write_volatile(next_page_aligned_addr( + data_offset.read_volatile() + data_size.read_volatile(), + )); + aux_size.write_volatile(self.perf_aux_buffer_size as u64); + } + + let perf_aux_buffer = unsafe { + setup_perf_aux_buffer(&fd, aux_size.read_volatile(), aux_offset.read_volatile())? + }; + + let aux_head = unsafe { &raw mut (*buff_metadata).aux_head }; + let aux_tail = unsafe { &raw mut (*buff_metadata).aux_tail }; + + let ip_filters = Vec::with_capacity(*NR_ADDR_FILTERS.as_ref().unwrap_or(&0) as usize); + + Ok(IntelPT { + fd, + perf_buffer, + perf_aux_buffer, + perf_buffer_size: self.perf_buffer_size, + perf_aux_buffer_size: self.perf_aux_buffer_size, + aux_head, + aux_tail, + previous_decode_head: 0, + ip_filters, + }) + } + + /// Warn if the configuration is not recommended + #[inline] + fn check_config(&self) { + if self.inherit && self.cpu == -1 { + log::warn!( + "IntelPT set up on all CPUs with process inheritance enabled. This configuration \ + is not recommended and might not work as expected" + ); + } + } + + #[must_use] + /// Set the process to be traced via its `PID`. Set to `None` to trace the current process. 
+ pub fn pid(mut self, pid: Option) -> Self { + self.pid = pid; + self + } + + #[must_use] + /// Set the CPU to be traced + /// + /// # Panics + /// + /// The function will panic if `cpu` is greater than `i32::MAX` + pub fn cpu(mut self, cpu: usize) -> Self { + self.cpu = cpu.try_into().unwrap(); + self + } + + #[must_use] + /// Trace all the CPUs + pub fn all_cpus(mut self) -> Self { + self.cpu = -1; + self + } + + #[must_use] + /// Do not trace kernel code + pub fn exclude_kernel(mut self, exclude_kernel: bool) -> Self { + self.exclude_kernel = exclude_kernel; + self + } + + #[must_use] + /// Do not trace Hypervisor code + pub fn exclude_hv(mut self, exclude_hv: bool) -> Self { + self.exclude_hv = exclude_hv; + self + } + + #[must_use] + /// Child processes are traced + pub fn inherit(mut self, inherit: bool) -> Self { + self.inherit = inherit; + self + } + + /// Set the size of the perf buffer + pub fn perf_buffer_size(mut self, perf_buffer_size: usize) -> Result { + let err = Err(Error::illegal_argument( + "IntelPT perf_buffer_size should be 1+2^n pages", + )); + if perf_buffer_size < PAGE_SIZE { + return err; + } + let (q, r) = (perf_buffer_size - PAGE_SIZE).div_rem_euclid(&PAGE_SIZE); + if !q.is_power_of_two() || r != 0 { + return err; + } + + self.perf_buffer_size = perf_buffer_size; + Ok(self) + } + + /// Set the size of the perf aux buffer (actual PT traces buffer) + pub fn perf_aux_buffer_size(mut self, perf_aux_buffer_size: usize) -> Result { + // todo:replace with is_multiple_of once stable + if perf_aux_buffer_size % PAGE_SIZE != 0 { + return Err(Error::illegal_argument( + "IntelPT perf_aux_buffer must be page aligned", + )); + } + if !perf_aux_buffer_size.is_power_of_two() { + return Err(Error::illegal_argument( + "IntelPT perf_aux_buffer must be a power of two", + )); + } + + self.perf_aux_buffer_size = perf_aux_buffer_size; + Ok(self) + } +} + +/// Perf event config for `IntelPT` +/// +/// (This is almost mapped to `IA32_RTIT_CTL MSR` by perf) +#[cfg(target_os = "linux")] +#[bitfield(u64, default = 0)] +struct PtConfig { + /// Disable call return address compression. AKA DisRETC in Intel SDM. + #[bit(11, rw)] + noretcomp: bool, + /// Indicates the frequency of PSB packets. AKA PSBFreq in Intel SDM. + #[bits(24..=27, rw)] + psb_period: u4, +} + +/// Number of address filters available on the running CPU +#[cfg(target_os = "linux")] +pub fn nr_addr_filters() -> Result { + NR_ADDR_FILTERS.clone() +} + +/// Check if Intel PT is available on the current system. +/// +/// Returns `Ok(())` if Intel PT is available and has the features used by `LibAFL`, otherwise +/// returns an `Err` containing a description of the reasons. +/// +/// If you use this with QEMU check out [`Self::availability_in_qemu()`] instead. +/// +/// Due to the numerous factors that can affect `IntelPT` availability, this function was +/// developed on a best-effort basis. +/// The outcome of these checks does not fully guarantee whether `IntelPT` will function or not. 
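Worked example of the size checks above, assuming `PAGE_SIZE` is 4096: `perf_buffer_size` must be one metadata page plus a power-of-two number of pages, and `perf_aux_buffer_size` must be a page-aligned power of two. A sketch mirroring the defaults:

```rust
use libafl_intelpt::{IntelPTBuilder, PAGE_SIZE};

fn sized_builder() -> IntelPTBuilder {
    IntelPTBuilder::default()
        // 1 + 2^6 pages -> accepted
        .perf_buffer_size(PAGE_SIZE + 64 * PAGE_SIZE)
        .unwrap()
        // 4 MiB: page aligned and a power of two -> accepted
        .perf_aux_buffer_size(4 * 1024 * 1024)
        .unwrap()
    // By contrast, `perf_buffer_size(4 * PAGE_SIZE)` (1 + 3 pages) or
    // `perf_aux_buffer_size(3 * PAGE_SIZE)` would be rejected.
}
```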
+pub fn availability() -> Result<(), String> { + let mut reasons = Vec::new(); + + let cpuid = CpuId::new(); + if let Some(vendor) = cpuid.get_vendor_info() { + if vendor.as_str() != "GenuineIntel" && vendor.as_str() != "GenuineIotel" { + reasons.push("Only Intel CPUs are supported".to_owned()); + } + } else { + reasons.push("Failed to read CPU vendor".to_owned()); + } + + if let Some(ef) = cpuid.get_extended_feature_info() { + if !ef.has_processor_trace() { + reasons.push("Intel PT is not supported by the CPU".to_owned()); + } + } else { + reasons.push("Failed to read CPU Extended Features".to_owned()); + } + + #[cfg(target_os = "linux")] + if let Err(r) = availability_in_linux() { + reasons.push(r); + } + #[cfg(not(target_os = "linux"))] + reasons.push("Only linux hosts are supported at the moment".to_owned()); + + if reasons.is_empty() { + Ok(()) + } else { + Err(reasons.join("; ")) + } +} + +/// Check if Intel PT is available on the current system and can be used in combination with +/// QEMU. +/// +/// If you don't use this with QEMU check out [`IntelPT::availability()`] instead. +pub fn availability_in_qemu_kvm() -> Result<(), String> { + let mut reasons = match availability() { + Err(s) => vec![s], + Ok(()) => Vec::new(), + }; + + #[cfg(target_os = "linux")] + { + let kvm_pt_mode_path = "/sys/module/kvm_intel/parameters/pt_mode"; + if let Ok(s) = fs::read_to_string(kvm_pt_mode_path) { + match s.trim().parse::().map(TryInto::try_into) { + Ok(Ok(KvmPTMode::System)) => (), + Ok(Ok(KvmPTMode::HostGuest)) => reasons.push(format!( + "KVM Intel PT mode must be set to {:?} `{}` to be used with libafl_qemu", + KvmPTMode::System, + KvmPTMode::System as i32 + )), + _ => reasons.push(format!( + "Failed to parse KVM Intel PT mode in {kvm_pt_mode_path}" + )), + } + }; + } + #[cfg(not(target_os = "linux"))] + reasons.push("Only linux hosts are supported at the moment".to_owned()); + + if reasons.is_empty() { + Ok(()) + } else { + Err(reasons.join("; ")) + } +} + +/// Convert [`PtError`] into [`Error`] +#[inline] +#[must_use] +pub fn error_from_pt_error(err: PtError) -> Error { + Error::unknown(err.to_string()) +} + +#[cfg(target_os = "linux")] +fn availability_in_linux() -> Result<(), String> { + let mut reasons = Vec::new(); + match linux_version() { + // https://docs.rs/perf-event-open-sys/4.0.0/perf_event_open_sys/#kernel-versions + Ok(ver) if ver >= (5, 19, 4) => {} + Ok((major, minor, patch)) => reasons.push(format!( + "Kernel version {major}.{minor}.{patch} is older than 5.19.4 and might not work." 
+ )), + Err(()) => reasons.push("Failed to retrieve kernel version".to_owned()), + } + + if let Err(e) = &*PERF_EVENT_TYPE { + reasons.push(e.clone()); + } + + if let Err(e) = &*NR_ADDR_FILTERS { + reasons.push(e.clone()); + } + + // official way of knowing if perf_event_open() support is enabled + // https://man7.org/linux/man-pages/man2/perf_event_open.2.html + let perf_event_support_path = "/proc/sys/kernel/perf_event_paranoid"; + if !Path::new(perf_event_support_path).exists() { + reasons.push(format!( + "perf_event_open() support is not enabled: {perf_event_support_path} not found" + )); + } + + // TODO check also the value of perf_event_paranoid, check which values are required by pt + // https://www.kernel.org/doc/Documentation/sysctl/kernel.txt + // also, looks like it is distribution dependent + // https://askubuntu.com/questions/1400874/what-does-perf-paranoia-level-four-do + // CAP_SYS_ADMIN might make this check useless + + match caps::read(None, CapSet::Permitted) { + Ok(current_capabilities) => { + let required_caps = [ + Capability::CAP_IPC_LOCK, + Capability::CAP_SYS_PTRACE, + Capability::CAP_SYS_ADMIN, // TODO: CAP_PERFMON doesn't look to be enough!? + Capability::CAP_SYSLOG, + ]; + + for rc in required_caps { + if !current_capabilities.contains(&rc) { + reasons.push(format!("Required capability {rc} missing")); + } + } + } + Err(e) => reasons.push(format!("Failed to read linux capabilities: {e}")), + }; + + if reasons.is_empty() { + Ok(()) + } else { + Err(reasons.join("; ")) + } +} + +#[cfg(target_os = "linux")] +fn new_perf_event_attr_intel_pt() -> Result { + let type_ = match &*PERF_EVENT_TYPE { + Ok(t) => Ok(*t), + Err(e) => Err(Error::unsupported(e.clone())), + }?; + let config = PtConfig::builder() + .with_noretcomp(true) + .with_psb_period(u4::new(0)) + .build() + .raw_value; + + let mut attr = perf_event_attr { + size: size_of::() as u32, + type_, + config, + ..Default::default() + }; + + // Do not enable tracing as soon as the perf_event_open syscall is issued + attr.set_disabled(true.into()); + + Ok(attr) +} + +#[cfg(target_os = "linux")] +fn setup_perf_buffer(fd: &OwnedFd, perf_buffer_size: usize) -> Result<*mut c_void, Error> { + match unsafe { + libc::mmap( + ptr::null_mut(), + perf_buffer_size, + libc::PROT_READ | libc::PROT_WRITE, + libc::MAP_SHARED, + fd.as_raw_fd(), + 0, + ) + } { + libc::MAP_FAILED => Err(Error::last_os_error("IntelPT: Failed to mmap perf buffer")), + mmap_addr => Ok(mmap_addr), + } +} + +#[cfg(target_os = "linux")] +fn setup_perf_aux_buffer(fd: &OwnedFd, size: u64, offset: u64) -> Result<*mut c_void, Error> { + match unsafe { + libc::mmap( + ptr::null_mut(), + size as usize, + // PROT_WRITE sets PT to stop when the buffer is full + libc::PROT_READ | libc::PROT_WRITE, + libc::MAP_SHARED, + fd.as_raw_fd(), + i64::try_from(offset)?, + ) + } { + libc::MAP_FAILED => Err(Error::last_os_error( + "IntelPT: Failed to mmap perf aux buffer", + )), + mmap_addr => Ok(mmap_addr), + } +} + +#[cfg(target_os = "linux")] +fn linux_version() -> Result<(usize, usize, usize), ()> { + let mut uname_data = libc::utsname { + sysname: [0; 65], + nodename: [0; 65], + release: [0; 65], + version: [0; 65], + machine: [0; 65], + domainname: [0; 65], + }; + + if unsafe { libc::uname(&mut uname_data) } != 0 { + return Err(()); + } + + let release = unsafe { CStr::from_ptr(uname_data.release.as_ptr()) }; + let mut parts = release + .to_bytes() + .split(|&c| c == b'.' 
|| c == b'-') + .take(3) + .map(|s| String::from_utf8_lossy(s).parse::()); + if let (Some(Ok(major)), Some(Ok(minor)), Some(Ok(patch))) = + (parts.next(), parts.next(), parts.next()) + { + Ok((major, minor, patch)) + } else { + Err(()) + } +} + +#[cfg(target_os = "linux")] +#[inline] +const fn next_page_aligned_addr(address: u64) -> u64 { + (address + PAGE_SIZE as u64 - 1) & !(PAGE_SIZE as u64 - 1) +} + +// copy pasted from libafl_qemu/src/modules/edges.rs +// adapted from https://xorshift.di.unimi.it/splitmix64.c +#[cfg(target_os = "linux")] +#[inline] +#[must_use] +const fn hash_me(mut x: u64) -> u64 { + x = (x ^ (x.overflowing_shr(30).0)) + .overflowing_mul(0xbf58476d1ce4e5b9) + .0; + x = (x ^ (x.overflowing_shr(27).0)) + .overflowing_mul(0x94d049bb133111eb) + .0; + x ^ (x.overflowing_shr(31).0) +} + +#[cfg(target_os = "linux")] +#[inline] +fn smp_rmb() { + // SAFETY: just a memory barrier + unsafe { + core::arch::asm!("lfence", options(nostack, preserves_flags)); + } +} + +#[cfg(target_os = "linux")] +#[inline] +const fn wrap_aux_pointer(ptr: u64, perf_aux_buffer_size: usize) -> u64 { + ptr & (perf_aux_buffer_size as u64 - 1) +} + +#[cfg(test)] +mod test { + #[cfg(target_os = "linux")] + use arbitrary_int::Number; + use static_assertions::assert_eq_size; + + use super::*; + + // Only 64-bit systems are supported, ensure we can use usize and u64 interchangeably + assert_eq_size!(usize, u64); + + /// Quick way to check if your machine is compatible with Intel PT's features used by libafl + /// + /// Simply run `cargo test intel_pt_check_availability -- --show-output` + #[test] + fn intel_pt_check_availability() { + print!("Intel PT availability:\t\t\t"); + match availability() { + Ok(()) => println!("✔"), + Err(e) => println!("❌\tReasons: {e}"), + } + + print!("Intel PT availability in QEMU/KVM:\t"); + match availability_in_qemu_kvm() { + Ok(()) => println!("✔"), + Err(e) => println!("❌\tReasons: {e}"), + } + } + + #[test] + #[cfg(target_os = "linux")] + fn intel_pt_builder_default_values_are_valid() { + let default = IntelPT::builder(); + IntelPT::builder() + .perf_buffer_size(default.perf_buffer_size) + .unwrap(); + IntelPT::builder() + .perf_aux_buffer_size(default.perf_aux_buffer_size) + .unwrap(); + } + + #[test] + #[cfg(target_os = "linux")] + fn intel_pt_pt_config_noretcomp_format() { + let ptconfig_noretcomp = PtConfig::DEFAULT.with_noretcomp(true).raw_value; + let path = format!("{PT_EVENT_PATH}/format/noretcomp"); + let s = fs::read_to_string(&path).expect("Failed to read Intel PT config noretcomp format"); + assert!( + s.starts_with("config:"), + "Unexpected Intel PT config noretcomp format" + ); + let bit = s["config:".len()..] + .trim() + .parse::() + .expect("Failed to parse Intel PT config noretcomp format"); + assert_eq!( + ptconfig_noretcomp, + 0b1 << bit, + "Unexpected Intel PT config noretcomp format" + ); + } + + #[test] + #[cfg(target_os = "linux")] + fn intel_pt_pt_config_psb_period_format() { + let ptconfig_psb_period = PtConfig::DEFAULT.with_psb_period(u4::MAX).raw_value; + let path = format!("{PT_EVENT_PATH}/format/psb_period"); + let s = + fs::read_to_string(&path).expect("Failed to read Intel PT config psb_period format"); + assert!( + s.starts_with("config:"), + "Unexpected Intel PT config psb_period format" + ); + let from = s["config:".len().."config:".len() + 2] + .parse::() + .expect("Failed to parse Intel PT config psb_period format"); + let to = s["config:".len() + 3..] 
+ .trim() + .parse::() + .expect("Failed to parse Intel PT config psb_period format"); + let mut format = 0; + for bit in from..=to { + format |= 0b1 << bit; + } + assert_eq!( + ptconfig_psb_period, format, + "Unexpected Intel PT config psb_period format" + ); + } +} diff --git a/libafl_intelpt/tests/integration_tests_linux.rs b/libafl_intelpt/tests/integration_tests_linux.rs new file mode 100644 index 0000000000..ebd6f7c109 --- /dev/null +++ b/libafl_intelpt/tests/integration_tests_linux.rs @@ -0,0 +1,95 @@ +#![cfg(feature = "std")] +#![cfg(feature = "libipt")] +#![cfg(target_os = "linux")] + +use std::{arch::asm, process}; + +use libafl_intelpt::{availability, IntelPT}; +use libipt::Image; +use nix::{ + sys::{ + signal::{kill, raise, Signal}, + wait::{waitpid, WaitPidFlag}, + }, + unistd::{fork, ForkResult}, +}; +use proc_maps::get_process_maps; + +/// To run this test ensure that the executable has the required capabilities. +/// This can be achieved with the script `./run_integration_tests_linux_with_caps.sh` +#[test] +fn intel_pt_trace_fork() { + if let Err(reason) = availability() { + // Mark as `skipped` once this will be possible https://github.com/rust-lang/rust/issues/68007 + println!("Intel PT is not available, skipping test. Reasons:"); + println!("{reason}"); + return; + } + + let pid = match unsafe { fork() } { + Ok(ForkResult::Parent { child }) => child, + Ok(ForkResult::Child) => { + raise(Signal::SIGSTOP).expect("Failed to stop the process"); + // This will generate a sequence of tnt packets containing 255 taken branches + unsafe { + let mut count = 0; + asm!( + "2:", + "add {0:r}, 1", + "cmp {0:r}, 255", + "jle 2b", + inout(reg) count, + options(nostack) + ); + let _ = count; + } + process::exit(0); + } + Err(e) => panic!("Fork failed {e}"), + }; + + let pt_builder = IntelPT::builder().pid(Some(pid.as_raw())); + let mut pt = pt_builder.build().expect("Failed to create IntelPT"); + pt.enable_tracing().expect("Failed to enable tracing"); + + waitpid(pid, Some(WaitPidFlag::WUNTRACED)).expect("Failed to wait for the child process"); + let maps = get_process_maps(pid.into()).unwrap(); + kill(pid, Signal::SIGCONT).expect("Failed to continue the process"); + + waitpid(pid, None).expect("Failed to wait for the child process"); + pt.disable_tracing().expect("Failed to disable tracing"); + + let mut image = Image::new(Some("test_trace_pid")).unwrap(); + for map in maps { + if map.is_exec() && map.filename().is_some() { + match image.add_file( + map.filename().unwrap().to_str().unwrap(), + map.offset as u64, + map.size() as u64, + None, + map.start() as u64, + ) { + Err(e) => println!( + "Error adding mapping for {:?}: {:?}, skipping", + map.filename().unwrap(), + e + ), + Ok(()) => println!( + "mapping for {:?} added successfully {:#x} - {:#x}", + map.filename().unwrap(), + map.start(), + map.start() + map.size() + ), + } + } + } + + let mut map = vec![0u16; 0x10_00]; + pt.decode_traces_into_map(&mut image, &mut map).unwrap(); + + let assembly_jump_id = map.iter().position(|count| *count >= 254); + assert!( + assembly_jump_id.is_some(), + "Assembly jumps not found in traces" + ); +} diff --git a/libafl_intelpt/tests/run_integration_tests_linux_with_caps.sh b/libafl_intelpt/tests/run_integration_tests_linux_with_caps.sh new file mode 100755 index 0000000000..78a1f41bee --- /dev/null +++ b/libafl_intelpt/tests/run_integration_tests_linux_with_caps.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +cargo test intel_pt_trace_fork --no-run + +for test_bin in 
../target/debug/deps/integration_tests_linux-*; do + if file "$test_bin" | grep -q "ELF"; then + sudo setcap cap_ipc_lock,cap_sys_ptrace,cap_sys_admin,cap_syslog=ep "$test_bin" + fi +done + +cargo test intel_pt_trace_fork -- --show-output diff --git a/libafl_libfuzzer/Cargo.toml b/libafl_libfuzzer/Cargo.toml index 762c9d04db..5675ec8923 100644 --- a/libafl_libfuzzer/Cargo.toml +++ b/libafl_libfuzzer/Cargo.toml @@ -7,21 +7,16 @@ readme = "../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "testing", "security"] edition = "2021" -categories = ["development-tools::testing", "fuzzing"] +categories = ["development-tools::testing"] -include = [ - "/src", - "/Cargo.toml", - "/build.rs", - "/libafl_libfuzzer_runtime", - "!/libafl_libfuzzer_runtime/target", -] +include = ["/src", "/Cargo.toml", "/build.rs", "/runtime"] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [build-dependencies] -cc = "1.0" -rustversion = "1.0" +cc = "1.0.106" +rustversion = "1.0.17" +toml = { version = "0.8.19", features = ["preserve_order"] } [features] default = ["fork"] @@ -32,9 +27,6 @@ document-features = ["dep:document-features"] ## Enables the derive macros for the arbitrary dependency, transparently forwarded from libfuzzer-sys arbitrary-derive = ["libfuzzer-sys/arbitrary-derive"] -## Enables fuzzer introspection with LibAFL's `introspection` feature -introspection = [] - ## Enables forking in the fuzzer runtime for restarting managers for Unix systems (on by default) fork = [] @@ -46,12 +38,18 @@ embed-runtime = [] ## 🐇 rabbit = [] +## For testing and publishing purposes only: enforce that the runtime uses versions rather than paths +libafl-libfuzzer-use-version = [] + [dependencies] libfuzzer-sys = { version = "0.4.7", default-features = false } -document-features = { version = "0.2", optional = true } +document-features = { workspace = true, optional = true } [package.metadata.docs.rs] features = ["document-features"] all-features = true rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/libafl_libfuzzer/README.md b/libafl_libfuzzer/README.md index cff72c08a6..05b1bf2f7c 100644 --- a/libafl_libfuzzer/README.md +++ b/libafl_libfuzzer/README.md @@ -37,17 +37,17 @@ to libfuzzer-sys = { version = "0.11.0", features = ["your", "features", "here"], package = "libafl_libfuzzer" } ``` -If, in the case that you want to work with experimental changes, the `libfuzzer-best` branch contains the current -experimental best version of `libafl_libfuzzer`. -To use the experimental version, use: +To use the most up-to-date version (with experimental changes), use: ```toml -libfuzzer-sys = { git = "https://github.com/AFLplusplus/LibAFL.git", branch = "libfuzzer-best", features = ["your", "features", "here"], package = "libafl_libfuzzer" } +libfuzzer-sys = { git = "https://github.com/AFLplusplus/LibAFL.git", features = ["your", "features", "here"], package = "libafl_libfuzzer" } ``` -As this branch generally offers the highest performance version of `libafl_libfuzzer`, we recommend the latter. +As the repository generally offers the highest performance version of `libafl_libfuzzer`, we recommend the latter. Remember to `cargo update` often if using the experimental changes, and please [submit an issue] -if you encounter problems while using `libfuzzer-best`! +if you encounter problems while using the git branch! 
+ +For stability purposes, consider [specifying a commit](https://doc.rust-lang.org/cargo/reference/specifying-dependencies.html#choice-of-commit). #### macOS @@ -83,13 +83,13 @@ CXXFLAGS='-fsanitize=fuzzer-no-link' The runtime for `libafl_libfuzzer` may be used standalone as a direct replacement for libFuzzer with other targets as well. To do so, [ensure a recent nightly version of Rust is installed](https://rustup.rs/), then enter the -[`libafl_libfuzzer_runtime`](libafl_libfuzzer_runtime) folder and build the runtime with the following command: +[`libafl_libfuzzer_runtime`](../libafl_libfuzzer_runtime) folder and build the runtime with the following command: ```bash ./build.sh ``` -The static library will be available at `libFuzzer.a` in the [`libafl_libfuzzer_runtime`](libafl_libfuzzer_runtime) +The static library will be available at `libFuzzer.a` in the [`libafl_libfuzzer_runtime`](../libafl_libfuzzer_runtime) directory. If you encounter build failures without clear error outputs that help you resolve the issue, please [submit an issue]. @@ -144,7 +144,8 @@ to partial support of libfuzzer flags, `libafl_libfuzzer` offers: - `-fork` and `-jobs` - in `libafl_libfuzzer`, these are synonymous - `-ignore_crashes`, `-ignore_ooms`, and `-ignore_timeouts` - - note that setting `-tui=1` enables these flags by default, so you'll need to explicitly mention `-ignore_...=0` to disable them + - note that setting `-tui=1` enables these flags by default, so you'll need to explicitly mention `-ignore_...=0` to + disable them - `-rss_limit_mb` and `-malloc_limit_mb` - `-ignore_remaining_args` - `-shrink` @@ -152,7 +153,11 @@ to partial support of libfuzzer flags, `libafl_libfuzzer` offers: - `-close_fd_mask` [libFuzzer]: https://llvm.org/docs/LibFuzzer.html + [`libfuzzer-sys`]: https://docs.rs/libfuzzer-sys/ + [de-facto deprecation of libFuzzer]: https://llvm.org/docs/LibFuzzer.html#status + [submit an issue]: https://github.com/AFLplusplus/LibAFL/issues/new/choose + [grimoire]: https://www.usenix.org/conference/usenixsecurity19/presentation/blazytko diff --git a/libafl_libfuzzer/build.rs b/libafl_libfuzzer/build.rs index b8bc98a8a3..1293fe89b6 100644 --- a/libafl_libfuzzer/build.rs +++ b/libafl_libfuzzer/build.rs @@ -1,4 +1,6 @@ use std::{ + error::Error, + fs, fs::File, io::{BufRead, BufReader, BufWriter, Write}, path::{Path, PathBuf}, @@ -9,28 +11,28 @@ use std::{ const NAMESPACE: &str = "🐇"; #[cfg(not(feature = "rabbit"))] const NAMESPACE: &str = "__libafl"; -const NAMESPACE_LEN: usize = NAMESPACE.as_bytes().len(); +const NAMESPACE_LEN: usize = NAMESPACE.len(); #[allow(clippy::too_many_lines)] -fn main() { +fn main() -> Result<(), Box> { if cfg!(any(clippy, docsrs)) { - return; // skip when clippy or docs is running + return Ok(()); // skip when clippy or docs is running } if cfg!(not(any(target_os = "linux", target_os = "macos"))) { println!( "cargo:warning=The libafl_libfuzzer runtime may only be built for linux or macos; failing fast." 
); - return; + return Ok(()); } println!("cargo:rerun-if-changed=libafl_libfuzzer_runtime/src"); - println!("cargo:rerun-if-changed=libafl_libfuzzer_runtime/Cargo.toml"); println!("cargo:rerun-if-changed=libafl_libfuzzer_runtime/build.rs"); let custom_lib_dir = AsRef::::as_ref(&std::env::var_os("OUT_DIR").unwrap()).join("libafl_libfuzzer"); - std::fs::create_dir_all(&custom_lib_dir) + let custom_lib_target = custom_lib_dir.join("target"); + fs::create_dir_all(&custom_lib_target) .expect("Couldn't create the output directory for the fuzzer runtime build"); let lib_src: PathBuf = AsRef::::as_ref(&std::env::var_os("CARGO_MANIFEST_DIR").unwrap()) @@ -51,8 +53,6 @@ fn main() { .env("PATH", std::env::var_os("PATH").unwrap()) .current_dir(&lib_src); - let _ = std::fs::rename(lib_src.join("Cargo.toml.orig"), lib_src.join("Cargo.toml")); - command.arg("build"); let mut features = vec![]; @@ -60,9 +60,6 @@ fn main() { if cfg!(any(feature = "fork")) { features.push("fork"); } - if cfg!(any(feature = "introspection")) { - features.push("libafl/introspection"); - } if !features.is_empty() { command.arg("--features").arg(features.join(",")); @@ -72,143 +69,203 @@ fn main() { .arg("--release") .arg("--no-default-features") .arg("--target-dir") - .arg(&custom_lib_dir) + .arg(&custom_lib_target) .arg("--target") .arg(std::env::var_os("TARGET").unwrap()); + // detect if we are a version or path/git dep, or testing version-based behavior + if fs::exists("../libafl_libfuzzer_runtime")? && !cfg!(feature = "libafl-libfuzzer-use-version") + { + command.current_dir("../libafl_libfuzzer_runtime"); + } else { + // we are being used as a version dep; we need to create the package virtually + + // remove old files; we need to trigger a rebuild if our path changes! + let _ = fs::remove_file(custom_lib_dir.join("src")); + let _ = fs::remove_dir_all(custom_lib_dir.join("src")); // maybe a dir in windows + let _ = fs::remove_file(custom_lib_dir.join("build.rs")); + let _ = fs::remove_file(custom_lib_dir.join("Cargo.toml")); + + #[cfg(unix)] + { + // create symlinks for all the source files + use std::os::unix::fs::symlink; + + // canonicalize can theoretically fail if we are within a non-executable directory? + symlink(fs::canonicalize("runtime/src")?, custom_lib_dir.join("src"))?; + symlink( + fs::canonicalize("runtime/build.rs")?, + custom_lib_dir.join("build.rs"), + )?; + } + #[cfg(not(unix))] + { + todo!("copy all the source files"); // we don't support libafl_libfuzzer for others rn + } + let mut template: toml::Value = + toml::from_str(&fs::read_to_string("runtime/Cargo.toml.template")?)?; + let toml::Value::Table(root) = &mut template else { + unreachable!("Invalid Cargo.toml"); + }; + root.insert( + "workspace".to_string(), + toml::Value::Table(toml::Table::new()), + ); + let Some(toml::Value::Table(deps)) = root.get_mut("dependencies") else { + unreachable!("Invalid Cargo.toml"); + }; + let version = env!("CARGO_PKG_VERSION"); + for (_name, spec) in deps { + if let toml::Value::Table(spec) = spec { + // replace all path deps with version deps + if spec.remove("path").is_some() { + spec.insert( + "version".to_string(), + toml::Value::String(version.to_string()), + ); + } + } + } + + let serialized = toml::to_string(&template)?; + fs::write(custom_lib_dir.join("Cargo.toml"), serialized)?; + + // build in this filled out template + command.current_dir(custom_lib_dir); + } + assert!( - command.status().map_or(false, |s| s.success()), + command.status().is_ok_and(|s| s.success()), "Couldn't build runtime crate! 
Did you remember to use nightly? (`rustup default nightly` to install)" ); - let mut archive_path = custom_lib_dir.join(std::env::var_os("TARGET").unwrap()); + let mut archive_path = custom_lib_target.join(std::env::var_os("TARGET").unwrap()); archive_path.push("release"); - if cfg!(unix) { - archive_path.push("libafl_libfuzzer_runtime.a"); - let target_libdir = Command::new("rustc") - .args(["--print", "target-libdir"]) - .output() - .expect("Couldn't find rustc's target-libdir"); - let target_libdir = String::from_utf8(target_libdir.stdout).unwrap(); - let target_libdir = Path::new(target_libdir.trim()); + archive_path.push("libafl_libfuzzer_runtime.a"); + let target_libdir = Command::new("rustc") + .args(["--print", "target-libdir"]) + .output() + .expect("Couldn't find rustc's target-libdir"); + let target_libdir = String::from_utf8(target_libdir.stdout).unwrap(); + let target_libdir = Path::new(target_libdir.trim()); - // NOTE: depends on llvm-tools - let rust_objcopy = target_libdir.join("../bin/llvm-objcopy"); - let nm = target_libdir.join("../bin/llvm-nm"); + // NOTE: depends on llvm-tools + let rust_objcopy = target_libdir.join("../bin/llvm-objcopy"); + let nm = target_libdir.join("../bin/llvm-nm"); - let redefined_archive_path = custom_lib_dir.join("libFuzzer.a"); - let redefined_symbols = custom_lib_dir.join("redefs.txt"); + let redefined_archive_path = custom_lib_target.join("libFuzzer.a"); + let redefined_symbols = custom_lib_target.join("redefs.txt"); - let mut nm_child = Command::new(nm) - .arg(&archive_path) - .stdout(Stdio::piped()) - .spawn() - .expect("llvm-nm works (are you using nightly?)"); + let mut nm_child = Command::new(nm) + .arg(&archive_path) + .stdout(Stdio::piped()) + .spawn() + .expect("llvm-nm works (are you using nightly?)"); - let mut redefinitions_file = BufWriter::new(File::create(&redefined_symbols).unwrap()); + let mut redefinitions_file = BufWriter::new(File::create(&redefined_symbols).unwrap()); - let zn_prefix = if cfg!(target_os = "macos") { - // macOS symbols have an extra `_` - "__ZN" - } else { - "_ZN" - }; + let zn_prefix = if cfg!(target_os = "macos") { + // macOS symbols have an extra `_` + "__ZN" + } else { + "_ZN" + }; - let replacement = format!("{zn_prefix}{NAMESPACE_LEN}{NAMESPACE}"); + let replacement = format!("{zn_prefix}{NAMESPACE_LEN}{NAMESPACE}"); - // redefine all the rust-mangled symbols we can - // TODO this will break when v0 mangling is stabilised - for line in BufReader::new(nm_child.stdout.take().unwrap()).lines() { - let line = line.unwrap(); + // redefine all the rust-mangled symbols we can + // TODO this will break when v0 mangling is stabilised + for line in BufReader::new(nm_child.stdout.take().unwrap()).lines() { + let line = line.unwrap(); - // Skip headers - if line.ends_with(':') || line.is_empty() { - continue; - } - let (_, symbol) = line.rsplit_once(' ').unwrap(); - - if symbol.starts_with(zn_prefix) { - writeln!( - redefinitions_file, - "{} {}", - symbol, - symbol.replacen(zn_prefix, &replacement, 1) - ) - .unwrap(); - } + // Skip headers + if line.ends_with(':') || line.is_empty() { + continue; } - redefinitions_file.flush().unwrap(); - drop(redefinitions_file); + let (_, symbol) = line.rsplit_once(' ').unwrap(); - assert!( - nm_child.wait().map_or(false, |s| s.success()), + if symbol.starts_with(zn_prefix) { + writeln!( + redefinitions_file, + "{} {}", + symbol, + symbol.replacen(zn_prefix, &replacement, 1) + ) + .unwrap(); + } + } + redefinitions_file.flush().unwrap(); + drop(redefinitions_file); + + 
assert!( + nm_child.wait().is_ok_and(|s| s.success()), "Couldn't link runtime crate! Do you have the llvm-tools component installed? (`rustup component add llvm-tools-preview` to install)" ); - let mut objcopy_command = Command::new(rust_objcopy); + let mut objcopy_command = Command::new(rust_objcopy); - for symbol in [ - "__rust_drop_panic", - "__rust_foreign_exception", - "rust_begin_unwind", - "rust_panic", - "rust_eh_personality", - "__rg_oom", - "__rdl_oom", - "__rdl_alloc", - "__rust_alloc", - "__rdl_dealloc", - "__rust_dealloc", - "__rdl_realloc", - "__rust_realloc", - "__rdl_alloc_zeroed", - "__rust_alloc_zeroed", - "__rust_alloc_error_handler", - "__rust_no_alloc_shim_is_unstable", - "__rust_alloc_error_handler_should_panic", - ] { - let mut symbol = symbol.to_string(); - // macOS symbols have an extra `_` - if cfg!(target_os = "macos") { - symbol.insert(0, '_'); - } - - objcopy_command - .arg("--redefine-sym") - .arg(format!("{symbol}={symbol}_libafl_libfuzzer_runtime")); + for symbol in [ + "__rust_drop_panic", + "__rust_foreign_exception", + "rust_begin_unwind", + "rust_panic", + "rust_eh_personality", + "__rg_oom", + "__rdl_oom", + "__rdl_alloc", + "__rust_alloc", + "__rdl_dealloc", + "__rust_dealloc", + "__rdl_realloc", + "__rust_realloc", + "__rdl_alloc_zeroed", + "__rust_alloc_zeroed", + "__rust_alloc_error_handler", + "__rust_no_alloc_shim_is_unstable", + "__rust_alloc_error_handler_should_panic", + ] { + let mut symbol = symbol.to_string(); + // macOS symbols have an extra `_` + if cfg!(target_os = "macos") { + symbol.insert(0, '_'); } objcopy_command - .arg("--redefine-syms") - .arg(redefined_symbols) - .args([&archive_path, &redefined_archive_path]); + .arg("--redefine-sym") + .arg(format!("{symbol}={symbol}_libafl_libfuzzer_runtime")); + } - assert!( - objcopy_command.status().map_or(false, |s| s.success()), + objcopy_command + .arg("--redefine-syms") + .arg(redefined_symbols) + .args([&archive_path, &redefined_archive_path]); + + assert!( + objcopy_command.status().is_ok_and(|s| s.success()), "Couldn't rename allocators in the runtime crate! Do you have the llvm-tools component installed? 
(`rustup component add llvm-tools-preview` to install)" ); - #[cfg(feature = "embed-runtime")] - { - // NOTE: lib, .a are added always on unix-like systems as described in: - // https://gist.github.com/novafacing/1389cbb2f0a362d7eb103e67b4468e2b - println!( - "cargo:rustc-env=LIBAFL_LIBFUZZER_RUNTIME_PATH={}", - redefined_archive_path.display() - ); - } - + #[cfg(feature = "embed-runtime")] + { + // NOTE: lib, .a are added always on unix-like systems as described in: + // https://gist.github.com/novafacing/1389cbb2f0a362d7eb103e67b4468e2b println!( - "cargo:rustc-link-search=native={}", - custom_lib_dir.to_str().unwrap() + "cargo:rustc-env=LIBAFL_LIBFUZZER_RUNTIME_PATH={}", + redefined_archive_path.display() ); - println!("cargo:rustc-link-lib=static=Fuzzer"); - - if cfg!(target_os = "macos") { - println!("cargo:rustc-link-lib=c++"); - } else { - println!("cargo:rustc-link-lib=stdc++"); - } } + + println!( + "cargo:rustc-link-search=native={}", + custom_lib_target.to_str().unwrap() + ); + println!("cargo:rustc-link-lib=static=Fuzzer"); + + if cfg!(target_os = "macos") { + println!("cargo:rustc-link-lib=c++"); + } else { + println!("cargo:rustc-link-lib=stdc++"); + } + Ok(()) } diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/Cargo.toml b/libafl_libfuzzer/libafl_libfuzzer_runtime/Cargo.toml deleted file mode 100644 index 7e069ce955..0000000000 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/Cargo.toml +++ /dev/null @@ -1,56 +0,0 @@ -[package] -name = "libafl_libfuzzer_runtime" -version = "0.13.0" -edition = "2021" -publish = false - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[features] -default = ["fork"] -## Enables forking mode for the LibAFL launcher (instead of starting new processes) -fork = ["libafl/fork"] -track_hit_feedbacks = ["libafl/track_hit_feedbacks", "libafl_targets/track_hit_feedbacks"] - -[profile.release] -lto = true -codegen-units = 1 -opt-level = 3 -debug = true - -# debug-free release profile for fuzzbench due to space restrictions -[profile.release-fuzzbench] -inherits = "release" -debug = false -strip = true - - -[lib] -name = "afl_libfuzzer_runtime" # TODO fix name once cargo-fuzz stops stripping double-prefixes -path = "src/lib.rs" -crate-type = ["staticlib", "rlib"] - -[dependencies] -libafl = { path = "../../libafl", default-features = false, features = ["std", "derive", "llmp_compression", "rand_trait", "regex", "errors_backtrace", "serdeany_autoreg", "tui_monitor", "unicode"] } -libafl_bolts = { path = "../../libafl_bolts", default-features = false, features = ["std", "derive", "llmp_compression", "rand_trait", "serdeany_autoreg", "errors_backtrace"] } -libafl_targets = { path = "../../libafl_targets", features = ["sancov_8bit", "sancov_cmplog", "sancov_pcguard", "libfuzzer", "libfuzzer_oom", "libfuzzer_define_run_driver", "libfuzzer_interceptors", "sanitizers_flags", "whole_archive", "sanitizer_interfaces"] } - -ahash = { version = "0.8.3", default-features = false } -libc = "0.2.1" -log = "0.4.20" -mimalloc = { version = "0.1.34", default-features = false } -num-traits = "0.2.15" -rand = "0.8.5" -serde = { version = "1.0", features = ["derive"] } # serialization lib - -# for identifying if we can grimoire-ify -utf8-chars = "3.0.1" - -env_logger = "0.10" - - -[build-dependencies] -bindgen = "0.69.4" -cc = { version = "1.0", features = ["parallel"] } - -[workspace] diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/build.sh b/libafl_libfuzzer/libafl_libfuzzer_runtime/build.sh deleted file mode 
100755 index 5114009450..0000000000 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/build.sh +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -set -e - -SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) - -cd "${SCRIPT_DIR}" || exit 1 - -if [ -z ${1+x} ]; then - profile=release -else - profile="$1" -fi - -if ! cargo +nightly --version >& /dev/null; then - echo -e "You must install a recent Rust nightly to build the libafl_libfuzzer runtime!" - exit 1 -fi - -RUSTC_BIN="$(cargo +nightly rustc -Zunstable-options --print target-libdir)/../bin" -RUST_LLD="${RUSTC_BIN}/rust-lld" -RUST_AR="${RUSTC_BIN}/llvm-ar" - -if ! [ -f "${RUST_LLD}" ] && [ -f "${RUST_AR}" ]; then - echo -e "You must install the llvm-tools component: \`rustup component add llvm-tools'" - exit 1 -fi - -cargo +nightly build --profile "$profile" - -tmpdir="" - -cleanup() { - rm -rf "${tmpdir}" - exit -} -trap cleanup INT TERM - -tmpdir="$(mktemp -d)" -"${RUST_LLD}" -flavor gnu -r --whole-archive target/release/libafl_libfuzzer_runtime.a -o "${tmpdir}/libFuzzer.o" -"${RUST_AR}" cr libFuzzer.a "${tmpdir}/libFuzzer.o" - -echo "Done! Wrote the runtime to \`${SCRIPT_DIR}/libFuzzer.a'" -cleanup diff --git a/libafl_libfuzzer/publish.sh b/libafl_libfuzzer/publish.sh deleted file mode 100755 index 1c0f264eca..0000000000 --- a/libafl_libfuzzer/publish.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -set -x - -mv libafl_libfuzzer_runtime/Cargo.toml libafl_libfuzzer_runtime/Cargo.toml.orig -cargo publish --allow-dirty --no-verify "$@" -mv libafl_libfuzzer_runtime/Cargo.toml.orig libafl_libfuzzer_runtime/Cargo.toml diff --git a/libafl_libfuzzer/runtime/Cargo.toml.template b/libafl_libfuzzer/runtime/Cargo.toml.template new file mode 100644 index 0000000000..c987b43c07 --- /dev/null +++ b/libafl_libfuzzer/runtime/Cargo.toml.template @@ -0,0 +1,85 @@ +[package] +name = "libafl_libfuzzer_runtime" +version = "0.14.1" +edition = "2021" +publish = false + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[features] +default = ["fork"] +## Enables forking mode for the LibAFL launcher (instead of starting new processes) +fork = ["libafl/fork"] +track_hit_feedbacks = [ + "libafl/track_hit_feedbacks", + "libafl_targets/track_hit_feedbacks", +] + +[profile.release] +lto = true +codegen-units = 1 +opt-level = 3 +debug = true + +# debug-free release profile for fuzzbench due to space restrictions +[profile.release-fuzzbench] +inherits = "release" +debug = false +strip = true + +[lib] +name = "afl_libfuzzer_runtime" # historically, cargo-fuzz strips double-prefixes; maintain compat +crate-type = ["staticlib", "rlib"] + +[dependencies] +libafl = { path = "../libafl", default-features = false, features = [ + "std", + "derive", + "llmp_compression", + "rand_trait", + "regex", + "errors_backtrace", + "serdeany_autoreg", + "tui_monitor", + "unicode", +] } +libafl_bolts = { path = "../libafl_bolts", default-features = false, features = [ + "std", + "derive", + "llmp_compression", + "rand_trait", + "serdeany_autoreg", + "errors_backtrace", +] } +libafl_targets = { path = "../libafl_targets", features = [ + "sancov_8bit", + "sancov_cmplog", + "sancov_value_profile", + "sancov_pcguard", + "libfuzzer", + "libfuzzer_oom", + "libfuzzer_define_run_driver", + "libfuzzer_interceptors", + "sanitizers_flags", + "whole_archive", + "sanitizer_interfaces", +] } + +ahash = { version = "0.8.11", default-features = false } +libc = "0.2.159" +log = { version = "0.4.22", features = 
["release_max_level_info"] } +mimalloc = { version = "0.1.43", default-features = false } +num-traits = { version = "0.2.19", default-features = true } +rand = "0.8.5" +serde = { version = "1.0.210", default-features = true, features = [ + "derive", +] } # serialization lib +hashbrown = { version = "0.14.5", default-features = true } + +# for identifying if we can grimoire-ify +utf8-chars = "3.0.4" +env_logger = "0.11.5" + +[build-dependencies] +bindgen = "0.70.1" +cc = { version = "1.1.22", features = ["parallel"] } diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/build.rs b/libafl_libfuzzer/runtime/build.rs similarity index 100% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/build.rs rename to libafl_libfuzzer/runtime/build.rs diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/corpus.rs b/libafl_libfuzzer/runtime/src/corpus.rs similarity index 92% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/corpus.rs rename to libafl_libfuzzer/runtime/src/corpus.rs index 3a287b4d9b..a5ec612193 100644 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/corpus.rs +++ b/libafl_libfuzzer/runtime/src/corpus.rs @@ -1,17 +1,18 @@ use std::{ cell::RefCell, - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::BTreeMap, io::ErrorKind, path::PathBuf, sync::atomic::{AtomicU64, Ordering}, }; +use hashbrown::{hash_map::Entry, HashMap}; use libafl::{ corpus::{ inmemory::{TestcaseStorage, TestcaseStorageMap}, Corpus, CorpusId, Testcase, }, - inputs::{Input, UsesInput}, + inputs::Input, }; use libafl_bolts::Error; use serde::{Deserialize, Serialize}; @@ -54,28 +55,28 @@ where } /// Touch this index and maybe evict an entry if we have touched an input which was unloaded. - fn touch(&self, idx: CorpusId, corpus: &TestcaseStorageMap) -> Result<(), Error> { + fn touch(&self, id: CorpusId, corpus: &TestcaseStorageMap) -> Result<(), Error> { let mut loaded_mapping = self.loaded_mapping.borrow_mut(); let mut loaded_entries = self.loaded_entries.borrow_mut(); - match loaded_mapping.entry(idx) { + match loaded_mapping.entry(id) { Entry::Occupied(mut e) => { let &old = e.get(); let new = self.next_recency.fetch_add(1, Ordering::Relaxed); e.insert(new); loaded_entries.remove(&old); - loaded_entries.insert(new, idx); + loaded_entries.insert(new, id); } Entry::Vacant(e) => { // new entry! 
send it in let new = self.next_recency.fetch_add(1, Ordering::Relaxed); e.insert(new); - loaded_entries.insert(new, idx); + loaded_entries.insert(new, id); } } if loaded_entries.len() > self.max_len { - let idx = loaded_entries.pop_first().unwrap().1; // cannot panic - let cell = corpus.get(idx).ok_or_else(|| { - Error::key_not_found(format!("Tried to evict non-existent entry {idx}")) + let id = loaded_entries.pop_first().unwrap().1; // cannot panic + let cell = corpus.get(id).ok_or_else(|| { + Error::key_not_found(format!("Tried to evict non-existent entry {id}")) })?; let mut tc = cell.try_borrow_mut()?; let _ = tc.input_mut().take(); @@ -97,7 +98,7 @@ where testcase: RefCell>, is_disabled: bool, ) -> Result { - let idx = if is_disabled { + let id = if is_disabled { self.mapping.insert_disabled(testcase) } else { self.mapping.insert(testcase) @@ -107,7 +108,7 @@ where } else { &self.mapping.enabled }; - let mut testcase = corpus.get(idx).unwrap().borrow_mut(); + let mut testcase = corpus.get(id).unwrap().borrow_mut(); match testcase.file_path() { Some(path) if path.canonicalize()?.starts_with(&self.corpus_dir) => { // if it's already in the correct dir, we retain it @@ -118,7 +119,7 @@ where "The testcase, when added to the corpus, must have an input present!", ) })?; - let name = input.generate_name(idx.into()); + let name = input.generate_name(Some(id)); let path = self.corpus_dir.join(&name); match input.to_file(&path) { @@ -134,22 +135,17 @@ where testcase.file_path_mut().replace(path); } }; - self.touch(idx, corpus)?; - Ok(idx) + self.touch(id, corpus)?; + Ok(id) } } -impl UsesInput for LibfuzzerCorpus -where - I: Input + Serialize + for<'de> Deserialize<'de>, -{ - type Input = I; -} - impl Corpus for LibfuzzerCorpus where I: Input + Serialize + for<'de> Deserialize<'de>, { + type Input = I; + #[inline] fn count(&self) -> usize { self.mapping.enabled.map.len() @@ -171,7 +167,7 @@ where fn replace( &mut self, - _idx: CorpusId, + _id: CorpusId, _testcase: Testcase, ) -> Result, Error> { unimplemented!("It is unsafe to use this corpus variant with replace!"); @@ -281,17 +277,12 @@ where } } -impl UsesInput for ArtifactCorpus -where - I: Input + Serialize + for<'de> Deserialize<'de>, -{ - type Input = I; -} - impl Corpus for ArtifactCorpus where I: Input + Serialize + for<'de> Deserialize<'de>, { + type Input = I; + fn count(&self) -> usize { self.count } @@ -335,7 +326,7 @@ where fn replace( &mut self, - _idx: CorpusId, + _id: CorpusId, _testcase: Testcase, ) -> Result, Error> { unimplemented!("Artifact prefix is thin and cannot get, replace, or remove.") @@ -350,7 +341,7 @@ where .count .checked_sub(1) .map(CorpusId::from) - .map_or(false, |last| last == id) + .is_some_and(|last| last == id) { self.last.as_ref() } else { diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/feedbacks.rs b/libafl_libfuzzer/runtime/src/feedbacks.rs similarity index 84% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/feedbacks.rs rename to libafl_libfuzzer/runtime/src/feedbacks.rs index a03c813459..e67b5f8a72 100644 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/feedbacks.rs +++ b/libafl_libfuzzer/runtime/src/feedbacks.rs @@ -5,15 +5,13 @@ use std::borrow::Cow; use libafl::{ alloc, corpus::Testcase, - events::EventFirer, executors::ExitKind, - feedbacks::{Feedback, MinMapFeedback}, + feedbacks::{Feedback, MinMapFeedback, StateInitializer}, inputs::{BytesInput, Input}, - observers::ObserversTuple, state::State, Error, HasMetadata, }; -use libafl_bolts::{impl_serdeany, Named}; +use 
libafl_bolts::{impl_serdeany, tuples::MatchNameRef, Named}; use libafl_targets::OomFeedback; use serde::{Deserialize, Serialize}; @@ -43,22 +41,20 @@ impl Named for LibfuzzerKeepFeedback { } } -impl Feedback for LibfuzzerKeepFeedback +impl StateInitializer for LibfuzzerKeepFeedback {} + +impl Feedback for LibfuzzerKeepFeedback where S: State, { - fn is_interesting( + fn is_interesting( &mut self, _state: &mut S, _manager: &mut EM, _input: &S::Input, _observers: &OT, _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + ) -> Result { Ok(*self.keep.borrow()) } @@ -108,7 +104,7 @@ impl LibfuzzerCrashCauseFeedback { let base = if let Some(filename) = testcase.filename() { filename.clone() } else { - let name = testcase.input().as_ref().unwrap().generate_name(0); + let name = testcase.input().as_ref().unwrap().generate_name(None); name }; let file_path = self.artifact_prefix.dir().join(format!( @@ -119,22 +115,21 @@ impl LibfuzzerCrashCauseFeedback { } } -impl Feedback for LibfuzzerCrashCauseFeedback +impl StateInitializer for LibfuzzerCrashCauseFeedback {} + +impl Feedback for LibfuzzerCrashCauseFeedback where S: State, + OT: MatchNameRef, { - fn is_interesting( + fn is_interesting( &mut self, _state: &mut S, _manager: &mut EM, - _input: &S::Input, + _input: &BytesInput, _observers: &OT, exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + ) -> Result { self.exit_kind = *exit_kind; Ok(false) } @@ -143,16 +138,13 @@ where Ok(false) } - fn append_metadata( + fn append_metadata( &mut self, _state: &mut S, _manager: &mut EM, _observers: &OT, - testcase: &mut Testcase, - ) -> Result<(), Error> - where - OT: ObserversTuple, - { + testcase: &mut Testcase, + ) -> Result<(), Error> { match self.exit_kind { ExitKind::Crash | ExitKind::Oom if OomFeedback::oomed() => { self.set_filename("oom", testcase); @@ -183,4 +175,4 @@ where } } -pub type ShrinkMapFeedback = MinMapFeedback, usize>; +pub type ShrinkMapFeedback = MinMapFeedback>; diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/fuzz.rs b/libafl_libfuzzer/runtime/src/fuzz.rs similarity index 90% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/fuzz.rs rename to libafl_libfuzzer/runtime/src/fuzz.rs index f671bf120a..94f3954559 100644 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/fuzz.rs +++ b/libafl_libfuzzer/runtime/src/fuzz.rs @@ -13,17 +13,14 @@ use std::{ use libafl::{ corpus::Corpus, events::{ - launcher::Launcher, EventConfig, ProgressReporter, SimpleEventManager, + launcher::Launcher, EventConfig, EventProcessor, ProgressReporter, SimpleEventManager, SimpleRestartingEventManager, }, executors::ExitKind, inputs::UsesInput, - monitors::{ - tui::{ui::TuiUI, TuiMonitor}, - Monitor, MultiMonitor, - }, - stages::{HasCurrentStage, StagesTuple}, - state::{HasExecutions, HasLastReportTime, HasSolutions, UsesState}, + monitors::{tui::TuiMonitor, Monitor, MultiMonitor}, + stages::{HasCurrentStageId, StagesTuple}, + state::{HasExecutions, HasLastReportTime, HasSolutions, Stoppable, UsesState}, Error, Fuzzer, HasMetadata, }; use libafl_bolts::{ @@ -66,9 +63,15 @@ fn do_fuzz( ) -> Result<(), Error> where F: Fuzzer, - S: HasMetadata + HasExecutions + UsesInput + HasSolutions + HasLastReportTime + HasCurrentStage, + S: HasMetadata + + HasExecutions + + UsesInput + + HasSolutions + + HasLastReportTime + + HasCurrentStageId + + Stoppable, E: UsesState, - EM: ProgressReporter, + EM: ProgressReporter + EventProcessor, ST: StagesTuple, { if let Some(solution) = 
state.solutions().last() { @@ -202,7 +205,10 @@ pub fn fuzz( if let Some(forks) = options.forks() { let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory"); if options.tui() { - let monitor = TuiMonitor::new(TuiUI::new(options.fuzzer_name().to_string(), true)); + let monitor = TuiMonitor::builder() + .title(options.fuzzer_name()) + .enhanced_graphics(true) + .build(); fuzz_many_forking(options, harness, shmem_provider, forks, monitor) } else if forks == 1 { let monitor = MultiMonitor::with_time( @@ -221,7 +227,10 @@ pub fn fuzz( // if the user specifies TUI, we assume they want to fork; it would not be possible to use // TUI safely otherwise let shmem_provider = StdShMemProvider::new().expect("Failed to init shared memory"); - let monitor = TuiMonitor::new(TuiUI::new(options.fuzzer_name().to_string(), true)); + let monitor = TuiMonitor::builder() + .title(options.fuzzer_name()) + .enhanced_graphics(true) + .build(); fuzz_many_forking(options, harness, shmem_provider, 1, monitor) } else { destroy_output_fds(options); diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/harness_wrap.cpp b/libafl_libfuzzer/runtime/src/harness_wrap.cpp similarity index 100% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/harness_wrap.cpp rename to libafl_libfuzzer/runtime/src/harness_wrap.cpp diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/harness_wrap.h b/libafl_libfuzzer/runtime/src/harness_wrap.h similarity index 100% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/harness_wrap.h rename to libafl_libfuzzer/runtime/src/harness_wrap.h diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/lib.rs b/libafl_libfuzzer/runtime/src/lib.rs similarity index 91% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/lib.rs rename to libafl_libfuzzer/runtime/src/lib.rs index 54aca0134d..8e48992a24 100644 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/lib.rs +++ b/libafl_libfuzzer/runtime/src/lib.rs @@ -139,7 +139,7 @@ impl CustomMutationStatus { } macro_rules! fuzz_with { - ($options:ident, $harness:ident, $operation:expr, $and_then:expr, $edge_maker:expr) => {{ + ($options:ident, $harness:ident, $operation:expr, $and_then:expr, $edge_maker:expr, $extra_feedback:expr, $extra_obsv:expr) => {{ use libafl_bolts::{ rands::StdRand, tuples::{Merge, tuple_list}, @@ -169,10 +169,11 @@ macro_rules! fuzz_with { state::{HasCorpus, StdState}, StdFuzzer, }; - use libafl_targets::{CmpLogObserver, LLVMCustomMutator, OomFeedback, OomObserver}; + use libafl_targets::{CmpLogObserver, LLVMCustomMutator, OomFeedback, OomObserver, CMP_MAP}; + use libafl_bolts::nonzero; use rand::{thread_rng, RngCore}; use std::{env::temp_dir, fs::create_dir, path::PathBuf}; - + use core::num::NonZeroUsize; use crate::{ CustomMutationStatus, corpus::{ArtifactCorpus, LibfuzzerCorpus}, @@ -203,6 +204,9 @@ macro_rules! fuzz_with { // Create the Cmp observer let cmplog_observer = CmpLogObserver::new("cmplog", true); + // Create an observer using the cmp map for value profile + let value_profile_observer = unsafe { StdMapObserver::from_mut_ptr("cmps", CMP_MAP.as_mut_ptr(), CMP_MAP.len()) }; + // Create a stacktrace observer let backtrace_observer = BacktraceObserver::owned( "BacktraceObserver", @@ -213,14 +217,27 @@ macro_rules! 
fuzz_with { let map_feedback = MaxMapFeedback::new(&edges_observer); let shrinking_map_feedback = ShrinkMapFeedback::new(&size_edges_observer); + // Value profile maximization feedback + let value_profile_feedback = MaxMapFeedback::new(&value_profile_observer); + // Set up a generalization stage for grimoire let generalization = GeneralizationStage::new(&edges_observer); let generalization = IfStage::new(|_, _, _, _| Ok(grimoire.into()), tuple_list!(generalization)); let calibration = CalibrationStage::new(&map_feedback); + let add_extra_feedback = $extra_feedback; + let coverage_feedback = add_extra_feedback( + feedback_or!( + map_feedback, + feedback_and_fast!(ConstFeedback::new($options.shrink()), shrinking_map_feedback), + // Time feedback, this one does not need a feedback state + TimeFeedback::new(&time_observer) + ), + value_profile_feedback + ); + // Feedback to rate the interestingness of an input - // This one is composed by two Feedbacks in OR let mut feedback = feedback_and_fast!( feedback_not!( feedback_or_fast!( @@ -230,12 +247,7 @@ macro_rules! fuzz_with { ) ), keep_observer, - feedback_or!( - map_feedback, - feedback_and_fast!(ConstFeedback::new($options.shrink()), shrinking_map_feedback), - // Time feedback, this one does not need a feedback state - TimeFeedback::new(&time_observer) - ) + coverage_feedback ); // A feedback to choose if an input is a solution or not @@ -344,7 +356,7 @@ macro_rules! fuzz_with { // TODO configure with mutation stacking options from libfuzzer let std_mutator = StdScheduledMutator::new(havoc_mutations().merge(tokens_mutations())); - let std_power = StdPowerMutationalStage::new(std_mutator); + let std_power: StdPowerMutationalStage<_, _, BytesInput, _, _> = StdPowerMutationalStage::new(std_mutator); let std_power = IfStage::new(|_, _, _, _| Ok(mutator_status.std_mutational.into()), (std_power, ())); // for custom mutator and crossover, each have access to the LLVMFuzzerMutate -- but it appears @@ -363,9 +375,10 @@ macro_rules! fuzz_with { let custom_mutator = unsafe { LLVMCustomMutator::mutate_unchecked(StdScheduledMutator::new(havoc_mutations_no_crossover().merge(tokens_mutations()))) }; - let std_mutator_no_mutate = StdScheduledMutator::with_max_stack_pow(havoc_crossover(), 3); + // Safe to unwrap: stack pow is not 0. + let std_mutator_no_mutate = StdScheduledMutator::with_max_stack_pow(havoc_crossover(),3); - let cm_power = StdPowerMutationalStage::new(custom_mutator); + let cm_power: StdPowerMutationalStage<_, _, BytesInput, _, _> = StdPowerMutationalStage::new(custom_mutator); let cm_power = IfStage::new(|_, _, _, _| Ok(mutator_status.custom_mutation.into()), (cm_power, ())); let cm_std_power = StdMutationalStage::new(std_mutator_no_mutate); let cm_std_power = @@ -374,6 +387,7 @@ macro_rules! fuzz_with { // a custom crossover is defined // while the scenario that a custom crossover is defined without a custom mutator is unlikely // we handle it here explicitly anyways + // Safe to unwrap: stack pow is not 0. let custom_crossover = unsafe { LLVMCustomMutator::crossover_unchecked(StdScheduledMutator::with_max_stack_pow( havoc_mutations_no_crossover().merge(tokens_mutations()), @@ -384,10 +398,11 @@ macro_rules! 
fuzz_with { let cc_power = StdMutationalStage::new(custom_crossover); let cc_power = IfStage::new(|_, _, _, _| Ok(mutator_status.custom_crossover.into()), (cc_power, ())); - let cc_std_power = StdPowerMutationalStage::new(std_mutator_no_crossover); + let cc_std_power: StdPowerMutationalStage<_, _, BytesInput, _, _> = StdPowerMutationalStage::new(std_mutator_no_crossover); let cc_std_power = IfStage::new(|_, _, _, _| Ok(mutator_status.std_no_crossover.into()), (cc_std_power, ())); + // Safe to unwrap: stack pow is not 0. let grimoire_mutator = StdScheduledMutator::with_max_stack_pow( tuple_list!( GrimoireExtensionMutator::new(), @@ -402,7 +417,7 @@ macro_rules! fuzz_with { let grimoire = IfStage::new(|_, _, _, _| Ok(grimoire.into()), (StdMutationalStage::transforming(grimoire_mutator), ())); // A minimization+queue policy to get testcasess from the corpus - let scheduler = IndexesLenTimeMinimizerScheduler::new(&edges_observer, PowerQueueScheduler::new(&mut state, &edges_observer, PowerSchedule::FAST)); + let scheduler = IndexesLenTimeMinimizerScheduler::new(&edges_observer, PowerQueueScheduler::new(&mut state, &edges_observer, PowerSchedule::fast())); // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); @@ -424,10 +439,16 @@ macro_rules! fuzz_with { let mut tracing_harness = harness; + let add_extra_observer = $extra_obsv; + let observers = add_extra_observer( + tuple_list!(edges_observer, size_edges_observer, time_observer, backtrace_observer, oom_observer), + value_profile_observer + ); + // Create the executor for an in-process function with one observer for edge coverage and one for the execution time let mut executor = InProcessExecutor::with_timeout( &mut harness, - tuple_list!(edges_observer, size_edges_observer, time_observer, backtrace_observer, oom_observer), + observers, &mut fuzzer, &mut state, &mut mgr, @@ -447,7 +468,7 @@ macro_rules! fuzz_with { } if state.corpus().count() < 1 { // Generator of bytearrays of max size 64 - let mut generator = RandBytesGenerator::from(RandBytesGenerator::new(64)); + let mut generator = RandBytesGenerator::from(RandBytesGenerator::new(nonzero!(64))); // Generate 1024 initial inputs state @@ -466,7 +487,6 @@ macro_rules! fuzz_with { } } - // Setup a tracing stage in which we log comparisons let tracing = IfStage::new(|_, _, _, _| Ok(!$options.skip_tracing()), (TracingStage::new(InProcessExecutor::new( &mut tracing_harness, @@ -500,6 +520,21 @@ macro_rules! 
fuzz_with { $and_then(closure) }}; + ($options:ident, $harness:ident, $operation:expr, $and_then:expr, $edge_maker:expr) => {{ + if $options.use_value_profile() { + fuzz_with!($options, $harness, $operation, $and_then, $edge_maker, + |feedback, value_profile_feedback| { + feedback_or!(feedback, value_profile_feedback) + }, + |observers, value_profile_observer| { + (value_profile_observer, observers) // Prepend the value profile observer in the tuple list + } + ) + } else { + fuzz_with!($options, $harness, $operation, $and_then, $edge_maker, |feedback, _| feedback, |observers, _| observers) + } + }}; + ($options:ident, $harness:ident, $operation:expr, $and_then:expr) => {{ use libafl::observers::{ HitcountsIterableMapObserver, HitcountsMapObserver, MultiMapObserver, StdMapObserver, diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/merge.rs b/libafl_libfuzzer/runtime/src/merge.rs similarity index 94% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/merge.rs rename to libafl_libfuzzer/runtime/src/merge.rs index 59b56f689d..506e0a2e41 100644 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/merge.rs +++ b/libafl_libfuzzer/runtime/src/merge.rs @@ -4,7 +4,6 @@ use std::{ fs::{rename, File}, io::Write, os::fd::{AsRawFd, FromRawFd}, - ptr::addr_of_mut, time::{SystemTime, UNIX_EPOCH}, }; @@ -98,7 +97,7 @@ pub fn merge( } } - let edges = unsafe { core::mem::take(&mut *addr_of_mut!(COUNTERS_MAPS)) }; + let edges = unsafe { core::mem::take(&mut *&raw mut COUNTERS_MAPS) }; let edges_observer = MultiMapObserver::new("edges", edges); let time = TimeObserver::new("time"); @@ -206,15 +205,15 @@ pub fn merge( }); } - for idx in fuzzer.scheduler().removable() { - let testcase = state.corpus_mut().remove(idx)?; + for id in fuzzer.scheduler().removable() { + let testcase = state.corpus_mut().remove(id)?; fuzzer .scheduler_mut() - .on_remove(&mut state, idx, &Some(testcase))?; + .on_remove(&mut state, id, &Some(testcase))?; } - for idx in fuzzer.scheduler().current().clone() { - let mut testcase = state.corpus_mut().get(idx)?.borrow_mut(); + for id in fuzzer.scheduler().current().clone() { + let mut testcase = state.corpus_mut().get(id)?.borrow_mut(); let file_path = testcase .file_path_mut() .as_mut() @@ -231,10 +230,10 @@ pub fn merge( new_file_path.push(base); if new_file_path.exists() { drop(testcase); - let testcase = state.corpus_mut().remove(idx)?; + let testcase = state.corpus_mut().remove(id)?; fuzzer .scheduler_mut() - .on_remove(&mut state, idx, &Some(testcase))?; + .on_remove(&mut state, id, &Some(testcase))?; } else { // False-positive: file_path is used just below #[allow(clippy::needless_borrows_for_generic_args)] diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/misc.rs b/libafl_libfuzzer/runtime/src/misc.rs similarity index 97% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/misc.rs rename to libafl_libfuzzer/runtime/src/misc.rs index 89e7c32999..5a70c0f44f 100644 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/misc.rs +++ b/libafl_libfuzzer/runtime/src/misc.rs @@ -1,8 +1,6 @@ -use std::{ - collections::{HashSet, VecDeque}, - path::PathBuf, -}; +use std::{collections::VecDeque, path::PathBuf}; +use hashbrown::HashSet; use libafl::{Error, HasMetadata}; use libafl_bolts::impl_serdeany; use serde::{Deserialize, Serialize}; diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/observers.rs b/libafl_libfuzzer/runtime/src/observers.rs similarity index 96% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/observers.rs rename to 
libafl_libfuzzer/runtime/src/observers.rs index a3f95e69a4..a4d90c7ec3 100644 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/observers.rs +++ b/libafl_libfuzzer/runtime/src/observers.rs @@ -165,10 +165,10 @@ where type State = M::State; } -impl Observer for MappedEdgeMapObserver +impl Observer for MappedEdgeMapObserver where - M: Observer + Debug, - O: Observer + Debug, + M: Observer + Debug, + O: Observer + Debug, S: UsesInput, { fn pre_exec(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { @@ -224,6 +224,7 @@ where impl<'it, M, O> AsIter<'it> for MappedEdgeMapObserver where M: MapObserver + for<'a> AsIter<'a, Item = M::Entry>, + M::Entry: 'it, O: ValueObserver + 'it, { type Item = O::ValueType; @@ -261,7 +262,7 @@ impl Named for SizeValueObserver { } } -impl Observer for SizeValueObserver +impl Observer for SizeValueObserver where S: UsesInput, S::Input: HasLen, @@ -305,7 +306,7 @@ impl Named for TimeValueObserver { } } -impl Observer for TimeValueObserver +impl Observer for TimeValueObserver where S: UsesInput, { @@ -367,7 +368,7 @@ impl Named for SizeTimeValueObserver { } } -impl Observer for SizeTimeValueObserver +impl Observer for SizeTimeValueObserver where S: UsesInput, S::Input: HasLen, diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/options.rs b/libafl_libfuzzer/runtime/src/options.rs similarity index 96% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/options.rs rename to libafl_libfuzzer/runtime/src/options.rs index 98d9172234..8126c50aa4 100644 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/options.rs +++ b/libafl_libfuzzer/runtime/src/options.rs @@ -107,6 +107,7 @@ pub struct LibfuzzerOptions { artifact_prefix: ArtifactPrefix, timeout: Duration, grimoire: Option, + use_value_profile: bool, unicode: bool, forks: Option, dict: Option, @@ -163,6 +164,10 @@ impl LibfuzzerOptions { self.grimoire } + pub fn use_value_profile(&self) -> bool { + self.use_value_profile + } + pub fn unicode(&self) -> bool { self.unicode } @@ -235,6 +240,7 @@ struct LibfuzzerOptionsBuilder<'a> { artifact_prefix: Option<&'a str>, timeout: Option, grimoire: Option, + use_value_profile: Option, unicode: Option, forks: Option, dict: Option<&'a str>, @@ -298,6 +304,9 @@ impl<'a> LibfuzzerOptionsBuilder<'a> { } } "grimoire" => self.grimoire = Some(parse_or_bail!(name, value, u64) > 0), + "use_value_profile" => { + self.use_value_profile = Some(parse_or_bail!(name, value, u64) > 0); + } "unicode" => self.unicode = Some(parse_or_bail!(name, value, u64) > 0), "artifact_prefix" => { self.artifact_prefix = Some(value); @@ -371,10 +380,11 @@ impl<'a> LibfuzzerOptionsBuilder<'a> { .unwrap_or_default(), timeout: self.timeout.unwrap_or(Duration::from_secs(1200)), grimoire: self.grimoire, + use_value_profile: self.use_value_profile.unwrap_or(false), unicode: self.unicode.unwrap_or(true), forks: self.forks, dict: self.dict.map(|path| { - Tokens::from_file(path).expect("Couldn't load tokens from specified dictionary") + Tokens::from_file(path).expect("Couldn't load tokens from specified tokens file") }), dirs: self.dirs.into_iter().map(PathBuf::from).collect(), ignore_crashes: self.ignore_crashes.unwrap_or_default(), diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/report.rs b/libafl_libfuzzer/runtime/src/report.rs similarity index 80% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/report.rs rename to libafl_libfuzzer/runtime/src/report.rs index 7e4353ee98..44ee09cad1 100644 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/report.rs +++ 
b/libafl_libfuzzer/runtime/src/report.rs @@ -1,13 +1,13 @@ use std::ffi::c_int; use libafl::{ - events::{ProgressReporter, SimpleEventManager}, + events::{EventProcessor, ProgressReporter, SimpleEventManager}, executors::HasObservers, feedbacks::MapFeedbackMetadata, inputs::UsesInput, monitors::SimpleMonitor, - stages::{HasCurrentStage, StagesTuple}, - state::{HasExecutions, HasLastReportTime}, + stages::{HasCurrentStageId, StagesTuple}, + state::{HasExecutions, HasLastReportTime, Stoppable, UsesState}, Error, Fuzzer, HasMetadata, HasNamedMetadata, }; @@ -29,9 +29,10 @@ where + HasExecutions + UsesInput + HasLastReportTime - + HasCurrentStage, - E: HasObservers, - EM: ProgressReporter, + + HasCurrentStageId + + Stoppable, + E: HasObservers + UsesState, + EM: ProgressReporter + EventProcessor, ST: StagesTuple, { let meta = state diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/schedulers.rs b/libafl_libfuzzer/runtime/src/schedulers.rs similarity index 50% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/schedulers.rs rename to libafl_libfuzzer/runtime/src/schedulers.rs index 8929671061..82ea6298a9 100644 --- a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/schedulers.rs +++ b/libafl_libfuzzer/runtime/src/schedulers.rs @@ -1,66 +1,67 @@ -use std::{ - collections::{BTreeSet, HashMap}, - marker::PhantomData, -}; +use std::{collections::BTreeSet, marker::PhantomData}; +use hashbrown::HashMap; use libafl::{ corpus::{Corpus, CorpusId, Testcase}, feedbacks::MapNoveltiesMetadata, - inputs::UsesInput, + inputs::Input, schedulers::{RemovableScheduler, Scheduler}, - state::{HasCorpus, State, UsesState}, + state::{HasCorpus, State}, Error, HasMetadata, }; #[derive(Clone, Debug)] -pub struct MergeScheduler { +pub struct MergeScheduler { mapping: HashMap, all: BTreeSet, - phantom: PhantomData, + phantom: PhantomData<(I, S)>, } -impl UsesState for MergeScheduler -where - S: State, -{ - type State = S; -} - -impl RemovableScheduler for MergeScheduler +impl RemovableScheduler for MergeScheduler where + I: Input, S: State + HasCorpus, { fn on_remove( &mut self, - _state: &mut Self::State, - idx: CorpusId, - _testcase: &Option::Input>>, + _state: &mut S, + id: CorpusId, + _testcase: &Option>, ) -> Result<(), Error> { - self.all.remove(&idx); + self.all.remove(&id); Ok(()) } } -impl Scheduler for MergeScheduler +impl Scheduler for MergeScheduler where S: State + HasCorpus, { - fn on_add(&mut self, state: &mut Self::State, idx: CorpusId) -> Result<(), Error> { - self.all.insert(idx); - let testcase = state.corpus().get(idx)?.borrow(); + fn on_add(&mut self, state: &mut S, id: CorpusId) -> Result<(), Error> { + self.all.insert(id); + let testcase = state.corpus().get(id)?.borrow(); let meta = testcase.metadata::()?; - for cov_idx in &meta.list { - self.mapping.insert(*cov_idx, idx); + for cov_ in &meta.list { + self.mapping.insert(*cov_, id); } Ok(()) } - fn next(&mut self, _state: &mut Self::State) -> Result { + fn next(&mut self, _state: &mut S) -> Result { unimplemented!("Not suitable for actual scheduling."); } + + fn set_current_scheduled( + &mut self, + state: &mut S, + next_id: Option, + ) -> Result<(), Error> { + *state.corpus_mut().current_mut() = next_id; + Ok(()) + } } -impl MergeScheduler { +impl MergeScheduler { pub fn new() -> Self { Self { mapping: HashMap::default(), diff --git a/libafl_libfuzzer/libafl_libfuzzer_runtime/src/tmin.rs b/libafl_libfuzzer/runtime/src/tmin.rs similarity index 100% rename from libafl_libfuzzer/libafl_libfuzzer_runtime/src/tmin.rs rename to 
libafl_libfuzzer/runtime/src/tmin.rs diff --git a/libafl_libfuzzer/src/lib.rs b/libafl_libfuzzer/src/lib.rs index 14b2ed7639..188d9be378 100644 --- a/libafl_libfuzzer/src/lib.rs +++ b/libafl_libfuzzer/src/lib.rs @@ -75,28 +75,6 @@ //! to the runtime (e.g., to prevent coverage being collected on the runtime). //! #![cfg_attr(feature = "document-features", doc = document_features::document_features!())] -#![forbid(unexpected_cfgs)] -#![warn(clippy::cargo)] -#![allow(ambiguous_glob_reexports)] -#![deny(clippy::cargo_common_metadata)] -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(clippy::all)] -#![deny(clippy::pedantic)] -#![allow( - clippy::unreadable_literal, - clippy::type_repetition_in_bounds, - clippy::missing_errors_doc, - clippy::cast_possible_truncation, - clippy::used_underscore_binding, - clippy::ptr_as_ptr, - clippy::missing_panics_doc, - clippy::missing_docs_in_private_items, - clippy::module_name_repetitions, - clippy::ptr_cast_constness, - clippy::unsafe_derive_deserialize, - clippy::similar_names, - clippy::too_many_lines -)] #![cfg_attr(not(test), warn( missing_debug_implementations, missing_docs, @@ -137,9 +115,6 @@ while_true ) )] -// Till they fix this buggy lint in clippy -#![allow(clippy::borrow_as_ptr)] -#![allow(clippy::borrow_deref_ref)] use std::ffi::{c_char, c_int}; diff --git a/libafl_libfuzzer_runtime/Cargo.toml b/libafl_libfuzzer_runtime/Cargo.toml new file mode 120000 index 0000000000..3808d639a0 --- /dev/null +++ b/libafl_libfuzzer_runtime/Cargo.toml @@ -0,0 +1 @@ +../libafl_libfuzzer/runtime/Cargo.toml.template \ No newline at end of file diff --git a/libafl_libfuzzer_runtime/README.md b/libafl_libfuzzer_runtime/README.md new file mode 100644 index 0000000000..56be89702c --- /dev/null +++ b/libafl_libfuzzer_runtime/README.md @@ -0,0 +1,6 @@ +# libafl_libfuzzer_runtime + +This is the runtime for `libafl_libfuzzer`. + +Please see the [`libafl_libfuzzer`](../libafl_libfuzzer) documentation for details. +This crate should not be used alone except in very special circumstances. \ No newline at end of file diff --git a/libafl_libfuzzer_runtime/build.rs b/libafl_libfuzzer_runtime/build.rs new file mode 100644 index 0000000000..f3926213dc --- /dev/null +++ b/libafl_libfuzzer_runtime/build.rs @@ -0,0 +1,25 @@ +use std::{env, path::Path}; + +#[allow(clippy::too_many_lines)] +fn main() { + let out_dir = env::var_os("OUT_DIR").unwrap(); + + println!("cargo:rerun-if-changed=src/harness_wrap.h"); + println!("cargo:rerun-if-changed=src/harness_wrap.cpp"); + + let build = bindgen::builder() + .header("src/harness_wrap.h") + .generate_comments(true) + .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())) + .generate() + .expect("Couldn't generate the harness wrapper!"); + + build + .write_to_file(Path::new(&out_dir).join("harness_wrap.rs")) + .expect("Couldn't write the harness wrapper!"); + + cc::Build::new() + .cpp(true) + .file("src/harness_wrap.cpp") + .compile("harness_wrap"); +} diff --git a/libafl_libfuzzer_runtime/build.sh b/libafl_libfuzzer_runtime/build.sh new file mode 100755 index 0000000000..a7a41ec663 --- /dev/null +++ b/libafl_libfuzzer_runtime/build.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +set -e + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +cd "${SCRIPT_DIR}" || exit 1 + +if [ -z ${1+x} ]; then + profile=release +else + profile="$1" +fi + +if ! cargo +nightly --version >& /dev/null; then + echo -e "You must install a recent Rust nightly to build the libafl_libfuzzer runtime!" 
+ exit 1 +fi + +cargo +nightly build --profile "$profile" + +if [[ "$OSTYPE" == "darwin"* ]]; then + # MacOS and iOS + "${CXX:-clang++}" -dynamiclib -Wl,-force_load target/release/libafl_libfuzzer_runtime.a \ + -Wl,-U,_LLVMFuzzerInitialize -Wl,-U,_LLVMFuzzerCustomMutator -Wl,-U,_LLVMFuzzerCustomCrossOver -Wl,-U,_libafl_main \ + -o libafl_libfuzzer_runtime.dylib +else + # Linux and *BSD + RUSTC_BIN="$(cargo +nightly rustc -Zunstable-options --print target-libdir)/../bin" + RUST_LLD="${RUSTC_BIN}/rust-lld" + RUST_AR="${RUSTC_BIN}/llvm-ar" + + if ! [ -f "${RUST_LLD}" ] && [ -f "${RUST_AR}" ]; then + echo -e "You must install the llvm-tools component: \`rustup component add llvm-tools'" + exit 1 + fi + + tmpdir="" + + cleanup() { + rm -rf "${tmpdir}" + exit + } + trap cleanup INT TERM + + tmpdir="$(mktemp -d)" + "${RUST_LLD}" -flavor gnu -r --whole-archive target/release/libafl_libfuzzer_runtime.a -o "${tmpdir}/libFuzzer.o" + "${RUST_AR}" cr libFuzzer.a "${tmpdir}/libFuzzer.o" + + echo "Done! Wrote the runtime to \`${SCRIPT_DIR}/libFuzzer.a'" + cleanup +fi + diff --git a/libafl_libfuzzer_runtime/src b/libafl_libfuzzer_runtime/src new file mode 120000 index 0000000000..108b03a6ef --- /dev/null +++ b/libafl_libfuzzer_runtime/src @@ -0,0 +1 @@ +../libafl_libfuzzer/runtime/src \ No newline at end of file diff --git a/libafl_nyx/Cargo.toml b/libafl_nyx/Cargo.toml index 864d3cee42..0ebe149d6d 100644 --- a/libafl_nyx/Cargo.toml +++ b/libafl_nyx/Cargo.toml @@ -9,15 +9,35 @@ repository = "https://github.com/AFLplusplus/LibAFL/" readme = "../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "testing", "security"] -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [target.'cfg(target_os = "linux")'.dependencies] -libnyx = { git = "https://github.com/nyx-fuzz/libnyx.git", rev = "6833d236dfe785a8a23d8c8d79e74c99fa635004" } -libafl = { path = "../libafl", version = "0.13.0", features = ["std", "libafl_derive", "frida_cli" ]} -libafl_bolts = { path = "../libafl_bolts", version = "0.13.0", features = ["std", "libafl_derive", "frida_cli" ]} -libafl_targets = { path = "../libafl_targets", version = "0.13.0", features = ["std", "sancov_cmplog"] } +libnyx = { git = "https://github.com/nyx-fuzz/libnyx.git", rev = "ea6ceb994ab975b81aea0daaf64b92a3066c1e8d" } +libafl = { workspace = true, default-features = true, features = [ + "std", + "libafl_derive", + "frida_cli", +] } +libafl_bolts = { workspace = true, default-features = true, features = [ + "std", + "libafl_derive", + "frida_cli", +] } +libafl_targets = { workspace = true, default-features = true, features = [ + "std", + "sancov_cmplog", +] } -nix = { version = "0.29", features = ["fs"] } -typed-builder = "0.18" +nix = { workspace = true, default-features = true, features = ["fs"] } +typed-builder = { workspace = true } + +[lints] +workspace = true diff --git a/libafl_nyx/README.md b/libafl_nyx/README.md index 58e5e224c8..0d954a76e6 100644 --- a/libafl_nyx/README.md +++ b/libafl_nyx/README.md @@ -1,5 +1,6 @@ -`libafl_nyx` is the `libafl`'s front-end for nyx fuzzer. This crate provides both the standalone mode and parallel mode: -- In standalone mode, no VM snapshot is serialized and stored in the working directory. That might be useful if you really want to run the fuzzer with only one process (meaning one VM). 
+`libafl_nyx` is `libafl`'s front-end for the [nyx fuzzing framework](https://github.com/nyx-fuzz), which facilitates fuzzing in virtual machines such as QEMU. This crate provides both a standalone mode and a parallel mode: + +- In standalone mode, no VM snapshot is serialized and stored in the working directory. That might be useful if you really want to run the fuzzer with only one process (meaning one VM). - In parallel mode, the first fuzzer process (parent) has to create the VM snapshot while all other child processes will wait for the snapshot files to appear in the working directory. In order to use this crate, you need to specify the shared directory and mode in `NyxHelper`, then use `NyxExecutor`. For more details, please see `./fuzzers/nyx_libxml2_standalone` and `./fuzzers/nyx_libxml2_parallel`. \ No newline at end of file diff --git a/libafl_nyx/build_nyx_support.sh b/libafl_nyx/build_nyx_support.sh index 311436603c..4ef525e49c 100755 --- a/libafl_nyx/build_nyx_support.sh +++ b/libafl_nyx/build_nyx_support.sh @@ -1,4 +1,7 @@ #!/bin/bash + +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + echo "=================================================" echo " Nyx build script" echo "=================================================" @@ -40,7 +43,8 @@ test -e QEMU-Nyx/.git || { echo "[-] QEMU-Nyx not checked out, please install gi echo "[*] Checking QEMU-Nyx ..." if [ ! -f "QEMU-Nyx/x86_64-softmmu/qemu-system-x86_64" ]; then cd QEMU-Nyx/ || return - cp ../Makefile.libxdc ./libxdc/Makefile || exit 1 + # We need to copy our custom `Makefile.libxdc` after `git submodule update`, otherwise we get a git error. + sed -i "s,git submodule update libxdc$,git submodule update libxdc \&\& cp ../Makefile.libxdc ./libxdc/Makefile || exit 1," compile_qemu_nyx.sh ./compile_qemu_nyx.sh lto || exit 1 cd ..
fi diff --git a/libafl_nyx/src/executor.rs b/libafl_nyx/src/executor.rs index d9cc30de44..5f100a5c83 100644 --- a/libafl_nyx/src/executor.rs +++ b/libafl_nyx/src/executor.rs @@ -5,9 +5,9 @@ use std::{ }; use libafl::{ - executors::{Executor, ExitKind, HasObservers}, + executors::{Executor, ExitKind, HasObservers, HasTimeout}, inputs::HasTargetBytes, - observers::{ObserversTuple, StdOutObserver, UsesObservers}, + observers::{ObserversTuple, StdOutObserver}, state::{HasExecutions, State, UsesState}, Error, }; @@ -32,6 +32,7 @@ pub struct NyxExecutor { impl NyxExecutor<(), ()> { /// Create a builder for [`NyxExeuctor`] + #[must_use] pub fn builder() -> NyxExecutorBuilder { NyxExecutorBuilder::new() } @@ -44,21 +45,13 @@ where type State = S; } -impl UsesObservers for NyxExecutor -where - OT: ObserversTuple, - S: State, -{ - type Observers = OT; -} - impl Executor for NyxExecutor where EM: UsesState, S: State + HasExecutions, S::Input: HasTargetBytes, Z: UsesState, - OT: ObserversTuple, + OT: ObserversTuple, { fn run_target( &mut self, @@ -121,27 +114,49 @@ where } }; - match self.stdout.as_mut() { - Some(ob) => { - let mut stdout = Vec::new(); - self.helper.nyx_stdout.rewind()?; - self.helper - .nyx_stdout - .read_to_end(&mut stdout) - .map_err(|e| Error::illegal_state(format!("Failed to read Nyx stdout: {e}")))?; + if let Some(ob) = self.stdout.as_mut() { + let mut stdout = Vec::new(); + self.helper.nyx_stdout.rewind()?; + self.helper + .nyx_stdout + .read_to_end(&mut stdout) + .map_err(|e| Error::illegal_state(format!("Failed to read Nyx stdout: {e}")))?; - ob.observe_stdout(&stdout); - } - None => (), + ob.observe_stdout(&stdout); } Ok(exit_kind) } } +impl HasTimeout for NyxExecutor { + fn timeout(&self) -> std::time::Duration { + self.helper.timeout + } + + fn set_timeout(&mut self, timeout: std::time::Duration) { + let micros = 1000000; + let mut timeout_secs = timeout.as_secs(); + let mut timeout_micros = timeout.as_micros() - u128::from(timeout.as_secs() * micros); + // since timeout secs is a u8 -> convert any overflow into micro secs + if timeout_secs > 255 { + timeout_micros = u128::from((timeout_secs - 255) * micros); + timeout_secs = 255; + } + + self.helper.timeout = timeout; + + self.helper + .set_timeout(timeout_secs as u8, timeout_micros as u32); + } +} + impl NyxExecutor { - /// convert `trace_bits` ptr into real trace map - pub fn trace_bits(self) -> &'static mut [u8] { + /// Convert `trace_bits` ptr into real trace map + /// + /// # Safety + /// Mutable borrow may only be used once at a time. 
+ pub unsafe fn trace_bits(self) -> &'static mut [u8] { unsafe { std::slice::from_raw_parts_mut(self.helper.bitmap_buffer, self.helper.bitmap_size) } @@ -160,6 +175,7 @@ impl Default for NyxExecutorBuilder { } impl NyxExecutorBuilder { + #[must_use] pub fn new() -> Self { Self { stdout: None, @@ -193,8 +209,10 @@ impl NyxExecutorBuilder { impl HasObservers for NyxExecutor where S: State, - OT: ObserversTuple, + OT: ObserversTuple, { + type Observers = OT; + fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { RefIndexable::from(&self.observers) } diff --git a/libafl_nyx/src/helper.rs b/libafl_nyx/src/helper.rs index fc7b2163de..46673725be 100644 --- a/libafl_nyx/src/helper.rs +++ b/libafl_nyx/src/helper.rs @@ -1,5 +1,5 @@ /// [`NyxHelper`] is used to wrap `NyxProcess` -use std::{fmt::Debug, fs::File, path::Path}; +use std::{fmt::Debug, fs::File, path::Path, time::Duration}; use libafl::Error; use libnyx::{NyxConfig, NyxProcess, NyxProcessRole}; @@ -10,6 +10,8 @@ pub struct NyxHelper { pub nyx_process: NyxProcess, pub nyx_stdout: File, + pub timeout: Duration, + pub bitmap_size: usize, pub bitmap_buffer: *mut u8, } @@ -66,9 +68,13 @@ impl NyxHelper { let bitmap_size = nyx_process.bitmap_buffer_size(); let bitmap_buffer = nyx_process.bitmap_buffer_mut().as_mut_ptr(); + let mut timeout = Duration::from_secs(u64::from(settings.timeout_secs)); + timeout += Duration::from_micros(u64::from(settings.timeout_micro_secs)); + Ok(Self { nyx_process, nyx_stdout, + timeout, bitmap_size, bitmap_buffer, }) diff --git a/libafl_nyx/src/lib.rs b/libafl_nyx/src/lib.rs index 33c3cdce38..a1e3ffb763 100644 --- a/libafl_nyx/src/lib.rs +++ b/libafl_nyx/src/lib.rs @@ -1,6 +1,3 @@ -#![allow(clippy::module_name_repetitions, clippy::missing_panics_doc)] -#![forbid(unexpected_cfgs)] - #[cfg(target_os = "linux")] pub mod executor; #[cfg(target_os = "linux")] diff --git a/libafl_qemu/Cargo.toml b/libafl_qemu/Cargo.toml index 487732dc2f..eab48f1a79 100644 --- a/libafl_qemu/Cargo.toml +++ b/libafl_qemu/Cargo.toml @@ -9,17 +9,31 @@ readme = "../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "qemu", "instrumentation"] edition = "2021" -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] [package.metadata.docs.rs] features = ["document-features", "default", "python", "x86_64", "usermode"] rustdoc-args = ["--cfg", "docsrs"] [features] -default = ["fork", "build_libgasan", "build_libqasan", "serdeany_autoreg", "injections"] -clippy = [] # special feature for clippy, don't use in normal projects§ +default = [ + "usermode", + "fork", + "build_libgasan", + "build_libqasan", + "serdeany_autoreg", + "injections", +] document-features = ["dep:document-features"] -paranoid_debug = ["libafl_qemu_sys/paranoid_debug"] # Will perform as many checks as possible. The target will be greatly slowed down. +paranoid_debug = [ + "libafl_qemu_sys/paranoid_debug", +] # Will perform as many checks as possible. The target will be greatly slowed down. #! # Feature Flags #! 
### General Features @@ -40,9 +54,13 @@ x86_64 = ["libafl_qemu_sys/x86_64"] i386 = ["libafl_qemu_sys/i386"] # build qemu for i386 arm = ["libafl_qemu_sys/arm"] # build qemu for arm aarch64 = ["libafl_qemu_sys/aarch64"] # build qemu for aarch64 -mips = ["libafl_qemu_sys/mips"] # build qemu for mips (el, use with the 'be' feature of mips be) +mips = [ + "libafl_qemu_sys/mips", +] # build qemu for mips (el, use with the 'be' feature of mips be) ppc = ["libafl_qemu_sys/ppc"] # build qemu for powerpc hexagon = ["libafl_qemu_sys/hexagon"] # build qemu for hexagon +riscv32 = ["libafl_qemu_sys/riscv32"] # build qemu for riscv 32bit +riscv64 = ["libafl_qemu_sys/riscv64"] # build qemu for riscv 64bit ## Big Endian mode be = ["libafl_qemu_sys/be"] @@ -57,53 +75,69 @@ systemmode = ["libafl_qemu_sys/systemmode"] ## Automatically register all `#[derive(SerdeAny)]` types at startup. serdeany_autoreg = ["libafl_bolts/serdeany_autoreg"] -slirp = [ "systemmode", "libafl_qemu_sys/slirp" ] # build qemu with host libslirp (for user networking) +slirp = [ + "systemmode", + "libafl_qemu_sys/slirp", +] # build qemu with host libslirp (for user networking) # Requires the binary's build.rs to call `build_libafl_qemu` -shared = [ "libafl_qemu_sys/shared" ] +shared = ["libafl_qemu_sys/shared"] + +#! ## Internal features, don't use in normal projects +## clippy workaround +clippy = ["libafl_qemu_sys/clippy"] [dependencies] -libafl = { path = "../libafl", version = "0.13.0", default-features = false, features = ["std", "derive", "regex"] } -libafl_bolts = { path = "../libafl_bolts", version = "0.13.0", default-features = false, features = ["std", "derive"] } -libafl_targets = { path = "../libafl_targets", version = "0.13.0" } -libafl_qemu_sys = { path = "./libafl_qemu_sys", version = "0.13.0" } +libafl = { workspace = true, features = ["std", "derive", "regex"] } +libafl_bolts = { workspace = true, features = ["std", "derive"] } +libafl_targets = { workspace = true, default-features = true, version = "0.14.1" } +libafl_qemu_sys = { workspace = true } +libafl_derive = { workspace = true, default-features = true } -serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib -hashbrown = { version = "0.14", features = ["serde"] } # A faster hashmap, nostd compatible -num-traits = "0.2" -num-derive = "0.4" -num_enum = "0.7" -goblin = "0.8" -libc = "0.2" -strum = "0.26" -strum_macros = "0.26" -syscall-numbers = "3.0" -meminterval = "0.4" -thread_local = "1.1.4" +serde = { workspace = true, default-features = false, features = [ + "alloc", +] } # serialization lib +hashbrown = { workspace = true, default-features = true, features = [ + "serde", +] } # A faster hashmap, nostd compatible +num-traits = { workspace = true, default-features = true } +num-derive = "0.4.2" +num_enum = { workspace = true, default-features = true } +goblin = "0.9.2" +libc = { workspace = true } +strum = "0.26.3" +strum_macros = "0.26.4" +syscall-numbers = "4.0.0" +meminterval = { workspace = true } +thread_local = "1.1.8" capstone = "0.12.0" -rangemap = "1.3" -log = "0.4" -object = "0.36" -addr2line = "0.23" -typed-arena = "2.0" -paste = "1" -enum-map = "2.7" -serde_yaml = { version = "0.9", optional = true } # For parsing the injections yaml file -toml = { version = "0.8.13", optional = true } # For parsing the injections toml file -pyo3 = { version = "0.18", optional = true , features = ["multiple-pymethods"]} -bytes-utils = "0.1" -typed-builder = "0.18" -memmap2 = "0.9" +rangemap = { workspace = true } +log = { 
workspace = true } +object = "0.36.4" +addr2line = "0.24.1" +typed-arena = "2.0.2" +paste = { workspace = true } +enum-map = "2.7.3" +serde_yaml = { workspace = true, optional = true } # For parsing the injections yaml file +toml = { workspace = true, optional = true } # For parsing the injections toml file +pyo3 = { workspace = true, optional = true, features = ["multiple-pymethods"] } +bytes-utils = "0.1.4" +typed-builder = { workspace = true } +memmap2 = "0.9.5" +getset = "0.1.3" # Document all features of this crate (for `cargo doc`) -document-features = { version = "0.2", optional = true } +document-features = { workspace = true, optional = true } [build-dependencies] -libafl_qemu_build = { path = "./libafl_qemu_build", version = "0.13.0" } -pyo3-build-config = { version = "0.21", optional = true } -rustversion = "1.0" -bindgen = "0.69" -cc = "1.0" +libafl_qemu_build = { workspace = true, default-features = true, version = "0.14.1" } +pyo3-build-config = { workspace = true, optional = true } +rustversion = { workspace = true } +bindgen = { workspace = true } +cc = { workspace = true } [lib] name = "libafl_qemu" crate-type = ["cdylib", "rlib"] + +[lints] +workspace = true diff --git a/libafl_qemu/README.md b/libafl_qemu/README.md new file mode 100644 index 0000000000..ff4b4eb4ac --- /dev/null +++ b/libafl_qemu/README.md @@ -0,0 +1,24 @@ +# LibAFL QEMU + +LibAFL QEMU is a fuzzing-oriented emulation library that wraps QEMU with a rich API in Rust. + +It comes in two variants: usermode, to fuzz Linux ELF userspace binaries, and systemmode, to fuzz arbitrary operating systems with QEMU TCG. + +## Cite + +If you use LibAFL QEMU for your academic work, consider citing the following paper: + +```bibtex +@InProceedings{libaflqemu:bar24, + title = {{LibAFL QEMU: A Library for Fuzzing-oriented Emulation}}, + author = {Romain Malmain and Andrea Fioraldi and Aurélien Francillon}, + year = {2024}, + series = {BAR 24}, + month = {March}, + booktitle = {Workshop on Binary Analysis Research (colocated with NDSS Symposium)}, + location = {San Diego (USA)}, + keywords = {fuzzing, emulation}, +} +``` + + diff --git a/libafl_qemu/build.rs b/libafl_qemu/build.rs index 1b8f971220..5346298ba4 100644 --- a/libafl_qemu/build.rs +++ b/libafl_qemu/build.rs @@ -4,8 +4,6 @@ mod host_specific { #[cfg(not(target_os = "linux"))] pub fn build() { - // Print a emulation_mode to silence clippy's unexpected cfg on macOS - println!("cargo:rustc-cfg=emulation_mode=\"usermode\""); println!("cargo:warning=libafl_qemu only builds on Linux hosts"); } } diff --git a/libafl_qemu/build_linux.rs b/libafl_qemu/build_linux.rs index 83b3420590..239ec2ede2 100644 --- a/libafl_qemu/build_linux.rs +++ b/libafl_qemu/build_linux.rs @@ -10,23 +10,25 @@ static LIBAFL_QEMU_RUNTIME_TEST: &str = r#" #include #include "libafl_qemu.h" -int main() {} +void __libafl_qemu_testfile() {} "#; #[allow(clippy::too_many_lines)] pub fn build() { // Note: Unique features are checked in libafl_qemu_sys - println!(r#"cargo::rustc-check-cfg=cfg(emulation_mode, values("usermode", "systemmode"))"#); println!( - r#"cargo::rustc-check-cfg=cfg(cpu_target, values("arm", "aarch64", "hexagon", "i386", "mips", "ppc", "x86_64"))"# + r#"cargo::rustc-check-cfg=cfg(cpu_target, values("arm", "aarch64", "hexagon", "i386", "mips", "ppc", "riscv32", "riscv64", "x86_64"))"# ); let emulation_mode = if cfg!(feature = "usermode") { - "usermode".to_string() + "usermode" } else if cfg!(feature = "systemmode") { - "systemmode".to_string() + "systemmode" } else { -
env::var("EMULATION_MODE").unwrap_or_else(|_| "usermode".to_string()) + unreachable!( + "The macros `assert_unique_feature` and `assert_at_least_one_feature` in \ + `libafl_qemu_sys/build_linux.rs` should panic before this code is reached." + ); }; let src_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); @@ -45,28 +47,35 @@ pub fn build() { let qemu_asan = cfg!(all(feature = "build_libqasan", not(feature = "hexagon"))); let libafl_qemu_hdr_name = "libafl_qemu.h"; + let libafl_qemu_arch_hdr_name = "libafl_qemu_arch.h"; + let libafl_qemu_defs_hdr_name = "libafl_qemu_defs.h"; + let libafl_qemu_impl_hdr_name = "libafl_qemu_impl.h"; let libafl_runtime_dir = src_dir.join("runtime"); + let libafl_qemu_hdr = libafl_runtime_dir.join(libafl_qemu_hdr_name); + let libafl_qemu_arch_hdr = libafl_runtime_dir.join(libafl_qemu_arch_hdr_name); + let libafl_qemu_defs_hdr = libafl_runtime_dir.join(libafl_qemu_defs_hdr_name); + let libafl_qemu_impl_hdr = libafl_runtime_dir.join(libafl_qemu_impl_hdr_name); let libafl_runtime_testfile = out_dir.join("runtime_test.c"); - fs::write(&libafl_runtime_testfile, LIBAFL_QEMU_RUNTIME_TEST).expect("Could not write runtime test file"); + fs::write(&libafl_runtime_testfile, LIBAFL_QEMU_RUNTIME_TEST) + .expect("Could not write runtime test file"); let mut runtime_test_cc_compiler = cc::Build::new(); - runtime_test_cc_compiler.cpp(false) + runtime_test_cc_compiler + .cpp(false) .include(&libafl_runtime_dir) .file(&libafl_runtime_testfile); - runtime_test_cc_compiler.try_compile("runtime_test").unwrap(); + runtime_test_cc_compiler + .try_compile("runtime_test") + .unwrap(); let runtime_bindings_file = out_dir.join("libafl_qemu_bindings.rs"); let stub_runtime_bindings_file = src_dir.join("runtime/libafl_qemu_stub_bindings.rs"); - println!("cargo::rustc-check-cfg=cfg(emulation_mode, values(\"usermode\", \"systemmode\"))"); - println!("cargo:rustc-cfg=emulation_mode=\"{emulation_mode}\""); - println!("cargo:rerun-if-env-changed=EMULATION_MODE"); - println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rerun-if-changed=build_linux.rs"); println!("cargo:rerun-if-changed={}", libafl_runtime_dir.display()); @@ -83,6 +92,10 @@ pub fn build() { "mips".to_string() } else if cfg!(feature = "ppc") { "ppc".to_string() + } else if cfg!(feature = "riscv32") { + "riscv32".to_string() + } else if cfg!(feature = "riscv64") { + "riscv64".to_string() } else if cfg!(feature = "hexagon") { "hexagon".to_string() } else { @@ -90,9 +103,9 @@ pub fn build() { }; println!("cargo:rerun-if-env-changed=CPU_TARGET"); println!("cargo:rustc-cfg=cpu_target=\"{cpu_target}\""); - println!("cargo::rustc-check-cfg=cfg(cpu_target, values(\"x86_64\", \"arm\", \"aarch64\", \"i386\", \"mips\", \"ppc\", \"hexagon\"))"); + println!("cargo::rustc-check-cfg=cfg(cpu_target, values(\"x86_64\", \"arm\", \"aarch64\", \"i386\", \"mips\", \"ppc\", \"hexagon\", \"riscv32\", \"riscv64\"))"); - let cross_cc = if (emulation_mode == "usermode") && (qemu_asan || qemu_asan_guest) { + let cross_cc = if cfg!(feature = "usermode") && (qemu_asan || qemu_asan_guest) { // TODO try to autodetect a cross compiler with the arch name (e.g. 
aarch64-linux-gnu-gcc) let cross_cc = env::var("CROSS_CC").unwrap_or_else(|_| { println!("cargo:warning=CROSS_CC is not set, default to cc (things can go wrong if the selected cpu target ({cpu_target}) is not the host arch ({}))", env::consts::ARCH); @@ -119,6 +132,25 @@ pub fn build() { ) .expect("Could not copy libafl_qemu.h to out directory."); + fs::copy( + + libafl_qemu_arch_hdr.clone(), + include_dir.join(libafl_qemu_arch_hdr_name), + ) + .expect("Could not copy libafl_qemu_arch.h to out directory."); + + fs::copy( + libafl_qemu_defs_hdr.clone(), + include_dir.join(libafl_qemu_defs_hdr_name), + ) + .expect("Could not copy libafl_qemu_defs.h to out directory."); + + fs::copy( + libafl_qemu_impl_hdr.clone(), + include_dir.join(libafl_qemu_impl_hdr_name), + ) + .expect("Could not copy libafl_qemu_impl.h to out directory."); + bindgen::Builder::default() .derive_debug(true) .derive_default(true) @@ -136,12 +168,12 @@ pub fn build() { maybe_generate_stub_bindings( &cpu_target, - &emulation_mode, + emulation_mode, stub_runtime_bindings_file.as_path(), - runtime_bindings_file.as_path() + runtime_bindings_file.as_path(), ); - if (emulation_mode == "usermode") && (qemu_asan || qemu_asan_guest) { + if cfg!(feature = "usermode") && (qemu_asan || qemu_asan_guest) { let qasan_dir = Path::new("libqasan"); let qasan_dir = fs::canonicalize(qasan_dir).unwrap(); println!("cargo:rerun-if-changed={}", qasan_dir.display()); diff --git a/libafl_qemu/libafl_qemu_build/Cargo.toml b/libafl_qemu/libafl_qemu_build/Cargo.toml index cc7450e6ca..7dbca22934 100644 --- a/libafl_qemu/libafl_qemu_build/Cargo.toml +++ b/libafl_qemu/libafl_qemu_build/Cargo.toml @@ -1,20 +1,20 @@ [package] name = "libafl_qemu_build" -version = "0.13.0" +version.workspace = true authors = ["Andrea Fioraldi "] description = "Builder for LibAFL QEMU" documentation = "https://docs.rs/libafl_qemu_build" repository = "https://github.com/AFLplusplus/LibAFL/" readme = "./README.md" license = "MIT OR Apache-2.0" -keywords =["fuzzing", "qemu", "instrumentation"] +keywords = ["fuzzing", "qemu", "instrumentation"] edition = "2021" categories = [ - "development-tools::testing", - "emulators", - "embedded", - "os", - "no-std", + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", ] [package.metadata.docs.rs] @@ -22,19 +22,23 @@ all-features = true [features] shared = [] -slirp = [] # build qemu with host libslirp (for user networking) +slirp = [] # build qemu with host libslirp (for user networking) clippy = [] # special feature for clippy, don't use in normal projects§ -paranoid_debug = [] # Will perform as many checks as possible. The target will be greatly slowed down. +paranoid_debug = [ +] # Will perform as many checks as possible. The target will be greatly slowed down. 
[dependencies] -bindgen = "0.69.4" -which = "6.0" -json = "0.12" -shell-words = "1.1" -pkg-config = "0.3.26" -cc = "1.0" -regex = "1" -rustversion = "1.0" -rustc_version = "0.4" \ No newline at end of file +bindgen = { workspace = true } +which = { workspace = true } +json = "0.12.4" +shell-words = "1.1.0" +pkg-config = "0.3.31" +cc = { workspace = true } +regex = { workspace = true } +rustversion = { workspace = true } +rustc_version = "0.4.1" + +[lints] +workspace = true diff --git a/libafl_qemu/libafl_qemu_build/src/bindings.rs b/libafl_qemu/libafl_qemu_build/src/bindings.rs index a07a914948..1733cff770 100644 --- a/libafl_qemu/libafl_qemu_build/src/bindings.rs +++ b/libafl_qemu/libafl_qemu_build/src/bindings.rs @@ -58,8 +58,11 @@ const WRAPPER_HEADER: &str = r#" #include "hw/core/sysemu-cpu-ops.h" #include "exec/address-spaces.h" #include "sysemu/tcg.h" +#include "sysemu/runstate.h" #include "sysemu/replay.h" +#include "libafl/system.h" +#include "libafl/qemu_snapshot.h" #include "libafl/syx-snapshot/device-save.h" #include "libafl/syx-snapshot/syx-snapshot.h" @@ -77,15 +80,30 @@ const WRAPPER_HEADER: &str = r#" #include "tcg/tcg.h" #include "tcg/tcg-op.h" #include "tcg/tcg-internal.h" -#include "exec/helper-head.h" #include "qemu/plugin-memory.h" +#include "libafl/cpu.h" +#include "libafl/gdb.h" #include "libafl/exit.h" -#include "libafl/hook.h" #include "libafl/jit.h" #include "libafl/utils.h" +#include "libafl/hook.h" + +#include "libafl/hooks/tcg/backdoor.h" +#include "libafl/hooks/tcg/block.h" +#include "libafl/hooks/tcg/cmp.h" +#include "libafl/hooks/tcg/edge.h" +#include "libafl/hooks/tcg/instruction.h" +#include "libafl/hooks/tcg/read_write.h" +#include "libafl/hooks/cpu_run.h" +#include "libafl/hooks/thread.h" + +#ifdef CONFIG_USER_ONLY +#include "libafl/hooks/syscall.h" +#endif + "#; pub fn generate( @@ -95,7 +113,13 @@ pub fn generate( ) -> Result { let wrapper_h = build_dir.join("wrapper.h"); - store_generated_content_if_different(&wrapper_h, WRAPPER_HEADER.as_bytes(), None, None, false); + store_generated_content_if_different( + &wrapper_h, + WRAPPER_HEADER.as_bytes(), + None, + vec![], + false, + ); let bindings = bindgen::Builder::default() .derive_debug(true) @@ -108,6 +132,11 @@ pub fn generate( }) .header(wrapper_h.display().to_string()) .clang_args(clang_args) + .allowlist_var("libafl_dump_core_hook") + .allowlist_var("libafl_force_dfl") + .allowlist_var("mmap_next_start") + .allowlist_var("guest_base") + .allowlist_var("exec_path") .allowlist_type("target_ulong") .allowlist_type("target_long") .allowlist_type("CPUState") @@ -127,7 +156,7 @@ pub fn generate( .allowlist_type("Syx.*") .allowlist_type("libafl_mapinfo") .allowlist_type("IntervalTreeRoot") - .allowlist_function("qemu_user_init") + .allowlist_function("qemu_system_debug_request") .allowlist_function("target_mmap") .allowlist_function("target_mprotect") .allowlist_function("target_munmap") @@ -147,7 +176,12 @@ pub fn generate( .allowlist_function("read_self_maps") .allowlist_function("free_self_maps") .allowlist_function("pageflags_get_root") + .allowlist_function("vm_start") + .allowlist_function("qemu_main_loop") + .allowlist_function("qemu_cleanup") .blocklist_function("main_loop_wait") // bindgen issue #1313 + .blocklist_type("siginfo_t") + .raw_line("use libc::siginfo_t;") .parse_callbacks(Box::new(bindgen::CargoCallbacks::new())); // arch specific functions @@ -159,6 +193,10 @@ pub fn generate( bindings .allowlist_type("ARMCPU") .allowlist_type("ARMv7MState") + } else if cpu_target == "riscv32" || 
cpu_target == "riscv64" { + bindings + .allowlist_type("RISCVCPU") + .allowlist_type("CPURISCVState") } else { bindings }; diff --git a/libafl_qemu/libafl_qemu_build/src/build.rs b/libafl_qemu/libafl_qemu_build/src/build.rs index d7aaafe5cd..6828b3fea3 100644 --- a/libafl_qemu/libafl_qemu_build/src/build.rs +++ b/libafl_qemu/libafl_qemu_build/src/build.rs @@ -9,9 +9,9 @@ use which::which; use crate::cargo_add_rpath; -const QEMU_URL: &str = "https://github.com/AFLplusplus/qemu-libafl-bridge"; -const QEMU_DIRNAME: &str = "qemu-libafl-bridge"; -const QEMU_REVISION: &str = "9d2197b73bf5e66e709f9f1669467d5c84062da0"; +pub const QEMU_URL: &str = "https://github.com/AFLplusplus/qemu-libafl-bridge"; +pub const QEMU_DIRNAME: &str = "qemu-libafl-bridge"; +pub const QEMU_REVISION: &str = "b01a0bc334cf11bfc5e8f121d9520ef7f47dbcd1"; #[allow(clippy::module_name_repetitions)] pub struct BuildResult { @@ -86,12 +86,15 @@ fn configure_qemu( .env("__LIBAFL_QEMU_BUILD_OUT", build_dir.join("linkinfo.json")) .env("__LIBAFL_QEMU_BUILD_CC", cc_compiler.path()) .env("__LIBAFL_QEMU_BUILD_CXX", cpp_compiler.path()) - .arg(&format!("--cc={}", linker_interceptor.display())) - .arg(&format!("--cxx={}", linker_interceptor_plus_plus.display())) + .arg(format!("--cc={}", linker_interceptor.display())) + .arg(format!("--cxx={}", linker_interceptor_plus_plus.display())) .arg("--as-shared-lib") - .arg(&format!("--target-list={cpu_target}-{target_suffix}")) + .arg(format!("--target-list={cpu_target}-{target_suffix}")) + .arg("--disable-bsd-user") // .arg("--disable-capstone") - .arg("--disable-bsd-user"); + .arg("--disable-docs") + .arg("--disable-tests") + .arg("--disable-tools"); if cfg!(feature = "paranoid_debug") { cmd.arg("--enable-debug") @@ -137,14 +140,14 @@ fn configure_qemu( .arg("--disable-gio") .arg("--disable-glusterfs") .arg("--disable-gnutls") - .arg("--disable-gtk") - .arg("--disable-guest-agent") - .arg("--disable-guest-agent-msi") + // .arg("--disable-gtk") + // .arg("--disable-guest-agent") + // .arg("--disable-guest-agent-msi") .arg("--disable-hvf") .arg("--disable-iconv") .arg("--disable-jack") .arg("--disable-keyring") - .arg("--disable-kvm") + // .arg("--disable-kvm") .arg("--disable-libdaxctl") .arg("--disable-libiscsi") .arg("--disable-libnfs") @@ -155,7 +158,7 @@ fn configure_qemu( .arg("--disable-linux-aio") .arg("--disable-linux-io-uring") .arg("--disable-linux-user") - .arg("--disable-live-block-migration") + // .arg("--disable-live-block-migration") .arg("--disable-lzfse") .arg("--disable-lzo") .arg("--disable-l2tpv3") @@ -171,7 +174,7 @@ fn configure_qemu( .arg("--disable-pa") .arg("--disable-parallels") .arg("--disable-png") - .arg("--disable-pvrdma") + // .arg("--disable-pvrdma") .arg("--disable-qcow1") .arg("--disable-qed") .arg("--disable-qga-vss") @@ -214,8 +217,7 @@ fn configure_qemu( .arg("--disable-xen") .arg("--disable-xen-pci-passthrough") .arg("--disable-xkbcommon") - .arg("--disable-zstd") - .arg("--disable-tests"); + .arg("--disable-zstd"); } cmd @@ -237,7 +239,7 @@ fn build_qemu( .arg("-j"); if let Some(j) = jobs { - cmd.arg(&format!("{j}")).env("V", "1"); + cmd.arg(format!("{j}")).env("V", "1"); } cmd @@ -430,11 +432,11 @@ pub fn build( ); } - assert!(output_lib.is_file()); // Sanity check + assert!(output_lib.is_file()); // Make sure this isn't very very wrong /* let mut objects = vec![]; - for dir in &[ + for dir in [ build_dir.join("libcommon.fa.p"), build_dir.join(format!("libqemu-{cpu_target}-{target_suffix}.fa.p")), ] { diff --git a/libafl_qemu/libafl_qemu_build/src/lib.rs 
b/libafl_qemu/libafl_qemu_build/src/lib.rs index 631263a31b..57445cfe46 100644 --- a/libafl_qemu/libafl_qemu_build/src/lib.rs +++ b/libafl_qemu/libafl_qemu_build/src/lib.rs @@ -1,23 +1,20 @@ -#![forbid(unexpected_cfgs)] -#![allow(clippy::missing_panics_doc)] - -#[rustversion::nightly] -use std::io::{BufRead, BufReader}; +// #[rustversion::nightly] +// use std::io::{BufRead, BufReader}; use std::{ collections::hash_map, - env, fs, - fs::File, + env, + fs::{self, File}, hash::Hasher, io::{Read, Seek, SeekFrom, Write}, path::{Path, PathBuf}, process::Command, - ptr::addr_of_mut, + sync::{LazyLock, Mutex}, }; -#[rustversion::nightly] -use regex::Regex; -#[rustversion::nightly] -use rustc_version::Version; +//#[rustversion::nightly] +//use regex::Regex; +//#[rustversion::nightly] +//use rustc_version::Version; use which::which; mod bindings; @@ -25,27 +22,26 @@ mod build; pub use build::build; +#[rustversion::nightly] +use crate::build::QEMU_REVISION; + const LLVM_VERSION_MAX: i32 = 33; -static mut CARGO_RPATH: Option> = None; +static CARGO_RPATH: LazyLock>> = LazyLock::new(Mutex::default); static CARGO_RPATH_SEPARATOR: &str = "|"; +// Add to the list of `rpath`s. +// Later, print the `cargo::rpath` using [`cargo_propagate_rpath`] pub fn cargo_add_rpath(rpath: &str) { - unsafe { - if let Some(rpaths) = &mut *addr_of_mut!(CARGO_RPATH) { - rpaths.push(rpath.to_string()); - } else { - CARGO_RPATH = Some(vec![rpath.to_string()]); - } - } + CARGO_RPATH.lock().unwrap().push(rpath.to_string()); } +// Print the `rpath`, set via [`cargo_add_rpath`] as `cargo::rpath` pub fn cargo_propagate_rpath() { - unsafe { - if let Some(cargo_cmds) = &mut *addr_of_mut!(CARGO_RPATH) { - let rpath = cargo_cmds.join(CARGO_RPATH_SEPARATOR); - println!("cargo:rpath={rpath}"); - } + let cargo_cmds = CARGO_RPATH.lock().unwrap(); + if !cargo_cmds.is_empty() { + let rpath = cargo_cmds.join(CARGO_RPATH_SEPARATOR); + println!("cargo:rpath={rpath}"); } } @@ -105,13 +101,14 @@ fn find_llvm_config() -> Result { if which("llvm-config").is_ok() { if let Some(ver) = find_llvm_version("llvm-config".to_owned()) { - if ver >= rustc_llvm_ver { - return Ok("llvm-config".to_owned()); + if ver < rustc_llvm_ver { + println!("cargo:warning=Version of llvm-config is {ver} but needs to be at least rustc's version ({rustc_llvm_ver})! We will (try to) continue to build. Continue at your own risk, or rebuild with a set LLVM_CONFIG_PATH env variable, pointing to a newer version."); } + return Ok("llvm-config".to_owned()); } } - Err("could not find llvm-config".to_owned()) + Err("Could not find llvm-config".to_owned()) } fn exec_llvm_config(llvm_config: String, args: &[&str]) -> String { @@ -165,7 +162,7 @@ fn qemu_bindgen_clang_args( is_usermode: bool, ) -> Vec { if env::var("LLVM_CONFIG_PATH").is_err() { - let found = find_llvm_config().expect("Cannot find a suitable llvm-config, it must be a version equal or greater than the rustc LLVM version"); + let found = find_llvm_config().expect("Cannot find a suitable llvm-config, it must be a version equal or greater than the rustc LLVM version. 
Try specifying LLVM_CONFIG_PATH."); env::set_var("LLVM_CONFIG_PATH", found); } @@ -195,7 +192,7 @@ fn qemu_bindgen_clang_args( entry["output"] == main_obj || entry["file"] .as_str() - .map_or(false, |file| file.ends_with(main_file)) + .is_some_and(|file| file.ends_with(main_file)) }) .expect("Didn't find compile command for qemu-system-arm"); @@ -226,6 +223,7 @@ fn qemu_bindgen_clang_args( let target_arch_dir = match cpu_target { "x86_64" => format!("-I{}/target/i386", qemu_dir.display()), "aarch64" => format!("-I{}/target/arm", qemu_dir.display()), + "riscv32" | "riscv64" => format!("-I{}/target/riscv", qemu_dir.display()), _ => format!("-I{}/target/{cpu_target}", qemu_dir.display()), }; @@ -249,13 +247,15 @@ fn include_path(build_dir: &Path, path: &str) -> String { } } -/// If `fresh_content` != `content_file_to_update` (the file is read directly if `content_file_to_update` is None), update the file. prefix is not considered for comparison. +/// If `fresh_content` != `content_file_to_update` (the file is read directly if `content_file_to_update` is None), update the file. +/// +/// The prefix is not considered for comparison. /// If a prefix is given, it will be added as the first line of the file. pub fn store_generated_content_if_different( file_to_update: &Path, fresh_content: &[u8], content_file_to_update: Option>, - first_line_prefix: Option<&str>, + first_line_prefixes: Vec<&str>, force_regeneration: bool, ) { let mut must_rewrite_file = true; @@ -297,7 +297,12 @@ pub fn store_generated_content_if_different( }; if must_rewrite_file { - if let Some(prefix) = first_line_prefix { + println!( + "cargo::warning={} has been regenerated.", + file_to_update.file_name().unwrap().to_str().unwrap() + ); + + for prefix in first_line_prefixes { writeln!(&file_to_check, "{prefix}").expect("Could not write prefix"); } @@ -306,82 +311,116 @@ pub fn store_generated_content_if_different( .unwrap_or_else(|_| panic!("Unable to write in {}", file_to_update.display())); } } + +//#[rustversion::nightly] +//fn parse_stub( +// stub_bindings_file: &Path, +// current_rustc_version: &Version, +//) -> (bool, bool, Option>) { +// let semver_re = Regex::new(r"/\* (.*) \*/").unwrap(); +// let qemu_hash_re = Regex::new(r"/\* qemu git hash: (.*) \*/").unwrap(); +// +// if let Ok(stub_file) = File::open(stub_bindings_file) { +// let mut stub_rdr = BufReader::new(stub_file); +// +// let mut first_line = String::new(); // rustc version +// let mut second_line = String::new(); // qemu hash +// let mut stub_content = Vec::::new(); +// +// assert!( +// stub_rdr +// .read_line(&mut first_line) +// .expect("Could not read first line") +// > 0, +// "Error while reading first line." +// ); +// +// assert!( +// stub_rdr +// .read_line(&mut second_line) +// .expect("Could not read second line") +// > 0, +// "Error while reading second line." 
+// ); +// +// if let Some((_, [version_str])) = semver_re +// .captures_iter(&first_line) +// .next() +// .map(|caps| caps.extract()) +// { +// // The first line matches the regex +// +// if let Some((_, [qemu_hash_str])) = qemu_hash_re +// .captures_iter(&second_line) +// .next() +// .map(|caps| caps.extract()) +// { +// // The second line matches the regex +// +// if let Ok(version) = Version::parse(version_str) { +// // The first line contains a version +// +// stub_rdr +// .read_to_end(&mut stub_content) +// .expect("could not read stub content"); +// +// return ( +// (current_rustc_version > &version) || (qemu_hash_str != QEMU_REVISION), +// false, +// Some(stub_content), +// ); +// } +// } +// } +// +// stub_rdr.seek(SeekFrom::Start(0)).unwrap(); +// stub_rdr +// .read_to_end(&mut stub_content) +// .expect("could not read stub content"); +// +// (true, true, Some(stub_content)) +// } else { +// // No stub file stored +// (true, true, None) +// } +//} + #[rustversion::nightly] +#[allow(unused)] pub fn maybe_generate_stub_bindings( cpu_target: &str, emulation_mode: &str, stub_bindings_file: &Path, bindings_file: &Path, ) { - if cpu_target == "x86_64" && emulation_mode == "usermode" { + if env::var("LIBAFL_QEMU_GEN_STUBS").is_ok() + && cpu_target == "x86_64" + && emulation_mode == "usermode" + { let current_rustc_version = rustc_version::version().expect("Could not get current rustc version"); - let semver_re = Regex::new(r"/\* (.*) \*/").unwrap(); // We only try to store the stub if the current rustc version is strictly bigger than the one used to generate - // the versioned stub. - let (try_generate, force_regeneration, stub_content): (bool, bool, Option>) = - if let Ok(stub_file) = File::open(stub_bindings_file) { - let mut stub_rdr = BufReader::new(stub_file); - - let mut first_line = String::new(); - let mut stub_content = Vec::::new(); - assert!( - stub_rdr - .read_line(&mut first_line) - .expect("Could not read first line") - > 0, - "Error while reading first line." - ); - - if let Some((_, [version_str])) = semver_re - .captures_iter(&first_line) - .next() - .map(|caps| caps.extract()) - { - // The first line matches the regex - - if let Ok(version) = Version::parse(version_str) { - // The first line contains a version - - stub_rdr - .read_to_end(&mut stub_content) - .expect("could not read stub content"); - (current_rustc_version > version, false, Some(stub_content)) - } else { - stub_rdr.seek(SeekFrom::Start(0)).unwrap(); - stub_rdr - .read_to_end(&mut stub_content) - .expect("could not read stub content"); - - (true, true, Some(stub_content)) - } - } else { - stub_rdr.seek(SeekFrom::Start(0)).unwrap(); - stub_rdr - .read_to_end(&mut stub_content) - .expect("could not read stub content"); - - (true, true, Some(stub_content)) - } - } else { - // No stub file stored - (true, true, None) - }; + // the versioned stub or the qemu hash differs. 
+ // let (try_generate, force_regeneration, stub_content) = + // parse_stub(stub_bindings_file, ¤t_rustc_version); let header = format!("/* {current_rustc_version} */"); - if try_generate { - store_generated_content_if_different( - stub_bindings_file, - fs::read(bindings_file) - .expect("Could not read generated bindings file") - .as_slice(), - stub_content, - Some(header.as_str()), - force_regeneration, - ); - } + store_generated_content_if_different( + stub_bindings_file, + fs::read(bindings_file) + .expect("Could not read generated bindings file") + .as_slice(), + None, + vec![ + header.as_str(), + format!("/* qemu git hash: {QEMU_REVISION} */").as_str(), + ], + false, + ); + } else if env::var("CARGO_CFG_DOC").is_ok() { + println!("cargo:warning=Bindings regeneration has been skipped. Please rerun with x86_64 with usermode to trigger the bindings regeneration."); } } diff --git a/libafl_qemu/libafl_qemu_build/src/main.rs b/libafl_qemu/libafl_qemu_build/src/main.rs deleted file mode 100644 index f37cd52a25..0000000000 --- a/libafl_qemu/libafl_qemu_build/src/main.rs +++ /dev/null @@ -1,11 +0,0 @@ -#![forbid(unexpected_cfgs)] - -use std::path::PathBuf; - -use libafl_qemu_build::build_with_bindings; - -// RUST_BACKTRACE=1 OUT_DIR=/tmp/foo/a/b/c cargo run -fn main() { - let bfile = PathBuf::from("generated_qemu_bindings.rs"); - build_with_bindings("arm", false, false, None, &bfile); -} diff --git a/libafl_qemu/libafl_qemu_sys/Cargo.toml b/libafl_qemu/libafl_qemu_sys/Cargo.toml index 248263ab19..0ad98885bd 100644 --- a/libafl_qemu/libafl_qemu_sys/Cargo.toml +++ b/libafl_qemu/libafl_qemu_sys/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libafl_qemu_sys" -version = "0.13.0" +version.workspace = true authors = ["Andrea Fioraldi "] description = "C to Rust bindings for the LibAFL QEMU bridge" documentation = "https://docs.rs/libafl_qemu_sys" @@ -9,7 +9,13 @@ readme = "../../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "qemu", "instrumentation"] edition = "2021" -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] links = "qemu" [package.metadata.docs.rs] @@ -17,14 +23,18 @@ features = ["x86_64", "usermode"] rustdoc-args = ["--cfg", "docsrs"] [features] +default = ["usermode", "x86_64"] + # The following architecture features are mutually exclusive. 
-x86_64 = [] # build qemu for x86_64 (default) -i386 = [] # build qemu for i386 -arm = [] # build qemu for arm +x86_64 = [] # build qemu for x86_64 +i386 = [] # build qemu for i386 +arm = [] # build qemu for arm aarch64 = [] # build qemu for aarch64 -mips = [] # build qemu for mips (el, use with the 'be' feature of mips be) -ppc = [] # build qemu for powerpc +mips = [] # build qemu for mips (el, use with the 'be' feature of mips be) +ppc = [] # build qemu for powerpc hexagon = [] # build qemu for hexagon +riscv32 = [] # build qemu for riscv 32bit +riscv64 = [] # build qemu for riscv 64bit be = [] @@ -33,22 +43,32 @@ systemmode = [] python = ["pyo3", "pyo3-build-config"] -slirp = [ "systemmode", "libafl_qemu_build/slirp" ] # build qemu with host libslirp (for user networking) -shared = [ "libafl_qemu_build/shared" ] +slirp = [ + "systemmode", + "libafl_qemu_build/slirp", +] # build qemu with host libslirp (for user networking) +shared = ["libafl_qemu_build/shared"] -clippy = [ "libafl_qemu_build/clippy" ] # special feature for clippy, don't use in normal projects +clippy = [ + "libafl_qemu_build/clippy", +] # special feature for clippy, don't use in normal projects -paranoid_debug = ["libafl_qemu_build/paranoid_debug"] # Will perform as many checks as possible. The target will be greatly slowed down. +paranoid_debug = [ + "libafl_qemu_build/paranoid_debug", +] # Will perform as many checks as possible. The target will be greatly slowed down. [dependencies] -paste = "1" -num_enum = "0.7" -libc = "0.2" -strum = "0.26" -strum_macros = "0.26" -pyo3 = { version = "0.18", optional = true } +paste = { workspace = true } +num_enum = { workspace = true, default-features = true } +libc = { workspace = true } +strum = { workspace = true } +strum_macros = { workspace = true } +pyo3 = { workspace = true, optional = true } [build-dependencies] -libafl_qemu_build = { path = "../libafl_qemu_build", version = "0.13.0" } -pyo3-build-config = { version = "0.21", optional = true } -rustversion = "1.0" +libafl_qemu_build = { workspace = true, default-features = true } +pyo3-build-config = { workspace = true, optional = true } +rustversion = { workspace = true } + +[lints] +workspace = true diff --git a/libafl_qemu/libafl_qemu_sys/build_linux.rs b/libafl_qemu/libafl_qemu_sys/build_linux.rs index 5002b41c0b..b3a5b6edf9 100644 --- a/libafl_qemu/libafl_qemu_sys/build_linux.rs +++ b/libafl_qemu/libafl_qemu_sys/build_linux.rs @@ -14,33 +14,41 @@ macro_rules! assert_unique_feature { } } -pub fn build() { - // Make sure that exactly one qemu mode is set - assert_unique_feature!("usermode", "systemmode"); - let emulation_mode = if cfg!(feature = "usermode") { - "usermode".to_string() - } else if cfg!(feature = "systemmode") { - "systemmode".to_string() - } else { - env::var("EMULATION_MODE").unwrap_or_else(|_| { - println!( - "cargo:warning=No emulation mode feature enabled or EMULATION_MODE env specified for libafl_qemu, supported: usermode, systemmmode - defaulting to usermode" - ); - "usermode".to_string() - }) +#[macro_export] +macro_rules! 
assert_at_least_one_feature { + ($($feature:literal),+) => { + #[cfg(not(any($(feature = $feature),+)))] + compile_error!(concat!("At least one of the following features must be enabled:", $(" ", $feature),+)); + }; +} + +pub fn build() { + // Make sure that at most one qemu mode is set + assert_unique_feature!("usermode", "systemmode"); + // Make sure that at least one qemu mode is set + assert_at_least_one_feature!("usermode", "systemmode"); + + let emulation_mode = if cfg!(feature = "usermode") { + "usermode" + } else if cfg!(feature = "systemmode") { + "systemmode" + } else { + unreachable!( + "The above macros, `assert_unique_feature` and `assert_at_least_one_feature`, should \ + panic before this code is reached." + ); }; - println!("cargo::rustc-check-cfg=cfg(emulation_mode, values(\"usermode\", \"systemmode\"))"); - println!("cargo:rustc-cfg=emulation_mode=\"{emulation_mode}\""); - println!("cargo:rerun-if-env-changed=EMULATION_MODE"); // Make sure we have at most one architecutre feature set // Else, we default to `x86_64` - having a default makes CI easier :) - assert_unique_feature!("arm", "aarch64", "i386", "x86_64", "mips", "ppc", "hexagon"); + assert_unique_feature!( + "arm", "aarch64", "i386", "x86_64", "mips", "ppc", "hexagon", "riscv32", "riscv64" + ); // Make sure that we don't have BE set for any architecture other than arm and mips // Sure aarch64 may support BE, but its not in common usage and we don't // need it yet and so haven't tested it - assert_unique_feature!("be", "aarch64", "i386", "x86_64", "hexagon"); + assert_unique_feature!("be", "aarch64", "i386", "x86_64", "hexagon", "riscv32", "riscv64"); let cpu_target = if cfg!(feature = "x86_64") { "x86_64".to_string() @@ -54,19 +62,24 @@ pub fn build() { "mips".to_string() } else if cfg!(feature = "ppc") { "ppc".to_string() + } else if cfg!(feature = "riscv32") { + "riscv32".to_string() + } else if cfg!(feature = "riscv64") { + "riscv64".to_string() } else if cfg!(feature = "hexagon") { "hexagon".to_string() } else { env::var("CPU_TARGET").unwrap_or_else(|_| { println!( - "cargo:warning=No architecture feature enabled or CPU_TARGET env specified for libafl_qemu, supported: arm, aarch64, hexagon, i386, mips, ppc, x86_64 - defaulting to x86_64" + "cargo:warning=No architecture feature enabled or CPU_TARGET env specified for libafl_qemu, supported: arm, aarch64, hexagon, i386, mips, ppc, riscv32, riscv64, x86_64 - defaulting to x86_64" ); "x86_64".to_string() }) }; println!("cargo:rerun-if-env-changed=CPU_TARGET"); + println!("cargo:rerun-if-env-changed=LIBAFL_QEMU_GEN_STUBS"); println!("cargo:rustc-cfg=cpu_target=\"{cpu_target}\""); - println!("cargo::rustc-check-cfg=cfg(cpu_target, values(\"x86_64\", \"arm\", \"aarch64\", \"i386\", \"mips\", \"ppc\", \"hexagon\"))"); + println!("cargo::rustc-check-cfg=cfg(cpu_target, values(\"x86_64\", \"arm\", \"aarch64\", \"i386\", \"mips\", \"ppc\", \"hexagon\", \"riscv32\", \"riscv64\"))"); let jobs = env::var("NUM_JOBS") .ok() @@ -78,7 +91,7 @@ pub fn build() { let src_dir = env::var("CARGO_MANIFEST_DIR").unwrap(); let src_dir = PathBuf::from(src_dir); - let stub_bindings_file = src_dir.join("src/x86_64_stub_bindings.rs"); + let stub_bindings_file = src_dir.join("src/bindings/x86_64_stub_bindings.rs"); if env::var("DOCS_RS").is_ok() || cfg!(feature = "clippy") { // Only build when we're not generating docs and not in clippy @@ -99,7 +112,7 @@ pub fn build() { // If the bindings are built and differ from the current stub, replace it with the freshly generated bindings 
maybe_generate_stub_bindings( &cpu_target, - &emulation_mode, + emulation_mode, stub_bindings_file.as_path(), bindings_file.as_path(), ); diff --git a/libafl_qemu/libafl_qemu_sys/src/bindings/mod.rs b/libafl_qemu/libafl_qemu_sys/src/bindings/mod.rs new file mode 100644 index 0000000000..fb25d7b7ff --- /dev/null +++ b/libafl_qemu/libafl_qemu_sys/src/bindings/mod.rs @@ -0,0 +1,18 @@ +#![allow(non_upper_case_globals)] +#![allow(non_camel_case_types)] +#![allow(non_snake_case)] +#![allow(unused_mut)] +#![allow(unused)] +#![allow(unused_variables)] +#![allow(clippy::all)] +#![allow(clippy::pedantic)] +#![allow(improper_ctypes)] + +#[cfg(all(not(feature = "clippy"), target_os = "linux"))] +#[rustfmt::skip] + include!(concat!(env!("OUT_DIR"), "/bindings.rs")); + +#[cfg(any(feature = "clippy", not(target_os = "linux")))] +mod x86_64_stub_bindings; +#[cfg(any(feature = "clippy", not(target_os = "linux")))] +pub use x86_64_stub_bindings::*; diff --git a/libafl_qemu/libafl_qemu_sys/src/bindings/x86_64_stub_bindings.rs b/libafl_qemu/libafl_qemu_sys/src/bindings/x86_64_stub_bindings.rs new file mode 100644 index 0000000000..38b54df602 --- /dev/null +++ b/libafl_qemu/libafl_qemu_sys/src/bindings/x86_64_stub_bindings.rs @@ -0,0 +1,6972 @@ +/* 1.84.0-nightly */ +/* qemu git hash: 805b14ffc44999952562e8f219d81c21a4fa50b9 */ +/* automatically generated by rust-bindgen 0.70.1 */ + +use libc::siginfo_t; + +#[repr(C)] +#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] +pub struct __BindgenBitfieldUnit { + storage: Storage, +} +impl __BindgenBitfieldUnit { + #[inline] + pub const fn new(storage: Storage) -> Self { + Self { storage } + } +} +impl __BindgenBitfieldUnit +where + Storage: AsRef<[u8]> + AsMut<[u8]>, +{ + #[inline] + pub fn get_bit(&self, index: usize) -> bool { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = self.storage.as_ref()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + byte & mask == mask + } + #[inline] + pub fn set_bit(&mut self, index: usize, val: bool) { + debug_assert!(index / 8 < self.storage.as_ref().len()); + let byte_index = index / 8; + let byte = &mut self.storage.as_mut()[byte_index]; + let bit_index = if cfg!(target_endian = "big") { + 7 - (index % 8) + } else { + index % 8 + }; + let mask = 1 << bit_index; + if val { + *byte |= mask; + } else { + *byte &= !mask; + } + } + #[inline] + pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); + let mut val = 0; + for i in 0..(bit_width as usize) { + if self.get_bit(i + bit_offset) { + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + val |= 1 << index; + } + } + val + } + #[inline] + pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { + debug_assert!(bit_width <= 64); + debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); + debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); + for i in 0..(bit_width as usize) { + let mask = 1 << i; + let val_bit_is_set = val & mask == mask; + let index = if cfg!(target_endian = "big") { + bit_width as usize - 1 - i + } else { + i + }; + self.set_bit(index + bit_offset, val_bit_is_set); + } + } +} +#[repr(C)] +#[derive(Default)] +pub 
struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]); +impl __IncompleteArrayField { + #[inline] + pub const fn new() -> Self { + __IncompleteArrayField(::std::marker::PhantomData, []) + } + #[inline] + pub fn as_ptr(&self) -> *const T { + self as *const _ as *const T + } + #[inline] + pub fn as_mut_ptr(&mut self) -> *mut T { + self as *mut _ as *mut T + } + #[inline] + pub unsafe fn as_slice(&self, len: usize) -> &[T] { + ::std::slice::from_raw_parts(self.as_ptr(), len) + } + #[inline] + pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { + ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) + } +} +impl ::std::fmt::Debug for __IncompleteArrayField { + fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + fmt.write_str("__IncompleteArrayField") + } +} +pub type __uint32_t = ::std::os::raw::c_uint; +pub type __uid_t = ::std::os::raw::c_uint; +pub type __off_t = ::std::os::raw::c_long; +pub type __off64_t = ::std::os::raw::c_long; +pub type __pid_t = ::std::os::raw::c_int; +pub type __clock_t = ::std::os::raw::c_long; +pub type off_t = __off64_t; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct __sigset_t { + pub __val: [::std::os::raw::c_ulong; 16usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __sigset_t"][::std::mem::size_of::<__sigset_t>() - 128usize]; + ["Alignment of __sigset_t"][::std::mem::align_of::<__sigset_t>() - 8usize]; + ["Offset of field: __sigset_t::__val"][::std::mem::offset_of!(__sigset_t, __val) - 0usize]; +}; +#[repr(C)] +#[derive(Copy, Clone)] +pub union __atomic_wide_counter { + pub __value64: ::std::os::raw::c_ulonglong, + pub __value32: __atomic_wide_counter__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct __atomic_wide_counter__bindgen_ty_1 { + pub __low: ::std::os::raw::c_uint, + pub __high: ::std::os::raw::c_uint, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __atomic_wide_counter__bindgen_ty_1"] + [::std::mem::size_of::<__atomic_wide_counter__bindgen_ty_1>() - 8usize]; + ["Alignment of __atomic_wide_counter__bindgen_ty_1"] + [::std::mem::align_of::<__atomic_wide_counter__bindgen_ty_1>() - 4usize]; + ["Offset of field: __atomic_wide_counter__bindgen_ty_1::__low"] + [::std::mem::offset_of!(__atomic_wide_counter__bindgen_ty_1, __low) - 0usize]; + ["Offset of field: __atomic_wide_counter__bindgen_ty_1::__high"] + [::std::mem::offset_of!(__atomic_wide_counter__bindgen_ty_1, __high) - 4usize]; +}; +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __atomic_wide_counter"][::std::mem::size_of::<__atomic_wide_counter>() - 8usize]; + ["Alignment of __atomic_wide_counter"] + [::std::mem::align_of::<__atomic_wide_counter>() - 8usize]; + ["Offset of field: __atomic_wide_counter::__value64"] + [::std::mem::offset_of!(__atomic_wide_counter, __value64) - 0usize]; + ["Offset of field: __atomic_wide_counter::__value32"] + [::std::mem::offset_of!(__atomic_wide_counter, __value32) - 0usize]; +}; +impl Default for __atomic_wide_counter { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for __atomic_wide_counter { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "__atomic_wide_counter {{ union }}") + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct 
__pthread_internal_list { + pub __prev: *mut __pthread_internal_list, + pub __next: *mut __pthread_internal_list, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __pthread_internal_list"][::std::mem::size_of::<__pthread_internal_list>() - 16usize]; + ["Alignment of __pthread_internal_list"] + [::std::mem::align_of::<__pthread_internal_list>() - 8usize]; + ["Offset of field: __pthread_internal_list::__prev"] + [::std::mem::offset_of!(__pthread_internal_list, __prev) - 0usize]; + ["Offset of field: __pthread_internal_list::__next"] + [::std::mem::offset_of!(__pthread_internal_list, __next) - 8usize]; +}; +impl Default for __pthread_internal_list { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub type __pthread_list_t = __pthread_internal_list; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct __pthread_mutex_s { + pub __lock: ::std::os::raw::c_int, + pub __count: ::std::os::raw::c_uint, + pub __owner: ::std::os::raw::c_int, + pub __nusers: ::std::os::raw::c_uint, + pub __kind: ::std::os::raw::c_int, + pub __spins: ::std::os::raw::c_short, + pub __elision: ::std::os::raw::c_short, + pub __list: __pthread_list_t, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __pthread_mutex_s"][::std::mem::size_of::<__pthread_mutex_s>() - 40usize]; + ["Alignment of __pthread_mutex_s"][::std::mem::align_of::<__pthread_mutex_s>() - 8usize]; + ["Offset of field: __pthread_mutex_s::__lock"] + [::std::mem::offset_of!(__pthread_mutex_s, __lock) - 0usize]; + ["Offset of field: __pthread_mutex_s::__count"] + [::std::mem::offset_of!(__pthread_mutex_s, __count) - 4usize]; + ["Offset of field: __pthread_mutex_s::__owner"] + [::std::mem::offset_of!(__pthread_mutex_s, __owner) - 8usize]; + ["Offset of field: __pthread_mutex_s::__nusers"] + [::std::mem::offset_of!(__pthread_mutex_s, __nusers) - 12usize]; + ["Offset of field: __pthread_mutex_s::__kind"] + [::std::mem::offset_of!(__pthread_mutex_s, __kind) - 16usize]; + ["Offset of field: __pthread_mutex_s::__spins"] + [::std::mem::offset_of!(__pthread_mutex_s, __spins) - 20usize]; + ["Offset of field: __pthread_mutex_s::__elision"] + [::std::mem::offset_of!(__pthread_mutex_s, __elision) - 22usize]; + ["Offset of field: __pthread_mutex_s::__list"] + [::std::mem::offset_of!(__pthread_mutex_s, __list) - 24usize]; +}; +impl Default for __pthread_mutex_s { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct __pthread_cond_s { + pub __wseq: __atomic_wide_counter, + pub __g1_start: __atomic_wide_counter, + pub __g_refs: [::std::os::raw::c_uint; 2usize], + pub __g_size: [::std::os::raw::c_uint; 2usize], + pub __g1_orig_size: ::std::os::raw::c_uint, + pub __wrefs: ::std::os::raw::c_uint, + pub __g_signals: [::std::os::raw::c_uint; 2usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __pthread_cond_s"][::std::mem::size_of::<__pthread_cond_s>() - 48usize]; + ["Alignment of __pthread_cond_s"][::std::mem::align_of::<__pthread_cond_s>() - 8usize]; + ["Offset of field: __pthread_cond_s::__wseq"] + [::std::mem::offset_of!(__pthread_cond_s, __wseq) - 0usize]; + ["Offset of field: __pthread_cond_s::__g1_start"] + [::std::mem::offset_of!(__pthread_cond_s, __g1_start) - 
8usize]; + ["Offset of field: __pthread_cond_s::__g_refs"] + [::std::mem::offset_of!(__pthread_cond_s, __g_refs) - 16usize]; + ["Offset of field: __pthread_cond_s::__g_size"] + [::std::mem::offset_of!(__pthread_cond_s, __g_size) - 24usize]; + ["Offset of field: __pthread_cond_s::__g1_orig_size"] + [::std::mem::offset_of!(__pthread_cond_s, __g1_orig_size) - 32usize]; + ["Offset of field: __pthread_cond_s::__wrefs"] + [::std::mem::offset_of!(__pthread_cond_s, __wrefs) - 36usize]; + ["Offset of field: __pthread_cond_s::__g_signals"] + [::std::mem::offset_of!(__pthread_cond_s, __g_signals) - 40usize]; +}; +impl Default for __pthread_cond_s { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for __pthread_cond_s { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write ! (f , "__pthread_cond_s {{ __wseq: {:?}, __g1_start: {:?}, __g_refs: {:?}, __g_size: {:?}, __g1_orig_size: {:?}, __wrefs: {:?}, __g_signals: {:?} }}" , self . __wseq , self . __g1_start , self . __g_refs , self . __g_size , self . __g1_orig_size , self . __wrefs , self . __g_signals) + } +} +pub type pthread_t = ::std::os::raw::c_ulong; +#[repr(C)] +#[derive(Copy, Clone)] +pub union pthread_mutex_t { + pub __data: __pthread_mutex_s, + pub __size: [::std::os::raw::c_char; 40usize], + pub __align: ::std::os::raw::c_long, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of pthread_mutex_t"][::std::mem::size_of::() - 40usize]; + ["Alignment of pthread_mutex_t"][::std::mem::align_of::() - 8usize]; + ["Offset of field: pthread_mutex_t::__data"] + [::std::mem::offset_of!(pthread_mutex_t, __data) - 0usize]; + ["Offset of field: pthread_mutex_t::__size"] + [::std::mem::offset_of!(pthread_mutex_t, __size) - 0usize]; + ["Offset of field: pthread_mutex_t::__align"] + [::std::mem::offset_of!(pthread_mutex_t, __align) - 0usize]; +}; +impl Default for pthread_mutex_t { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for pthread_mutex_t { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "pthread_mutex_t {{ union }}") + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union pthread_cond_t { + pub __data: __pthread_cond_s, + pub __size: [::std::os::raw::c_char; 48usize], + pub __align: ::std::os::raw::c_longlong, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of pthread_cond_t"][::std::mem::size_of::() - 48usize]; + ["Alignment of pthread_cond_t"][::std::mem::align_of::() - 8usize]; + ["Offset of field: pthread_cond_t::__data"] + [::std::mem::offset_of!(pthread_cond_t, __data) - 0usize]; + ["Offset of field: pthread_cond_t::__size"] + [::std::mem::offset_of!(pthread_cond_t, __size) - 0usize]; + ["Offset of field: pthread_cond_t::__align"] + [::std::mem::offset_of!(pthread_cond_t, __align) - 0usize]; +}; +impl Default for pthread_cond_t { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for pthread_cond_t { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "pthread_cond_t {{ union }}") + } +} +pub type FILE = _IO_FILE; +#[repr(C)] +#[derive(Debug, Copy, 
Clone)] +pub struct _IO_marker { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _IO_codecvt { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _IO_wide_data { + _unused: [u8; 0], +} +pub type _IO_lock_t = ::std::os::raw::c_void; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _IO_FILE { + pub _flags: ::std::os::raw::c_int, + pub _IO_read_ptr: *mut ::std::os::raw::c_char, + pub _IO_read_end: *mut ::std::os::raw::c_char, + pub _IO_read_base: *mut ::std::os::raw::c_char, + pub _IO_write_base: *mut ::std::os::raw::c_char, + pub _IO_write_ptr: *mut ::std::os::raw::c_char, + pub _IO_write_end: *mut ::std::os::raw::c_char, + pub _IO_buf_base: *mut ::std::os::raw::c_char, + pub _IO_buf_end: *mut ::std::os::raw::c_char, + pub _IO_save_base: *mut ::std::os::raw::c_char, + pub _IO_backup_base: *mut ::std::os::raw::c_char, + pub _IO_save_end: *mut ::std::os::raw::c_char, + pub _markers: *mut _IO_marker, + pub _chain: *mut _IO_FILE, + pub _fileno: ::std::os::raw::c_int, + pub _flags2: ::std::os::raw::c_int, + pub _old_offset: __off_t, + pub _cur_column: ::std::os::raw::c_ushort, + pub _vtable_offset: ::std::os::raw::c_schar, + pub _shortbuf: [::std::os::raw::c_char; 1usize], + pub _lock: *mut _IO_lock_t, + pub _offset: __off64_t, + pub _codecvt: *mut _IO_codecvt, + pub _wide_data: *mut _IO_wide_data, + pub _freeres_list: *mut _IO_FILE, + pub _freeres_buf: *mut ::std::os::raw::c_void, + pub _prevchain: *mut *mut _IO_FILE, + pub _mode: ::std::os::raw::c_int, + pub _unused2: [::std::os::raw::c_char; 20usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of _IO_FILE"][::std::mem::size_of::<_IO_FILE>() - 216usize]; + ["Alignment of _IO_FILE"][::std::mem::align_of::<_IO_FILE>() - 8usize]; + ["Offset of field: _IO_FILE::_flags"][::std::mem::offset_of!(_IO_FILE, _flags) - 0usize]; + ["Offset of field: _IO_FILE::_IO_read_ptr"] + [::std::mem::offset_of!(_IO_FILE, _IO_read_ptr) - 8usize]; + ["Offset of field: _IO_FILE::_IO_read_end"] + [::std::mem::offset_of!(_IO_FILE, _IO_read_end) - 16usize]; + ["Offset of field: _IO_FILE::_IO_read_base"] + [::std::mem::offset_of!(_IO_FILE, _IO_read_base) - 24usize]; + ["Offset of field: _IO_FILE::_IO_write_base"] + [::std::mem::offset_of!(_IO_FILE, _IO_write_base) - 32usize]; + ["Offset of field: _IO_FILE::_IO_write_ptr"] + [::std::mem::offset_of!(_IO_FILE, _IO_write_ptr) - 40usize]; + ["Offset of field: _IO_FILE::_IO_write_end"] + [::std::mem::offset_of!(_IO_FILE, _IO_write_end) - 48usize]; + ["Offset of field: _IO_FILE::_IO_buf_base"] + [::std::mem::offset_of!(_IO_FILE, _IO_buf_base) - 56usize]; + ["Offset of field: _IO_FILE::_IO_buf_end"] + [::std::mem::offset_of!(_IO_FILE, _IO_buf_end) - 64usize]; + ["Offset of field: _IO_FILE::_IO_save_base"] + [::std::mem::offset_of!(_IO_FILE, _IO_save_base) - 72usize]; + ["Offset of field: _IO_FILE::_IO_backup_base"] + [::std::mem::offset_of!(_IO_FILE, _IO_backup_base) - 80usize]; + ["Offset of field: _IO_FILE::_IO_save_end"] + [::std::mem::offset_of!(_IO_FILE, _IO_save_end) - 88usize]; + ["Offset of field: _IO_FILE::_markers"][::std::mem::offset_of!(_IO_FILE, _markers) - 96usize]; + ["Offset of field: _IO_FILE::_chain"][::std::mem::offset_of!(_IO_FILE, _chain) - 104usize]; + ["Offset of field: _IO_FILE::_fileno"][::std::mem::offset_of!(_IO_FILE, _fileno) - 112usize]; + ["Offset of field: _IO_FILE::_flags2"][::std::mem::offset_of!(_IO_FILE, _flags2) - 116usize]; + ["Offset of field: _IO_FILE::_old_offset"] + 
[::std::mem::offset_of!(_IO_FILE, _old_offset) - 120usize]; + ["Offset of field: _IO_FILE::_cur_column"] + [::std::mem::offset_of!(_IO_FILE, _cur_column) - 128usize]; + ["Offset of field: _IO_FILE::_vtable_offset"] + [::std::mem::offset_of!(_IO_FILE, _vtable_offset) - 130usize]; + ["Offset of field: _IO_FILE::_shortbuf"] + [::std::mem::offset_of!(_IO_FILE, _shortbuf) - 131usize]; + ["Offset of field: _IO_FILE::_lock"][::std::mem::offset_of!(_IO_FILE, _lock) - 136usize]; + ["Offset of field: _IO_FILE::_offset"][::std::mem::offset_of!(_IO_FILE, _offset) - 144usize]; + ["Offset of field: _IO_FILE::_codecvt"][::std::mem::offset_of!(_IO_FILE, _codecvt) - 152usize]; + ["Offset of field: _IO_FILE::_wide_data"] + [::std::mem::offset_of!(_IO_FILE, _wide_data) - 160usize]; + ["Offset of field: _IO_FILE::_freeres_list"] + [::std::mem::offset_of!(_IO_FILE, _freeres_list) - 168usize]; + ["Offset of field: _IO_FILE::_freeres_buf"] + [::std::mem::offset_of!(_IO_FILE, _freeres_buf) - 176usize]; + ["Offset of field: _IO_FILE::_prevchain"] + [::std::mem::offset_of!(_IO_FILE, _prevchain) - 184usize]; + ["Offset of field: _IO_FILE::_mode"][::std::mem::offset_of!(_IO_FILE, _mode) - 192usize]; + ["Offset of field: _IO_FILE::_unused2"][::std::mem::offset_of!(_IO_FILE, _unused2) - 196usize]; +}; +impl Default for _IO_FILE { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub type __jmp_buf = [::std::os::raw::c_long; 8usize]; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct __jmp_buf_tag { + pub __jmpbuf: __jmp_buf, + pub __mask_was_saved: ::std::os::raw::c_int, + pub __saved_mask: __sigset_t, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __jmp_buf_tag"][::std::mem::size_of::<__jmp_buf_tag>() - 200usize]; + ["Alignment of __jmp_buf_tag"][::std::mem::align_of::<__jmp_buf_tag>() - 8usize]; + ["Offset of field: __jmp_buf_tag::__jmpbuf"] + [::std::mem::offset_of!(__jmp_buf_tag, __jmpbuf) - 0usize]; + ["Offset of field: __jmp_buf_tag::__mask_was_saved"] + [::std::mem::offset_of!(__jmp_buf_tag, __mask_was_saved) - 64usize]; + ["Offset of field: __jmp_buf_tag::__saved_mask"] + [::std::mem::offset_of!(__jmp_buf_tag, __saved_mask) - 72usize]; +}; +pub type sigjmp_buf = [__jmp_buf_tag; 1usize]; +#[repr(C)] +#[derive(Copy, Clone)] +pub union sigval { + pub sival_int: ::std::os::raw::c_int, + pub sival_ptr: *mut ::std::os::raw::c_void, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of sigval"][::std::mem::size_of::() - 8usize]; + ["Alignment of sigval"][::std::mem::align_of::() - 8usize]; + ["Offset of field: sigval::sival_int"][::std::mem::offset_of!(sigval, sival_int) - 0usize]; + ["Offset of field: sigval::sival_ptr"][::std::mem::offset_of!(sigval, sival_ptr) - 0usize]; +}; +impl Default for sigval { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for sigval { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "sigval {{ union }}") + } +} +pub type __sigval_t = sigval; +#[repr(C)] +#[derive(Copy, Clone)] +pub union siginfo_t__bindgen_ty_1 { + pub _pad: [::std::os::raw::c_int; 28usize], + pub _kill: siginfo_t__bindgen_ty_1__bindgen_ty_1, + pub _timer: siginfo_t__bindgen_ty_1__bindgen_ty_2, + pub _rt: 
siginfo_t__bindgen_ty_1__bindgen_ty_3, + pub _sigchld: siginfo_t__bindgen_ty_1__bindgen_ty_4, + pub _sigfault: siginfo_t__bindgen_ty_1__bindgen_ty_5, + pub _sigpoll: siginfo_t__bindgen_ty_1__bindgen_ty_6, + pub _sigsys: siginfo_t__bindgen_ty_1__bindgen_ty_7, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct siginfo_t__bindgen_ty_1__bindgen_ty_1 { + pub si_pid: __pid_t, + pub si_uid: __uid_t, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of siginfo_t__bindgen_ty_1__bindgen_ty_1"] + [::std::mem::size_of::() - 8usize]; + ["Alignment of siginfo_t__bindgen_ty_1__bindgen_ty_1"] + [::std::mem::align_of::() - 4usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_1::si_pid"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_1, si_pid) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_1::si_uid"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_1, si_uid) - 4usize]; +}; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct siginfo_t__bindgen_ty_1__bindgen_ty_2 { + pub si_tid: ::std::os::raw::c_int, + pub si_overrun: ::std::os::raw::c_int, + pub si_sigval: __sigval_t, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of siginfo_t__bindgen_ty_1__bindgen_ty_2"] + [::std::mem::size_of::() - 16usize]; + ["Alignment of siginfo_t__bindgen_ty_1__bindgen_ty_2"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_2::si_tid"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_2, si_tid) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_2::si_overrun"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_2, si_overrun) - 4usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_2::si_sigval"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_2, si_sigval) - 8usize]; +}; +impl Default for siginfo_t__bindgen_ty_1__bindgen_ty_2 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for siginfo_t__bindgen_ty_1__bindgen_ty_2 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write ! (f , "siginfo_t__bindgen_ty_1__bindgen_ty_2 {{ si_tid: {:?}, si_overrun: {:?}, si_sigval: {:?} }}" , self . si_tid , self . si_overrun , self . 
si_sigval) + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct siginfo_t__bindgen_ty_1__bindgen_ty_3 { + pub si_pid: __pid_t, + pub si_uid: __uid_t, + pub si_sigval: __sigval_t, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of siginfo_t__bindgen_ty_1__bindgen_ty_3"] + [::std::mem::size_of::() - 16usize]; + ["Alignment of siginfo_t__bindgen_ty_1__bindgen_ty_3"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_3::si_pid"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_3, si_pid) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_3::si_uid"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_3, si_uid) - 4usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_3::si_sigval"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_3, si_sigval) - 8usize]; +}; +impl Default for siginfo_t__bindgen_ty_1__bindgen_ty_3 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for siginfo_t__bindgen_ty_1__bindgen_ty_3 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write ! (f , "siginfo_t__bindgen_ty_1__bindgen_ty_3 {{ si_pid: {:?}, si_uid: {:?}, si_sigval: {:?} }}" , self . si_pid , self . si_uid , self . si_sigval) + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct siginfo_t__bindgen_ty_1__bindgen_ty_4 { + pub si_pid: __pid_t, + pub si_uid: __uid_t, + pub si_status: ::std::os::raw::c_int, + pub si_utime: __clock_t, + pub si_stime: __clock_t, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of siginfo_t__bindgen_ty_1__bindgen_ty_4"] + [::std::mem::size_of::() - 32usize]; + ["Alignment of siginfo_t__bindgen_ty_1__bindgen_ty_4"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_4::si_pid"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_4, si_pid) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_4::si_uid"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_4, si_uid) - 4usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_4::si_status"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_4, si_status) - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_4::si_utime"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_4, si_utime) - 16usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_4::si_stime"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_4, si_stime) - 24usize]; +}; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct siginfo_t__bindgen_ty_1__bindgen_ty_5 { + pub si_addr: *mut ::std::os::raw::c_void, + pub si_addr_lsb: ::std::os::raw::c_short, + pub _bounds: siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1 { + pub _addr_bnd: siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1__bindgen_ty_1, + pub _pkey: __uint32_t, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1__bindgen_ty_1 { + pub _lower: *mut ::std::os::raw::c_void, + pub _upper: *mut ::std::os::raw::c_void, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of 
siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1__bindgen_ty_1"] + [::std::mem::size_of::( + ) - 16usize]; + ["Alignment of siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1__bindgen_ty_1"] + [::std::mem::align_of::( + ) - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1__bindgen_ty_1::_lower"] [:: std :: mem :: offset_of ! (siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1__bindgen_ty_1 , _lower) - 0usize] ; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1__bindgen_ty_1::_upper"] [:: std :: mem :: offset_of ! (siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1__bindgen_ty_1 , _upper) - 8usize] ; +}; +impl Default for siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1"] + [::std::mem::size_of::() - 16usize]; + ["Alignment of siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1::_addr_bnd"][::std::mem::offset_of!( + siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1, + _addr_bnd + ) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1::_pkey"][::std::mem::offset_of!( + siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1, + _pkey + ) - 0usize]; +}; +impl Default for siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "siginfo_t__bindgen_ty_1__bindgen_ty_5__bindgen_ty_1 {{ union }}" + ) + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of siginfo_t__bindgen_ty_1__bindgen_ty_5"] + [::std::mem::size_of::() - 32usize]; + ["Alignment of siginfo_t__bindgen_ty_1__bindgen_ty_5"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_5::si_addr"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_5, si_addr) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_5::si_addr_lsb"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_5, si_addr_lsb) - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_5::_bounds"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_5, _bounds) - 16usize]; +}; +impl Default for siginfo_t__bindgen_ty_1__bindgen_ty_5 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for siginfo_t__bindgen_ty_1__bindgen_ty_5 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write ! (f , "siginfo_t__bindgen_ty_1__bindgen_ty_5 {{ si_addr: {:?}, si_addr_lsb: {:?}, _bounds: {:?} }}" , self . si_addr , self . si_addr_lsb , self . 
_bounds) + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct siginfo_t__bindgen_ty_1__bindgen_ty_6 { + pub si_band: ::std::os::raw::c_long, + pub si_fd: ::std::os::raw::c_int, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of siginfo_t__bindgen_ty_1__bindgen_ty_6"] + [::std::mem::size_of::() - 16usize]; + ["Alignment of siginfo_t__bindgen_ty_1__bindgen_ty_6"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_6::si_band"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_6, si_band) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_6::si_fd"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_6, si_fd) - 8usize]; +}; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct siginfo_t__bindgen_ty_1__bindgen_ty_7 { + pub _call_addr: *mut ::std::os::raw::c_void, + pub _syscall: ::std::os::raw::c_int, + pub _arch: ::std::os::raw::c_uint, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of siginfo_t__bindgen_ty_1__bindgen_ty_7"] + [::std::mem::size_of::() - 16usize]; + ["Alignment of siginfo_t__bindgen_ty_1__bindgen_ty_7"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_7::_call_addr"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_7, _call_addr) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_7::_syscall"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_7, _syscall) - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1__bindgen_ty_7::_arch"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1__bindgen_ty_7, _arch) - 12usize]; +}; +impl Default for siginfo_t__bindgen_ty_1__bindgen_ty_7 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of siginfo_t__bindgen_ty_1"] + [::std::mem::size_of::() - 112usize]; + ["Alignment of siginfo_t__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: siginfo_t__bindgen_ty_1::_pad"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1, _pad) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1::_kill"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1, _kill) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1::_timer"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1, _timer) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1::_rt"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1, _rt) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1::_sigchld"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1, _sigchld) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1::_sigfault"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1, _sigfault) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1::_sigpoll"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1, _sigpoll) - 0usize]; + ["Offset of field: siginfo_t__bindgen_ty_1::_sigsys"] + [::std::mem::offset_of!(siginfo_t__bindgen_ty_1, _sigsys) - 0usize]; +}; +impl Default for siginfo_t__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for siginfo_t__bindgen_ty_1 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, 
"siginfo_t__bindgen_ty_1 {{ union }}") + } +} +pub type guint8 = ::std::os::raw::c_uchar; +pub type gchar = ::std::os::raw::c_char; +pub type guint = ::std::os::raw::c_uint; +pub type gpointer = *mut ::std::os::raw::c_void; +pub type GArray = _GArray; +pub type GByteArray = _GByteArray; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _GArray { + pub data: *mut gchar, + pub len: guint, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of _GArray"][::std::mem::size_of::<_GArray>() - 16usize]; + ["Alignment of _GArray"][::std::mem::align_of::<_GArray>() - 8usize]; + ["Offset of field: _GArray::data"][::std::mem::offset_of!(_GArray, data) - 0usize]; + ["Offset of field: _GArray::len"][::std::mem::offset_of!(_GArray, len) - 8usize]; +}; +impl Default for _GArray { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _GByteArray { + pub data: *mut guint8, + pub len: guint, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of _GByteArray"][::std::mem::size_of::<_GByteArray>() - 16usize]; + ["Alignment of _GByteArray"][::std::mem::align_of::<_GByteArray>() - 8usize]; + ["Offset of field: _GByteArray::data"][::std::mem::offset_of!(_GByteArray, data) - 0usize]; + ["Offset of field: _GByteArray::len"][::std::mem::offset_of!(_GByteArray, len) - 8usize]; +}; +impl Default for _GByteArray { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _GHashTable { + _unused: [u8; 0], +} +pub type GHashTable = _GHashTable; +pub type GSList = _GSList; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _GSList { + pub data: gpointer, + pub next: *mut GSList, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of _GSList"][::std::mem::size_of::<_GSList>() - 16usize]; + ["Alignment of _GSList"][::std::mem::align_of::<_GSList>() - 8usize]; + ["Offset of field: _GSList::data"][::std::mem::offset_of!(_GSList, data) - 0usize]; + ["Offset of field: _GSList::next"][::std::mem::offset_of!(_GSList, next) - 8usize]; +}; +impl Default for _GSList { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct AccelCPUState { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct AddressSpace { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct Clock { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CPUAddressSpace { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CpuInfoFast { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CPUJumpCache { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct Error { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct MemoryRegion { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct QDict { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct QObject { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct RAMBlock { + 
_unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct TCGCPUOps { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct Visitor { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VMChangeStateEntry { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct VMStateDescription { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct IRQState { + _unused: [u8; 0], +} +pub type qemu_irq = *mut IRQState; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct QEnumLookup { + pub array: *const *const ::std::os::raw::c_char, + pub special_features: *const ::std::os::raw::c_uchar, + pub size: ::std::os::raw::c_int, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of QEnumLookup"][::std::mem::size_of::() - 24usize]; + ["Alignment of QEnumLookup"][::std::mem::align_of::() - 8usize]; + ["Offset of field: QEnumLookup::array"][::std::mem::offset_of!(QEnumLookup, array) - 0usize]; + ["Offset of field: QEnumLookup::special_features"] + [::std::mem::offset_of!(QEnumLookup, special_features) - 8usize]; + ["Offset of field: QEnumLookup::size"][::std::mem::offset_of!(QEnumLookup, size) - 16usize]; +}; +impl Default for QEnumLookup { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +extern "C" { + pub fn qemu_target_page_size() -> usize; +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct QemuMutex { + pub lock: pthread_mutex_t, + pub initialized: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of QemuMutex"][::std::mem::size_of::() - 48usize]; + ["Alignment of QemuMutex"][::std::mem::align_of::() - 8usize]; + ["Offset of field: QemuMutex::lock"][::std::mem::offset_of!(QemuMutex, lock) - 0usize]; + ["Offset of field: QemuMutex::initialized"] + [::std::mem::offset_of!(QemuMutex, initialized) - 40usize]; +}; +impl Default for QemuMutex { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for QemuMutex { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "QemuMutex {{ lock: {:?}, initialized: {:?} }}", + self.lock, self.initialized + ) + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct QemuCond { + pub cond: pthread_cond_t, + pub initialized: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of QemuCond"][::std::mem::size_of::() - 56usize]; + ["Alignment of QemuCond"][::std::mem::align_of::() - 8usize]; + ["Offset of field: QemuCond::cond"][::std::mem::offset_of!(QemuCond, cond) - 0usize]; + ["Offset of field: QemuCond::initialized"] + [::std::mem::offset_of!(QemuCond, initialized) - 48usize]; +}; +impl Default for QemuCond { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for QemuCond { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "QemuCond {{ cond: {:?}, initialized: {:?} }}", + self.cond, self.initialized + ) + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct QemuThread { + pub thread: pthread_t, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] 
+const _: () = {
+    ["Size of QemuThread"][::std::mem::size_of::<QemuThread>() - 8usize];
+    ["Alignment of QemuThread"][::std::mem::align_of::<QemuThread>() - 8usize];
+    ["Offset of field: QemuThread::thread"][::std::mem::offset_of!(QemuThread, thread) - 0usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct QemuSpin {
+    pub value: ::std::os::raw::c_int,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+    ["Size of QemuSpin"][::std::mem::size_of::<QemuSpin>() - 4usize];
+    ["Alignment of QemuSpin"][::std::mem::align_of::<QemuSpin>() - 4usize];
+    ["Offset of field: QemuSpin::value"][::std::mem::offset_of!(QemuSpin, value) - 0usize];
+};
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct QemuLockCnt {
+    pub count: ::std::os::raw::c_uint,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+    ["Size of QemuLockCnt"][::std::mem::size_of::<QemuLockCnt>() - 4usize];
+    ["Alignment of QemuLockCnt"][::std::mem::align_of::<QemuLockCnt>() - 4usize];
+    ["Offset of field: QemuLockCnt::count"][::std::mem::offset_of!(QemuLockCnt, count) - 0usize];
+};
+#[repr(C)]
+#[repr(align(4))]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct MemTxAttrs {
+    pub _bitfield_align_1: [u16; 0],
+    pub _bitfield_1: __BindgenBitfieldUnit<[u8; 3usize]>,
+}
+#[allow(clippy::unnecessary_operation, clippy::identity_op)]
+const _: () = {
+    ["Size of MemTxAttrs"][::std::mem::size_of::<MemTxAttrs>() - 4usize];
+    ["Alignment of MemTxAttrs"][::std::mem::align_of::<MemTxAttrs>() - 4usize];
+};
+impl MemTxAttrs {
+    #[inline]
+    pub fn unspecified(&self) -> ::std::os::raw::c_uint {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) }
+    }
+    #[inline]
+    pub fn set_unspecified(&mut self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            self._bitfield_1.set(0usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn secure(&self) -> ::std::os::raw::c_uint {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) }
+    }
+    #[inline]
+    pub fn set_secure(&mut self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            self._bitfield_1.set(1usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn space(&self) -> ::std::os::raw::c_uint {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 2u8) as u32) }
+    }
+    #[inline]
+    pub fn set_space(&mut self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            self._bitfield_1.set(2usize, 2u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn user(&self) -> ::std::os::raw::c_uint {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u32) }
+    }
+    #[inline]
+    pub fn set_user(&mut self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            self._bitfield_1.set(4usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn memory(&self) -> ::std::os::raw::c_uint {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u32) }
+    }
+    #[inline]
+    pub fn set_memory(&mut self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            self._bitfield_1.set(5usize, 1u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn requester_id(&self) -> ::std::os::raw::c_uint {
+        unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 16u8) as u32) }
+    }
+    #[inline]
+    pub fn set_requester_id(&mut self, val: ::std::os::raw::c_uint) {
+        unsafe {
+            let val: u32 = ::std::mem::transmute(val);
+            self._bitfield_1.set(6usize, 16u8, val as u64)
+        }
+    }
+    #[inline]
+    pub fn new_bitfield_1(
+        unspecified:
::std::os::raw::c_uint, + secure: ::std::os::raw::c_uint, + space: ::std::os::raw::c_uint, + user: ::std::os::raw::c_uint, + memory: ::std::os::raw::c_uint, + requester_id: ::std::os::raw::c_uint, + ) -> __BindgenBitfieldUnit<[u8; 3usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 1u8, { + let unspecified: u32 = unsafe { ::std::mem::transmute(unspecified) }; + unspecified as u64 + }); + __bindgen_bitfield_unit.set(1usize, 1u8, { + let secure: u32 = unsafe { ::std::mem::transmute(secure) }; + secure as u64 + }); + __bindgen_bitfield_unit.set(2usize, 2u8, { + let space: u32 = unsafe { ::std::mem::transmute(space) }; + space as u64 + }); + __bindgen_bitfield_unit.set(4usize, 1u8, { + let user: u32 = unsafe { ::std::mem::transmute(user) }; + user as u64 + }); + __bindgen_bitfield_unit.set(5usize, 1u8, { + let memory: u32 = unsafe { ::std::mem::transmute(memory) }; + memory as u64 + }); + __bindgen_bitfield_unit.set(6usize, 16u8, { + let requester_id: u32 = unsafe { ::std::mem::transmute(requester_id) }; + requester_id as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct QTailQLink { + pub tql_next: *mut ::std::os::raw::c_void, + pub tql_prev: *mut QTailQLink, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of QTailQLink"][::std::mem::size_of::() - 16usize]; + ["Alignment of QTailQLink"][::std::mem::align_of::() - 8usize]; + ["Offset of field: QTailQLink::tql_next"] + [::std::mem::offset_of!(QTailQLink, tql_next) - 0usize]; + ["Offset of field: QTailQLink::tql_prev"] + [::std::mem::offset_of!(QTailQLink, tql_prev) - 8usize]; +}; +impl Default for QTailQLink { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct Notifier { + pub notify: ::std::option::Option< + unsafe extern "C" fn(notifier: *mut Notifier, data: *mut ::std::os::raw::c_void), + >, + pub node: Notifier__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct Notifier__bindgen_ty_1 { + pub le_next: *mut Notifier, + pub le_prev: *mut *mut Notifier, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of Notifier__bindgen_ty_1"][::std::mem::size_of::() - 16usize]; + ["Alignment of Notifier__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: Notifier__bindgen_ty_1::le_next"] + [::std::mem::offset_of!(Notifier__bindgen_ty_1, le_next) - 0usize]; + ["Offset of field: Notifier__bindgen_ty_1::le_prev"] + [::std::mem::offset_of!(Notifier__bindgen_ty_1, le_prev) - 8usize]; +}; +impl Default for Notifier__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of Notifier"][::std::mem::size_of::() - 24usize]; + ["Alignment of Notifier"][::std::mem::align_of::() - 8usize]; + ["Offset of field: Notifier::notify"][::std::mem::offset_of!(Notifier, notify) - 0usize]; + ["Offset of field: Notifier::node"][::std::mem::offset_of!(Notifier, node) - 8usize]; +}; +impl Default for Notifier { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + 
s.assume_init() + } + } +} +pub type RCUCBFunc = ::std::option::Option; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct rcu_head { + pub next: *mut rcu_head, + pub func: RCUCBFunc, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of rcu_head"][::std::mem::size_of::() - 16usize]; + ["Alignment of rcu_head"][::std::mem::align_of::() - 8usize]; + ["Offset of field: rcu_head::next"][::std::mem::offset_of!(rcu_head, next) - 0usize]; + ["Offset of field: rcu_head::func"][::std::mem::offset_of!(rcu_head, func) - 8usize]; +}; +impl Default for rcu_head { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct TypeImpl { + _unused: [u8; 0], +} +pub type Type = *mut TypeImpl; +#[doc = " typedef ObjectPropertyAccessor:\n @obj: the object that owns the property\n @v: the visitor that contains the property data\n @name: the name of the property\n @opaque: the object property opaque\n @errp: a pointer to an Error that is filled if getting/setting fails.\n\n Called when trying to get/set a property."] +pub type ObjectPropertyAccessor = ::std::option::Option< + unsafe extern "C" fn( + obj: *mut Object, + v: *mut Visitor, + name: *const ::std::os::raw::c_char, + opaque: *mut ::std::os::raw::c_void, + errp: *mut *mut Error, + ), +>; +#[doc = " typedef ObjectPropertyResolve:\n @obj: the object that owns the property\n @opaque: the opaque registered with the property\n @part: the name of the property\n\n Resolves the #Object corresponding to property @part.\n\n The returned object can also be used as a starting point\n to resolve a relative path starting with \"@part\".\n\n Returns: If @path is the path that led to @obj, the function\n returns the #Object corresponding to \"@path/@part\".\n If \"@path/@part\" is not a valid object path, it returns #NULL."] +pub type ObjectPropertyResolve = ::std::option::Option< + unsafe extern "C" fn( + obj: *mut Object, + opaque: *mut ::std::os::raw::c_void, + part: *const ::std::os::raw::c_char, + ) -> *mut Object, +>; +#[doc = " typedef ObjectPropertyRelease:\n @obj: the object that owns the property\n @name: the name of the property\n @opaque: the opaque registered with the property\n\n Called when a property is removed from a object."] +pub type ObjectPropertyRelease = ::std::option::Option< + unsafe extern "C" fn( + obj: *mut Object, + name: *const ::std::os::raw::c_char, + opaque: *mut ::std::os::raw::c_void, + ), +>; +#[doc = " typedef ObjectPropertyInit:\n @obj: the object that owns the property\n @prop: the property to set\n\n Called when a property is initialized."] +pub type ObjectPropertyInit = + ::std::option::Option; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ObjectProperty { + pub name: *mut ::std::os::raw::c_char, + pub type_: *mut ::std::os::raw::c_char, + pub description: *mut ::std::os::raw::c_char, + pub get: ObjectPropertyAccessor, + pub set: ObjectPropertyAccessor, + pub resolve: ObjectPropertyResolve, + pub release: ObjectPropertyRelease, + pub init: ObjectPropertyInit, + pub opaque: *mut ::std::os::raw::c_void, + pub defval: *mut QObject, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of ObjectProperty"][::std::mem::size_of::() - 80usize]; + ["Alignment of ObjectProperty"][::std::mem::align_of::() - 8usize]; + ["Offset of field: ObjectProperty::name"] + 
[::std::mem::offset_of!(ObjectProperty, name) - 0usize]; + ["Offset of field: ObjectProperty::type_"] + [::std::mem::offset_of!(ObjectProperty, type_) - 8usize]; + ["Offset of field: ObjectProperty::description"] + [::std::mem::offset_of!(ObjectProperty, description) - 16usize]; + ["Offset of field: ObjectProperty::get"][::std::mem::offset_of!(ObjectProperty, get) - 24usize]; + ["Offset of field: ObjectProperty::set"][::std::mem::offset_of!(ObjectProperty, set) - 32usize]; + ["Offset of field: ObjectProperty::resolve"] + [::std::mem::offset_of!(ObjectProperty, resolve) - 40usize]; + ["Offset of field: ObjectProperty::release"] + [::std::mem::offset_of!(ObjectProperty, release) - 48usize]; + ["Offset of field: ObjectProperty::init"] + [::std::mem::offset_of!(ObjectProperty, init) - 56usize]; + ["Offset of field: ObjectProperty::opaque"] + [::std::mem::offset_of!(ObjectProperty, opaque) - 64usize]; + ["Offset of field: ObjectProperty::defval"] + [::std::mem::offset_of!(ObjectProperty, defval) - 72usize]; +}; +impl Default for ObjectProperty { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[doc = " typedef ObjectUnparent:\n @obj: the object that is being removed from the composition tree\n\n Called when an object is being removed from the QOM composition tree.\n The function should remove any backlinks from children objects to @obj."] +pub type ObjectUnparent = ::std::option::Option; +#[doc = " typedef ObjectFree:\n @obj: the object being freed\n\n Called when an object's last reference is removed."] +pub type ObjectFree = ::std::option::Option; +#[doc = " struct ObjectClass:\n\n The base for all classes. The only thing that #ObjectClass contains is an\n integer type handle."] +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct ObjectClass { + pub type_: Type, + pub interfaces: *mut GSList, + pub object_cast_cache: [*const ::std::os::raw::c_char; 4usize], + pub class_cast_cache: [*const ::std::os::raw::c_char; 4usize], + pub unparent: ObjectUnparent, + pub properties: *mut GHashTable, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of ObjectClass"][::std::mem::size_of::() - 96usize]; + ["Alignment of ObjectClass"][::std::mem::align_of::() - 8usize]; + ["Offset of field: ObjectClass::type_"][::std::mem::offset_of!(ObjectClass, type_) - 0usize]; + ["Offset of field: ObjectClass::interfaces"] + [::std::mem::offset_of!(ObjectClass, interfaces) - 8usize]; + ["Offset of field: ObjectClass::object_cast_cache"] + [::std::mem::offset_of!(ObjectClass, object_cast_cache) - 16usize]; + ["Offset of field: ObjectClass::class_cast_cache"] + [::std::mem::offset_of!(ObjectClass, class_cast_cache) - 48usize]; + ["Offset of field: ObjectClass::unparent"] + [::std::mem::offset_of!(ObjectClass, unparent) - 80usize]; + ["Offset of field: ObjectClass::properties"] + [::std::mem::offset_of!(ObjectClass, properties) - 88usize]; +}; +impl Default for ObjectClass { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[doc = " struct Object:\n\n The base for all objects. The first member of this object is a pointer to\n a #ObjectClass. 
Since C guarantees that the first member of a structure\n always begins at byte 0 of that structure, as long as any sub-object places\n its parent as the first member, we can cast directly to a #Object.\n\n As a result, #Object contains a reference to the objects type as its\n first member. This allows identification of the real type of the object at\n run time."] +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct Object { + pub class: *mut ObjectClass, + pub free: ObjectFree, + pub properties: *mut GHashTable, + pub ref_: u32, + pub parent: *mut Object, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of Object"][::std::mem::size_of::() - 40usize]; + ["Alignment of Object"][::std::mem::align_of::() - 8usize]; + ["Offset of field: Object::class"][::std::mem::offset_of!(Object, class) - 0usize]; + ["Offset of field: Object::free"][::std::mem::offset_of!(Object, free) - 8usize]; + ["Offset of field: Object::properties"][::std::mem::offset_of!(Object, properties) - 16usize]; + ["Offset of field: Object::ref_"][::std::mem::offset_of!(Object, ref_) - 24usize]; + ["Offset of field: Object::parent"][::std::mem::offset_of!(Object, parent) - 32usize]; +}; +impl Default for Object { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct HotplugHandler { + _unused: [u8; 0], +} +#[doc = " ResettableState:\n Structure holding reset related state. The fields should not be accessed\n directly; the definition is here to allow further inclusion into other\n objects.\n\n @count: Number of reset level the object is into. It is incremented when\n the reset operation starts and decremented when it finishes.\n @hold_phase_pending: flag which indicates that we need to invoke the 'hold'\n phase handler for this object.\n @exit_phase_in_progress: true if we are currently in the exit phase"] +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct ResettableState { + pub count: ::std::os::raw::c_uint, + pub hold_phase_pending: bool, + pub exit_phase_in_progress: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of ResettableState"][::std::mem::size_of::() - 8usize]; + ["Alignment of ResettableState"][::std::mem::align_of::() - 4usize]; + ["Offset of field: ResettableState::count"] + [::std::mem::offset_of!(ResettableState, count) - 0usize]; + ["Offset of field: ResettableState::hold_phase_pending"] + [::std::mem::offset_of!(ResettableState, hold_phase_pending) - 4usize]; + ["Offset of field: ResettableState::exit_phase_in_progress"] + [::std::mem::offset_of!(ResettableState, exit_phase_in_progress) - 5usize]; +}; +pub type DeviceRealize = + ::std::option::Option; +pub type DeviceUnrealize = ::std::option::Option; +pub type DeviceReset = ::std::option::Option; +#[doc = " struct DeviceClass - The base class for all devices.\n @props: Properties accessing state fields.\n @realize: Callback function invoked when the #DeviceState:realized\n property is changed to %true.\n @unrealize: Callback function invoked when the #DeviceState:realized\n property is changed to %false.\n @hotpluggable: indicates if #DeviceClass is hotpluggable, available\n as readonly \"hotpluggable\" property of #DeviceState instance\n"] +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct DeviceClass { + pub parent_class: ObjectClass, + #[doc = " @categories: device categories device 
belongs to"] + pub categories: [::std::os::raw::c_ulong; 1usize], + #[doc = " @fw_name: name used to identify device to firmware interfaces"] + pub fw_name: *const ::std::os::raw::c_char, + #[doc = " @desc: human readable description of device"] + pub desc: *const ::std::os::raw::c_char, + #[doc = " @props_: properties associated with device, should only be\n assigned by using device_class_set_props(). The underscore\n ensures a compile-time error if someone attempts to assign\n dc->props directly."] + pub props_: *mut Property, + #[doc = " @user_creatable: Can user instantiate with -device / device_add?\n\n All devices should support instantiation with device_add, and\n this flag should not exist. But we're not there, yet. Some\n devices fail to instantiate with cryptic error messages.\n Others instantiate, but don't work. Exposing users to such\n behavior would be cruel; clearing this flag will protect them.\n It should never be cleared without a comment explaining why it\n is cleared.\n\n TODO remove once we're there"] + pub user_creatable: bool, + pub hotpluggable: bool, + #[doc = " @reset: deprecated device reset method pointer\n\n Modern code should use the ResettableClass interface to\n implement a multi-phase reset.\n\n TODO: remove once every reset callback is unused"] + pub reset: DeviceReset, + pub realize: DeviceRealize, + pub unrealize: DeviceUnrealize, + #[doc = " @vmsd: device state serialisation description for\n migration/save/restore"] + pub vmsd: *const VMStateDescription, + #[doc = " @bus_type: bus type\n private: to qdev / bus."] + pub bus_type: *const ::std::os::raw::c_char, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of DeviceClass"][::std::mem::size_of::() - 176usize]; + ["Alignment of DeviceClass"][::std::mem::align_of::() - 8usize]; + ["Offset of field: DeviceClass::parent_class"] + [::std::mem::offset_of!(DeviceClass, parent_class) - 0usize]; + ["Offset of field: DeviceClass::categories"] + [::std::mem::offset_of!(DeviceClass, categories) - 96usize]; + ["Offset of field: DeviceClass::fw_name"] + [::std::mem::offset_of!(DeviceClass, fw_name) - 104usize]; + ["Offset of field: DeviceClass::desc"][::std::mem::offset_of!(DeviceClass, desc) - 112usize]; + ["Offset of field: DeviceClass::props_"] + [::std::mem::offset_of!(DeviceClass, props_) - 120usize]; + ["Offset of field: DeviceClass::user_creatable"] + [::std::mem::offset_of!(DeviceClass, user_creatable) - 128usize]; + ["Offset of field: DeviceClass::hotpluggable"] + [::std::mem::offset_of!(DeviceClass, hotpluggable) - 129usize]; + ["Offset of field: DeviceClass::reset"][::std::mem::offset_of!(DeviceClass, reset) - 136usize]; + ["Offset of field: DeviceClass::realize"] + [::std::mem::offset_of!(DeviceClass, realize) - 144usize]; + ["Offset of field: DeviceClass::unrealize"] + [::std::mem::offset_of!(DeviceClass, unrealize) - 152usize]; + ["Offset of field: DeviceClass::vmsd"][::std::mem::offset_of!(DeviceClass, vmsd) - 160usize]; + ["Offset of field: DeviceClass::bus_type"] + [::std::mem::offset_of!(DeviceClass, bus_type) - 168usize]; +}; +impl Default for DeviceClass { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct NamedGPIOList { + pub name: *mut ::std::os::raw::c_char, + pub in_: *mut qemu_irq, + pub num_in: ::std::os::raw::c_int, + pub num_out: ::std::os::raw::c_int, + pub node: 
NamedGPIOList__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct NamedGPIOList__bindgen_ty_1 { + pub le_next: *mut NamedGPIOList, + pub le_prev: *mut *mut NamedGPIOList, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of NamedGPIOList__bindgen_ty_1"] + [::std::mem::size_of::() - 16usize]; + ["Alignment of NamedGPIOList__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: NamedGPIOList__bindgen_ty_1::le_next"] + [::std::mem::offset_of!(NamedGPIOList__bindgen_ty_1, le_next) - 0usize]; + ["Offset of field: NamedGPIOList__bindgen_ty_1::le_prev"] + [::std::mem::offset_of!(NamedGPIOList__bindgen_ty_1, le_prev) - 8usize]; +}; +impl Default for NamedGPIOList__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of NamedGPIOList"][::std::mem::size_of::() - 40usize]; + ["Alignment of NamedGPIOList"][::std::mem::align_of::() - 8usize]; + ["Offset of field: NamedGPIOList::name"][::std::mem::offset_of!(NamedGPIOList, name) - 0usize]; + ["Offset of field: NamedGPIOList::in_"][::std::mem::offset_of!(NamedGPIOList, in_) - 8usize]; + ["Offset of field: NamedGPIOList::num_in"] + [::std::mem::offset_of!(NamedGPIOList, num_in) - 16usize]; + ["Offset of field: NamedGPIOList::num_out"] + [::std::mem::offset_of!(NamedGPIOList, num_out) - 20usize]; + ["Offset of field: NamedGPIOList::node"][::std::mem::offset_of!(NamedGPIOList, node) - 24usize]; +}; +impl Default for NamedGPIOList { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct NamedClockList { + pub name: *mut ::std::os::raw::c_char, + pub clock: *mut Clock, + pub output: bool, + pub alias: bool, + pub node: NamedClockList__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct NamedClockList__bindgen_ty_1 { + pub le_next: *mut NamedClockList, + pub le_prev: *mut *mut NamedClockList, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of NamedClockList__bindgen_ty_1"] + [::std::mem::size_of::() - 16usize]; + ["Alignment of NamedClockList__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: NamedClockList__bindgen_ty_1::le_next"] + [::std::mem::offset_of!(NamedClockList__bindgen_ty_1, le_next) - 0usize]; + ["Offset of field: NamedClockList__bindgen_ty_1::le_prev"] + [::std::mem::offset_of!(NamedClockList__bindgen_ty_1, le_prev) - 8usize]; +}; +impl Default for NamedClockList__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of NamedClockList"][::std::mem::size_of::() - 40usize]; + ["Alignment of NamedClockList"][::std::mem::align_of::() - 8usize]; + ["Offset of field: NamedClockList::name"] + [::std::mem::offset_of!(NamedClockList, name) - 0usize]; + ["Offset of field: NamedClockList::clock"] + [::std::mem::offset_of!(NamedClockList, clock) - 8usize]; + ["Offset of field: NamedClockList::output"] + [::std::mem::offset_of!(NamedClockList, output) - 16usize]; + ["Offset of field: NamedClockList::alias"] + 
[::std::mem::offset_of!(NamedClockList, alias) - 17usize]; + ["Offset of field: NamedClockList::node"] + [::std::mem::offset_of!(NamedClockList, node) - 24usize]; +}; +impl Default for NamedClockList { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct MemReentrancyGuard { + pub engaged_in_io: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of MemReentrancyGuard"][::std::mem::size_of::() - 1usize]; + ["Alignment of MemReentrancyGuard"][::std::mem::align_of::() - 1usize]; + ["Offset of field: MemReentrancyGuard::engaged_in_io"] + [::std::mem::offset_of!(MemReentrancyGuard, engaged_in_io) - 0usize]; +}; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct NamedGPIOListHead { + pub lh_first: *mut NamedGPIOList, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of NamedGPIOListHead"][::std::mem::size_of::() - 8usize]; + ["Alignment of NamedGPIOListHead"][::std::mem::align_of::() - 8usize]; + ["Offset of field: NamedGPIOListHead::lh_first"] + [::std::mem::offset_of!(NamedGPIOListHead, lh_first) - 0usize]; +}; +impl Default for NamedGPIOListHead { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct NamedClockListHead { + pub lh_first: *mut NamedClockList, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of NamedClockListHead"][::std::mem::size_of::() - 8usize]; + ["Alignment of NamedClockListHead"][::std::mem::align_of::() - 8usize]; + ["Offset of field: NamedClockListHead::lh_first"] + [::std::mem::offset_of!(NamedClockListHead, lh_first) - 0usize]; +}; +impl Default for NamedClockListHead { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct BusStateHead { + pub lh_first: *mut BusState, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of BusStateHead"][::std::mem::size_of::() - 8usize]; + ["Alignment of BusStateHead"][::std::mem::align_of::() - 8usize]; + ["Offset of field: BusStateHead::lh_first"] + [::std::mem::offset_of!(BusStateHead, lh_first) - 0usize]; +}; +impl Default for BusStateHead { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[doc = " struct DeviceState - common device state, accessed with qdev helpers\n\n This structure should not be accessed directly. 
We declare it here\n so that it can be embedded in individual device state structures."] +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct DeviceState { + pub parent_obj: Object, + #[doc = " @id: global device id"] + pub id: *mut ::std::os::raw::c_char, + #[doc = " @canonical_path: canonical path of realized device in the QOM tree"] + pub canonical_path: *mut ::std::os::raw::c_char, + #[doc = " @realized: has device been realized?"] + pub realized: bool, + #[doc = " @pending_deleted_event: track pending deletion events during unplug"] + pub pending_deleted_event: bool, + #[doc = " @pending_deleted_expires_ms: optional timeout for deletion events"] + pub pending_deleted_expires_ms: i64, + #[doc = " @opts: QDict of options for the device"] + pub opts: *mut QDict, + #[doc = " @hotplugged: was device added after PHASE_MACHINE_READY?"] + pub hotplugged: ::std::os::raw::c_int, + #[doc = " @allow_unplug_during_migration: can device be unplugged during migration"] + pub allow_unplug_during_migration: bool, + #[doc = " @parent_bus: bus this device belongs to"] + pub parent_bus: *mut BusState, + #[doc = " @gpios: QLIST of named GPIOs the device provides."] + pub gpios: NamedGPIOListHead, + #[doc = " @clocks: QLIST of named clocks the device provides."] + pub clocks: NamedClockListHead, + #[doc = " @child_bus: QLIST of child buses"] + pub child_bus: BusStateHead, + #[doc = " @num_child_bus: number of @child_bus entries"] + pub num_child_bus: ::std::os::raw::c_int, + #[doc = " @instance_id_alias: device alias for handling legacy migration setups"] + pub instance_id_alias: ::std::os::raw::c_int, + #[doc = " @alias_required_for_version: indicates @instance_id_alias is\n needed for migration"] + pub alias_required_for_version: ::std::os::raw::c_int, + #[doc = " @reset: ResettableState for the device; handled by Resettable interface."] + pub reset: ResettableState, + #[doc = " @unplug_blockers: list of reasons to block unplugging of device"] + pub unplug_blockers: *mut GSList, + #[doc = " @mem_reentrancy_guard: Is the device currently in mmio/pio/dma?\n\n Used to prevent re-entrancy confusing things."] + pub mem_reentrancy_guard: MemReentrancyGuard, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of DeviceState"][::std::mem::size_of::() - 160usize]; + ["Alignment of DeviceState"][::std::mem::align_of::() - 8usize]; + ["Offset of field: DeviceState::parent_obj"] + [::std::mem::offset_of!(DeviceState, parent_obj) - 0usize]; + ["Offset of field: DeviceState::id"][::std::mem::offset_of!(DeviceState, id) - 40usize]; + ["Offset of field: DeviceState::canonical_path"] + [::std::mem::offset_of!(DeviceState, canonical_path) - 48usize]; + ["Offset of field: DeviceState::realized"] + [::std::mem::offset_of!(DeviceState, realized) - 56usize]; + ["Offset of field: DeviceState::pending_deleted_event"] + [::std::mem::offset_of!(DeviceState, pending_deleted_event) - 57usize]; + ["Offset of field: DeviceState::pending_deleted_expires_ms"] + [::std::mem::offset_of!(DeviceState, pending_deleted_expires_ms) - 64usize]; + ["Offset of field: DeviceState::opts"][::std::mem::offset_of!(DeviceState, opts) - 72usize]; + ["Offset of field: DeviceState::hotplugged"] + [::std::mem::offset_of!(DeviceState, hotplugged) - 80usize]; + ["Offset of field: DeviceState::allow_unplug_during_migration"] + [::std::mem::offset_of!(DeviceState, allow_unplug_during_migration) - 84usize]; + ["Offset of field: DeviceState::parent_bus"] + [::std::mem::offset_of!(DeviceState, parent_bus) - 88usize]; + 
["Offset of field: DeviceState::gpios"][::std::mem::offset_of!(DeviceState, gpios) - 96usize]; + ["Offset of field: DeviceState::clocks"] + [::std::mem::offset_of!(DeviceState, clocks) - 104usize]; + ["Offset of field: DeviceState::child_bus"] + [::std::mem::offset_of!(DeviceState, child_bus) - 112usize]; + ["Offset of field: DeviceState::num_child_bus"] + [::std::mem::offset_of!(DeviceState, num_child_bus) - 120usize]; + ["Offset of field: DeviceState::instance_id_alias"] + [::std::mem::offset_of!(DeviceState, instance_id_alias) - 124usize]; + ["Offset of field: DeviceState::alias_required_for_version"] + [::std::mem::offset_of!(DeviceState, alias_required_for_version) - 128usize]; + ["Offset of field: DeviceState::reset"][::std::mem::offset_of!(DeviceState, reset) - 132usize]; + ["Offset of field: DeviceState::unplug_blockers"] + [::std::mem::offset_of!(DeviceState, unplug_blockers) - 144usize]; + ["Offset of field: DeviceState::mem_reentrancy_guard"] + [::std::mem::offset_of!(DeviceState, mem_reentrancy_guard) - 152usize]; +}; +impl Default for DeviceState { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct BusChild { + pub rcu: rcu_head, + pub child: *mut DeviceState, + pub index: ::std::os::raw::c_int, + pub sibling: BusChild__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union BusChild__bindgen_ty_1 { + pub tqe_next: *mut BusChild, + pub tqe_circ: QTailQLink, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of BusChild__bindgen_ty_1"][::std::mem::size_of::() - 16usize]; + ["Alignment of BusChild__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: BusChild__bindgen_ty_1::tqe_next"] + [::std::mem::offset_of!(BusChild__bindgen_ty_1, tqe_next) - 0usize]; + ["Offset of field: BusChild__bindgen_ty_1::tqe_circ"] + [::std::mem::offset_of!(BusChild__bindgen_ty_1, tqe_circ) - 0usize]; +}; +impl Default for BusChild__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for BusChild__bindgen_ty_1 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "BusChild__bindgen_ty_1 {{ union }}") + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of BusChild"][::std::mem::size_of::() - 48usize]; + ["Alignment of BusChild"][::std::mem::align_of::() - 8usize]; + ["Offset of field: BusChild::rcu"][::std::mem::offset_of!(BusChild, rcu) - 0usize]; + ["Offset of field: BusChild::child"][::std::mem::offset_of!(BusChild, child) - 16usize]; + ["Offset of field: BusChild::index"][::std::mem::offset_of!(BusChild, index) - 24usize]; + ["Offset of field: BusChild::sibling"][::std::mem::offset_of!(BusChild, sibling) - 32usize]; +}; +impl Default for BusChild { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for BusChild { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "BusChild {{ rcu: {:?}, child: {:?}, index: {:?}, sibling: {:?} }}", + self.rcu, self.child, self.index, self.sibling + ) + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union BusChildHead { + pub tqh_first: 
*mut BusChild, + pub tqh_circ: QTailQLink, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of BusChildHead"][::std::mem::size_of::() - 16usize]; + ["Alignment of BusChildHead"][::std::mem::align_of::() - 8usize]; + ["Offset of field: BusChildHead::tqh_first"] + [::std::mem::offset_of!(BusChildHead, tqh_first) - 0usize]; + ["Offset of field: BusChildHead::tqh_circ"] + [::std::mem::offset_of!(BusChildHead, tqh_circ) - 0usize]; +}; +impl Default for BusChildHead { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for BusChildHead { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "BusChildHead {{ union }}") + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct BusStateEntry { + pub le_next: *mut BusState, + pub le_prev: *mut *mut BusState, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of BusStateEntry"][::std::mem::size_of::() - 16usize]; + ["Alignment of BusStateEntry"][::std::mem::align_of::() - 8usize]; + ["Offset of field: BusStateEntry::le_next"] + [::std::mem::offset_of!(BusStateEntry, le_next) - 0usize]; + ["Offset of field: BusStateEntry::le_prev"] + [::std::mem::offset_of!(BusStateEntry, le_prev) - 8usize]; +}; +impl Default for BusStateEntry { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[doc = " struct BusState:\n @obj: parent object\n @parent: parent Device\n @name: name of bus\n @hotplug_handler: link to a hotplug handler associated with bus.\n @max_index: max number of child buses\n @realized: is the bus itself realized?\n @full: is the bus full?\n @num_children: current number of child buses"] +#[repr(C)] +#[derive(Copy, Clone)] +pub struct BusState { + pub obj: Object, + pub parent: *mut DeviceState, + pub name: *mut ::std::os::raw::c_char, + pub hotplug_handler: *mut HotplugHandler, + pub max_index: ::std::os::raw::c_int, + pub realized: bool, + pub full: bool, + pub num_children: ::std::os::raw::c_int, + #[doc = " @children: an RCU protected QTAILQ, thus readers must use RCU\n to access it, and writers must hold the big qemu lock"] + pub children: BusChildHead, + #[doc = " @sibling: next bus"] + pub sibling: BusStateEntry, + #[doc = " @reset: ResettableState for the bus; handled by Resettable interface."] + pub reset: ResettableState, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of BusState"][::std::mem::size_of::() - 120usize]; + ["Alignment of BusState"][::std::mem::align_of::() - 8usize]; + ["Offset of field: BusState::obj"][::std::mem::offset_of!(BusState, obj) - 0usize]; + ["Offset of field: BusState::parent"][::std::mem::offset_of!(BusState, parent) - 40usize]; + ["Offset of field: BusState::name"][::std::mem::offset_of!(BusState, name) - 48usize]; + ["Offset of field: BusState::hotplug_handler"] + [::std::mem::offset_of!(BusState, hotplug_handler) - 56usize]; + ["Offset of field: BusState::max_index"][::std::mem::offset_of!(BusState, max_index) - 64usize]; + ["Offset of field: BusState::realized"][::std::mem::offset_of!(BusState, realized) - 68usize]; + ["Offset of field: BusState::full"][::std::mem::offset_of!(BusState, full) - 69usize]; + ["Offset of field: BusState::num_children"] + [::std::mem::offset_of!(BusState, num_children) - 
72usize]; + ["Offset of field: BusState::children"][::std::mem::offset_of!(BusState, children) - 80usize]; + ["Offset of field: BusState::sibling"][::std::mem::offset_of!(BusState, sibling) - 96usize]; + ["Offset of field: BusState::reset"][::std::mem::offset_of!(BusState, reset) - 112usize]; +}; +impl Default for BusState { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for BusState { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write ! (f , "BusState {{ obj: {:?}, parent: {:?}, name: {:?}, hotplug_handler: {:?}, max_index: {:?}, realized: {:?}, full: {:?}, num_children: {:?}, children: {:?}, sibling: {:?}, reset: {:?} }}" , self . obj , self . parent , self . name , self . hotplug_handler , self . max_index , self . realized , self . full , self . num_children , self . children , self . sibling , self . reset) + } +} +pub type PTR = *mut ::std::os::raw::c_void; +pub type bfd_vma = u64; +pub type bfd_byte = u8; +pub const bfd_flavour_bfd_target_unknown_flavour: bfd_flavour = bfd_flavour(0); +pub const bfd_flavour_bfd_target_aout_flavour: bfd_flavour = bfd_flavour(1); +pub const bfd_flavour_bfd_target_coff_flavour: bfd_flavour = bfd_flavour(2); +pub const bfd_flavour_bfd_target_ecoff_flavour: bfd_flavour = bfd_flavour(3); +pub const bfd_flavour_bfd_target_elf_flavour: bfd_flavour = bfd_flavour(4); +pub const bfd_flavour_bfd_target_ieee_flavour: bfd_flavour = bfd_flavour(5); +pub const bfd_flavour_bfd_target_nlm_flavour: bfd_flavour = bfd_flavour(6); +pub const bfd_flavour_bfd_target_oasys_flavour: bfd_flavour = bfd_flavour(7); +pub const bfd_flavour_bfd_target_tekhex_flavour: bfd_flavour = bfd_flavour(8); +pub const bfd_flavour_bfd_target_srec_flavour: bfd_flavour = bfd_flavour(9); +pub const bfd_flavour_bfd_target_ihex_flavour: bfd_flavour = bfd_flavour(10); +pub const bfd_flavour_bfd_target_som_flavour: bfd_flavour = bfd_flavour(11); +pub const bfd_flavour_bfd_target_os9k_flavour: bfd_flavour = bfd_flavour(12); +pub const bfd_flavour_bfd_target_versados_flavour: bfd_flavour = bfd_flavour(13); +pub const bfd_flavour_bfd_target_msdos_flavour: bfd_flavour = bfd_flavour(14); +pub const bfd_flavour_bfd_target_evax_flavour: bfd_flavour = bfd_flavour(15); +impl ::std::ops::BitOr for bfd_flavour { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + bfd_flavour(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for bfd_flavour { + #[inline] + fn bitor_assign(&mut self, rhs: bfd_flavour) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for bfd_flavour { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + bfd_flavour(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for bfd_flavour { + #[inline] + fn bitand_assign(&mut self, rhs: bfd_flavour) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct bfd_flavour(pub ::std::os::raw::c_uint); +pub const bfd_endian_BFD_ENDIAN_BIG: bfd_endian = bfd_endian(0); +pub const bfd_endian_BFD_ENDIAN_LITTLE: bfd_endian = bfd_endian(1); +pub const bfd_endian_BFD_ENDIAN_UNKNOWN: bfd_endian = bfd_endian(2); +impl ::std::ops::BitOr for bfd_endian { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + bfd_endian(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for bfd_endian { + #[inline] + fn bitor_assign(&mut self, rhs: bfd_endian) { + self.0 |= 
rhs.0; + } +} +impl ::std::ops::BitAnd for bfd_endian { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + bfd_endian(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for bfd_endian { + #[inline] + fn bitand_assign(&mut self, rhs: bfd_endian) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct bfd_endian(pub ::std::os::raw::c_uint); +pub const bfd_architecture_bfd_arch_unknown: bfd_architecture = bfd_architecture(0); +pub const bfd_architecture_bfd_arch_obscure: bfd_architecture = bfd_architecture(1); +pub const bfd_architecture_bfd_arch_m68k: bfd_architecture = bfd_architecture(2); +pub const bfd_architecture_bfd_arch_vax: bfd_architecture = bfd_architecture(3); +pub const bfd_architecture_bfd_arch_i960: bfd_architecture = bfd_architecture(4); +pub const bfd_architecture_bfd_arch_a29k: bfd_architecture = bfd_architecture(5); +pub const bfd_architecture_bfd_arch_sparc: bfd_architecture = bfd_architecture(6); +pub const bfd_architecture_bfd_arch_mips: bfd_architecture = bfd_architecture(7); +pub const bfd_architecture_bfd_arch_i386: bfd_architecture = bfd_architecture(8); +pub const bfd_architecture_bfd_arch_we32k: bfd_architecture = bfd_architecture(9); +pub const bfd_architecture_bfd_arch_tahoe: bfd_architecture = bfd_architecture(10); +pub const bfd_architecture_bfd_arch_i860: bfd_architecture = bfd_architecture(11); +pub const bfd_architecture_bfd_arch_romp: bfd_architecture = bfd_architecture(12); +pub const bfd_architecture_bfd_arch_alliant: bfd_architecture = bfd_architecture(13); +pub const bfd_architecture_bfd_arch_convex: bfd_architecture = bfd_architecture(14); +pub const bfd_architecture_bfd_arch_m88k: bfd_architecture = bfd_architecture(15); +pub const bfd_architecture_bfd_arch_pyramid: bfd_architecture = bfd_architecture(16); +pub const bfd_architecture_bfd_arch_h8300: bfd_architecture = bfd_architecture(17); +pub const bfd_architecture_bfd_arch_powerpc: bfd_architecture = bfd_architecture(18); +pub const bfd_architecture_bfd_arch_rs6000: bfd_architecture = bfd_architecture(19); +pub const bfd_architecture_bfd_arch_hppa: bfd_architecture = bfd_architecture(20); +pub const bfd_architecture_bfd_arch_d10v: bfd_architecture = bfd_architecture(21); +pub const bfd_architecture_bfd_arch_z8k: bfd_architecture = bfd_architecture(22); +pub const bfd_architecture_bfd_arch_h8500: bfd_architecture = bfd_architecture(23); +pub const bfd_architecture_bfd_arch_sh: bfd_architecture = bfd_architecture(24); +pub const bfd_architecture_bfd_arch_alpha: bfd_architecture = bfd_architecture(25); +pub const bfd_architecture_bfd_arch_arm: bfd_architecture = bfd_architecture(26); +pub const bfd_architecture_bfd_arch_ns32k: bfd_architecture = bfd_architecture(27); +pub const bfd_architecture_bfd_arch_w65: bfd_architecture = bfd_architecture(28); +pub const bfd_architecture_bfd_arch_tic30: bfd_architecture = bfd_architecture(29); +pub const bfd_architecture_bfd_arch_v850: bfd_architecture = bfd_architecture(30); +pub const bfd_architecture_bfd_arch_arc: bfd_architecture = bfd_architecture(31); +pub const bfd_architecture_bfd_arch_m32r: bfd_architecture = bfd_architecture(32); +pub const bfd_architecture_bfd_arch_mn10200: bfd_architecture = bfd_architecture(33); +pub const bfd_architecture_bfd_arch_mn10300: bfd_architecture = bfd_architecture(34); +pub const bfd_architecture_bfd_arch_avr: bfd_architecture = bfd_architecture(35); +pub const bfd_architecture_bfd_arch_cris: bfd_architecture = bfd_architecture(36); 
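// NOTE (editorial sketch, not part of the generated bindings): the `bfd_*`
// constants above and below follow bindgen's "newtype enum" translation: the C
// enum becomes a `#[repr(transparent)]` wrapper around `c_uint`, every
// enumerator becomes a `pub const` of that wrapper type, and `BitOr`/`BitAnd`
// impls let values be combined the way C code combines the raw integers.
// A minimal standalone illustration with a hypothetical `toy_flag` type:
//
//     #[repr(transparent)]
//     #[derive(Debug, Copy, Clone, PartialEq, Eq)]
//     pub struct toy_flag(pub ::std::os::raw::c_uint);
//     pub const toy_flag_TOY_A: toy_flag = toy_flag(1);
//     pub const toy_flag_TOY_B: toy_flag = toy_flag(2);
//     impl ::std::ops::BitOr for toy_flag {
//         type Output = Self;
//         fn bitor(self, other: Self) -> Self {
//             toy_flag(self.0 | other.0)
//         }
//     }
//     let both = toy_flag_TOY_A | toy_flag_TOY_B; // raw C value stays in both.0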
+pub const bfd_architecture_bfd_arch_microblaze: bfd_architecture = bfd_architecture(37); +pub const bfd_architecture_bfd_arch_moxie: bfd_architecture = bfd_architecture(38); +pub const bfd_architecture_bfd_arch_ia64: bfd_architecture = bfd_architecture(39); +pub const bfd_architecture_bfd_arch_nios2: bfd_architecture = bfd_architecture(40); +pub const bfd_architecture_bfd_arch_rx: bfd_architecture = bfd_architecture(41); +pub const bfd_architecture_bfd_arch_loongarch: bfd_architecture = bfd_architecture(42); +pub const bfd_architecture_bfd_arch_last: bfd_architecture = bfd_architecture(43); +impl ::std::ops::BitOr for bfd_architecture { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + bfd_architecture(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for bfd_architecture { + #[inline] + fn bitor_assign(&mut self, rhs: bfd_architecture) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for bfd_architecture { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + bfd_architecture(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for bfd_architecture { + #[inline] + fn bitand_assign(&mut self, rhs: bfd_architecture) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct bfd_architecture(pub ::std::os::raw::c_uint); +#[repr(C)] +#[derive(Copy, Clone)] +pub struct symbol_cache_entry { + pub name: *const ::std::os::raw::c_char, + pub udata: symbol_cache_entry__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union symbol_cache_entry__bindgen_ty_1 { + pub p: PTR, + pub i: bfd_vma, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of symbol_cache_entry__bindgen_ty_1"] + [::std::mem::size_of::() - 8usize]; + ["Alignment of symbol_cache_entry__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: symbol_cache_entry__bindgen_ty_1::p"] + [::std::mem::offset_of!(symbol_cache_entry__bindgen_ty_1, p) - 0usize]; + ["Offset of field: symbol_cache_entry__bindgen_ty_1::i"] + [::std::mem::offset_of!(symbol_cache_entry__bindgen_ty_1, i) - 0usize]; +}; +impl Default for symbol_cache_entry__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for symbol_cache_entry__bindgen_ty_1 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "symbol_cache_entry__bindgen_ty_1 {{ union }}") + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of symbol_cache_entry"][::std::mem::size_of::() - 16usize]; + ["Alignment of symbol_cache_entry"][::std::mem::align_of::() - 8usize]; + ["Offset of field: symbol_cache_entry::name"] + [::std::mem::offset_of!(symbol_cache_entry, name) - 0usize]; + ["Offset of field: symbol_cache_entry::udata"] + [::std::mem::offset_of!(symbol_cache_entry, udata) - 8usize]; +}; +impl Default for symbol_cache_entry { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for symbol_cache_entry { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "symbol_cache_entry {{ name: {:?}, udata: {:?} }}", + self.name, self.udata + ) + } +} +pub type asymbol = symbol_cache_entry; +pub type fprintf_function = 
::std::option::Option< + unsafe extern "C" fn( + f: *mut FILE, + fmt: *const ::std::os::raw::c_char, + ... + ) -> ::std::os::raw::c_int, +>; +pub const dis_insn_type_dis_noninsn: dis_insn_type = dis_insn_type(0); +pub const dis_insn_type_dis_nonbranch: dis_insn_type = dis_insn_type(1); +pub const dis_insn_type_dis_branch: dis_insn_type = dis_insn_type(2); +pub const dis_insn_type_dis_condbranch: dis_insn_type = dis_insn_type(3); +pub const dis_insn_type_dis_jsr: dis_insn_type = dis_insn_type(4); +pub const dis_insn_type_dis_condjsr: dis_insn_type = dis_insn_type(5); +pub const dis_insn_type_dis_dref: dis_insn_type = dis_insn_type(6); +pub const dis_insn_type_dis_dref2: dis_insn_type = dis_insn_type(7); +impl ::std::ops::BitOr for dis_insn_type { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + dis_insn_type(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for dis_insn_type { + #[inline] + fn bitor_assign(&mut self, rhs: dis_insn_type) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for dis_insn_type { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + dis_insn_type(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for dis_insn_type { + #[inline] + fn bitand_assign(&mut self, rhs: dis_insn_type) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct dis_insn_type(pub ::std::os::raw::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct disassemble_info { + pub fprintf_func: fprintf_function, + pub stream: *mut FILE, + pub application_data: PTR, + pub flavour: bfd_flavour, + pub arch: bfd_architecture, + pub mach: ::std::os::raw::c_ulong, + pub endian: bfd_endian, + pub symbols: *mut *mut asymbol, + pub num_symbols: ::std::os::raw::c_int, + pub flags: ::std::os::raw::c_ulong, + pub private_data: PTR, + pub read_memory_func: ::std::option::Option< + unsafe extern "C" fn( + memaddr: bfd_vma, + myaddr: *mut bfd_byte, + length: ::std::os::raw::c_int, + info: *mut disassemble_info, + ) -> ::std::os::raw::c_int, + >, + pub memory_error_func: ::std::option::Option< + unsafe extern "C" fn( + status: ::std::os::raw::c_int, + memaddr: bfd_vma, + info: *mut disassemble_info, + ), + >, + pub print_address_func: + ::std::option::Option, + pub print_insn: ::std::option::Option< + unsafe extern "C" fn(addr: bfd_vma, info: *mut disassemble_info) -> ::std::os::raw::c_int, + >, + pub symbol_at_address_func: ::std::option::Option< + unsafe extern "C" fn(addr: bfd_vma, info: *mut disassemble_info) -> ::std::os::raw::c_int, + >, + pub buffer: *const bfd_byte, + pub buffer_vma: bfd_vma, + pub buffer_length: ::std::os::raw::c_int, + pub bytes_per_line: ::std::os::raw::c_int, + pub bytes_per_chunk: ::std::os::raw::c_int, + pub display_endian: bfd_endian, + pub insn_info_valid: ::std::os::raw::c_char, + pub branch_delay_insns: ::std::os::raw::c_char, + pub data_size: ::std::os::raw::c_char, + pub insn_type: dis_insn_type, + pub target: bfd_vma, + pub target2: bfd_vma, + pub disassembler_options: *mut ::std::os::raw::c_char, + pub show_opcodes: bool, + pub target_info: *mut ::std::os::raw::c_void, + pub cap_arch: ::std::os::raw::c_int, + pub cap_mode: ::std::os::raw::c_int, + pub cap_insn_unit: ::std::os::raw::c_int, + pub cap_insn_split: ::std::os::raw::c_int, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of disassemble_info"][::std::mem::size_of::() - 216usize]; + ["Alignment of disassemble_info"][::std::mem::align_of::() - 8usize]; + 
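// NOTE (editorial sketch): these `const _: () = { ... }` blocks are bindgen's
// compile-time layout checks. Every line indexes a one-element string array
// with an expression that must evaluate to 0, e.g. for a hypothetical `Foo`:
//
//     ["Size of Foo"][::std::mem::size_of::<Foo>() - 216usize];
//
// If the Rust struct's size, alignment, or a field offset ever drifts from the
// C layout captured here, the index is out of bounds at const-evaluation time
// and the crate fails to build, rather than silently mis-reading QEMU memory
// across the FFI boundary.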
["Offset of field: disassemble_info::fprintf_func"] + [::std::mem::offset_of!(disassemble_info, fprintf_func) - 0usize]; + ["Offset of field: disassemble_info::stream"] + [::std::mem::offset_of!(disassemble_info, stream) - 8usize]; + ["Offset of field: disassemble_info::application_data"] + [::std::mem::offset_of!(disassemble_info, application_data) - 16usize]; + ["Offset of field: disassemble_info::flavour"] + [::std::mem::offset_of!(disassemble_info, flavour) - 24usize]; + ["Offset of field: disassemble_info::arch"] + [::std::mem::offset_of!(disassemble_info, arch) - 28usize]; + ["Offset of field: disassemble_info::mach"] + [::std::mem::offset_of!(disassemble_info, mach) - 32usize]; + ["Offset of field: disassemble_info::endian"] + [::std::mem::offset_of!(disassemble_info, endian) - 40usize]; + ["Offset of field: disassemble_info::symbols"] + [::std::mem::offset_of!(disassemble_info, symbols) - 48usize]; + ["Offset of field: disassemble_info::num_symbols"] + [::std::mem::offset_of!(disassemble_info, num_symbols) - 56usize]; + ["Offset of field: disassemble_info::flags"] + [::std::mem::offset_of!(disassemble_info, flags) - 64usize]; + ["Offset of field: disassemble_info::private_data"] + [::std::mem::offset_of!(disassemble_info, private_data) - 72usize]; + ["Offset of field: disassemble_info::read_memory_func"] + [::std::mem::offset_of!(disassemble_info, read_memory_func) - 80usize]; + ["Offset of field: disassemble_info::memory_error_func"] + [::std::mem::offset_of!(disassemble_info, memory_error_func) - 88usize]; + ["Offset of field: disassemble_info::print_address_func"] + [::std::mem::offset_of!(disassemble_info, print_address_func) - 96usize]; + ["Offset of field: disassemble_info::print_insn"] + [::std::mem::offset_of!(disassemble_info, print_insn) - 104usize]; + ["Offset of field: disassemble_info::symbol_at_address_func"] + [::std::mem::offset_of!(disassemble_info, symbol_at_address_func) - 112usize]; + ["Offset of field: disassemble_info::buffer"] + [::std::mem::offset_of!(disassemble_info, buffer) - 120usize]; + ["Offset of field: disassemble_info::buffer_vma"] + [::std::mem::offset_of!(disassemble_info, buffer_vma) - 128usize]; + ["Offset of field: disassemble_info::buffer_length"] + [::std::mem::offset_of!(disassemble_info, buffer_length) - 136usize]; + ["Offset of field: disassemble_info::bytes_per_line"] + [::std::mem::offset_of!(disassemble_info, bytes_per_line) - 140usize]; + ["Offset of field: disassemble_info::bytes_per_chunk"] + [::std::mem::offset_of!(disassemble_info, bytes_per_chunk) - 144usize]; + ["Offset of field: disassemble_info::display_endian"] + [::std::mem::offset_of!(disassemble_info, display_endian) - 148usize]; + ["Offset of field: disassemble_info::insn_info_valid"] + [::std::mem::offset_of!(disassemble_info, insn_info_valid) - 152usize]; + ["Offset of field: disassemble_info::branch_delay_insns"] + [::std::mem::offset_of!(disassemble_info, branch_delay_insns) - 153usize]; + ["Offset of field: disassemble_info::data_size"] + [::std::mem::offset_of!(disassemble_info, data_size) - 154usize]; + ["Offset of field: disassemble_info::insn_type"] + [::std::mem::offset_of!(disassemble_info, insn_type) - 156usize]; + ["Offset of field: disassemble_info::target"] + [::std::mem::offset_of!(disassemble_info, target) - 160usize]; + ["Offset of field: disassemble_info::target2"] + [::std::mem::offset_of!(disassemble_info, target2) - 168usize]; + ["Offset of field: disassemble_info::disassembler_options"] + [::std::mem::offset_of!(disassemble_info, 
disassembler_options) - 176usize]; + ["Offset of field: disassemble_info::show_opcodes"] + [::std::mem::offset_of!(disassemble_info, show_opcodes) - 184usize]; + ["Offset of field: disassemble_info::target_info"] + [::std::mem::offset_of!(disassemble_info, target_info) - 192usize]; + ["Offset of field: disassemble_info::cap_arch"] + [::std::mem::offset_of!(disassemble_info, cap_arch) - 200usize]; + ["Offset of field: disassemble_info::cap_mode"] + [::std::mem::offset_of!(disassemble_info, cap_mode) - 204usize]; + ["Offset of field: disassemble_info::cap_insn_unit"] + [::std::mem::offset_of!(disassemble_info, cap_insn_unit) - 208usize]; + ["Offset of field: disassemble_info::cap_insn_split"] + [::std::mem::offset_of!(disassemble_info, cap_insn_split) - 212usize]; +}; +impl Default for disassemble_info { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub type hwaddr = u64; +#[doc = " vaddr:\n Type wide enough to contain any #target_ulong virtual address."] +pub type vaddr = u64; +#[repr(C)] +#[derive(Copy, Clone)] +pub union CPUTLBEntry { + pub __bindgen_anon_1: CPUTLBEntry__bindgen_ty_1, + pub addr_idx: [u64; 4usize], +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct CPUTLBEntry__bindgen_ty_1 { + pub addr_read: u64, + pub addr_write: u64, + pub addr_code: u64, + pub addend: usize, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUTLBEntry__bindgen_ty_1"] + [::std::mem::size_of::() - 32usize]; + ["Alignment of CPUTLBEntry__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUTLBEntry__bindgen_ty_1::addr_read"] + [::std::mem::offset_of!(CPUTLBEntry__bindgen_ty_1, addr_read) - 0usize]; + ["Offset of field: CPUTLBEntry__bindgen_ty_1::addr_write"] + [::std::mem::offset_of!(CPUTLBEntry__bindgen_ty_1, addr_write) - 8usize]; + ["Offset of field: CPUTLBEntry__bindgen_ty_1::addr_code"] + [::std::mem::offset_of!(CPUTLBEntry__bindgen_ty_1, addr_code) - 16usize]; + ["Offset of field: CPUTLBEntry__bindgen_ty_1::addend"] + [::std::mem::offset_of!(CPUTLBEntry__bindgen_ty_1, addend) - 24usize]; +}; +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUTLBEntry"][::std::mem::size_of::() - 32usize]; + ["Alignment of CPUTLBEntry"][::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUTLBEntry::addr_idx"] + [::std::mem::offset_of!(CPUTLBEntry, addr_idx) - 0usize]; +}; +impl Default for CPUTLBEntry { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUTLBEntry { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "CPUTLBEntry {{ union }}") + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CPUTLBDescFast { + pub mask: usize, + pub table: *mut CPUTLBEntry, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUTLBDescFast"][::std::mem::size_of::() - 16usize]; + ["Alignment of CPUTLBDescFast"][::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUTLBDescFast::mask"] + [::std::mem::offset_of!(CPUTLBDescFast, mask) - 0usize]; + ["Offset of field: CPUTLBDescFast::table"] + [::std::mem::offset_of!(CPUTLBDescFast, table) - 8usize]; +}; +impl Default for CPUTLBDescFast { + fn default() -> Self { + let mut s = 
::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub const ShutdownCause_SHUTDOWN_CAUSE_NONE: ShutdownCause = ShutdownCause(0); +pub const ShutdownCause_SHUTDOWN_CAUSE_HOST_ERROR: ShutdownCause = ShutdownCause(1); +pub const ShutdownCause_SHUTDOWN_CAUSE_HOST_QMP_QUIT: ShutdownCause = ShutdownCause(2); +pub const ShutdownCause_SHUTDOWN_CAUSE_HOST_QMP_SYSTEM_RESET: ShutdownCause = ShutdownCause(3); +pub const ShutdownCause_SHUTDOWN_CAUSE_HOST_SIGNAL: ShutdownCause = ShutdownCause(4); +pub const ShutdownCause_SHUTDOWN_CAUSE_HOST_UI: ShutdownCause = ShutdownCause(5); +pub const ShutdownCause_SHUTDOWN_CAUSE_GUEST_SHUTDOWN: ShutdownCause = ShutdownCause(6); +pub const ShutdownCause_SHUTDOWN_CAUSE_GUEST_RESET: ShutdownCause = ShutdownCause(7); +pub const ShutdownCause_SHUTDOWN_CAUSE_GUEST_PANIC: ShutdownCause = ShutdownCause(8); +pub const ShutdownCause_SHUTDOWN_CAUSE_SUBSYSTEM_RESET: ShutdownCause = ShutdownCause(9); +pub const ShutdownCause_SHUTDOWN_CAUSE_SNAPSHOT_LOAD: ShutdownCause = ShutdownCause(10); +pub const ShutdownCause_SHUTDOWN_CAUSE__MAX: ShutdownCause = ShutdownCause(11); +impl ::std::ops::BitOr for ShutdownCause { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + ShutdownCause(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for ShutdownCause { + #[inline] + fn bitor_assign(&mut self, rhs: ShutdownCause) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for ShutdownCause { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + ShutdownCause(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for ShutdownCause { + #[inline] + fn bitand_assign(&mut self, rhs: ShutdownCause) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct ShutdownCause(pub ::std::os::raw::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct SysemuCPUOps { + _unused: [u8; 0], +} +#[doc = " CPUClass:\n @class_by_name: Callback to map -cpu command line model name to an\n instantiatable CPU type.\n @parse_features: Callback to parse command line arguments.\n @reset_dump_flags: #CPUDumpFlags to use for reset logging.\n @has_work: Callback for checking if there is work to do.\n @mmu_index: Callback for choosing softmmu mmu index;\n may be used internally by memory_rw_debug without TCG.\n @memory_rw_debug: Callback for GDB memory access.\n @dump_state: Callback for dumping state.\n @query_cpu_fast:\n Fill in target specific information for the \"query-cpus-fast\"\n QAPI call.\n @get_arch_id: Callback for getting architecture-dependent CPU ID.\n @set_pc: Callback for setting the Program Counter register. This\n should have the semantics used by the target architecture when\n setting the PC from a source such as an ELF file entry point;\n for example on Arm it will also set the Thumb mode bit based\n on the least significant bit of the new PC value.\n If the target behaviour here is anything other than \"set\n the PC register to the value passed in\" then the target must\n also implement the synchronize_from_tb hook.\n @get_pc: Callback for getting the Program Counter register.\n As above, with the semantics of the target architecture.\n @gdb_read_register: Callback for letting GDB read a register.\n @gdb_write_register: Callback for letting GDB write a register.\n @gdb_adjust_breakpoint: Callback for adjusting the address of a\n breakpoint. 
Used by AVR to handle a gdb mis-feature with\n its Harvard architecture split code and data.\n @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer\n from @gdb_core_xml_file.\n @gdb_core_xml_file: File name for core registers GDB XML description.\n @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop\n before the insn which triggers a watchpoint rather than after it.\n @gdb_arch_name: Optional callback that returns the architecture name known\n to GDB. The caller must free the returned string with g_free.\n @disas_set_info: Setup architecture specific components of disassembly info\n @adjust_watchpoint_address: Perform a target-specific adjustment to an\n address before attempting to match it against watchpoints.\n @deprecation_note: If this CPUClass is deprecated, this field provides\n related information.\n\n Represents a CPU family or model."] +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CPUClass { + pub parent_class: DeviceClass, + pub class_by_name: ::std::option::Option< + unsafe extern "C" fn(cpu_model: *const ::std::os::raw::c_char) -> *mut ObjectClass, + >, + pub parse_features: ::std::option::Option< + unsafe extern "C" fn( + typename: *const ::std::os::raw::c_char, + str_: *mut ::std::os::raw::c_char, + errp: *mut *mut Error, + ), + >, + pub has_work: ::std::option::Option bool>, + pub mmu_index: ::std::option::Option< + unsafe extern "C" fn(cpu: *mut CPUState, ifetch: bool) -> ::std::os::raw::c_int, + >, + pub memory_rw_debug: ::std::option::Option< + unsafe extern "C" fn( + cpu: *mut CPUState, + addr: vaddr, + buf: *mut u8, + len: ::std::os::raw::c_int, + is_write: bool, + ) -> ::std::os::raw::c_int, + >, + pub dump_state: ::std::option::Option< + unsafe extern "C" fn(cpu: *mut CPUState, arg1: *mut FILE, flags: ::std::os::raw::c_int), + >, + pub query_cpu_fast: + ::std::option::Option, + pub get_arch_id: ::std::option::Option i64>, + pub set_pc: ::std::option::Option, + pub get_pc: ::std::option::Option vaddr>, + pub gdb_read_register: ::std::option::Option< + unsafe extern "C" fn( + cpu: *mut CPUState, + buf: *mut GByteArray, + reg: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub gdb_write_register: ::std::option::Option< + unsafe extern "C" fn( + cpu: *mut CPUState, + buf: *mut u8, + reg: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, + >, + pub gdb_adjust_breakpoint: + ::std::option::Option vaddr>, + pub gdb_core_xml_file: *const ::std::os::raw::c_char, + pub gdb_arch_name: + ::std::option::Option *const gchar>, + pub disas_set_info: ::std::option::Option< + unsafe extern "C" fn(cpu: *mut CPUState, info: *mut disassemble_info), + >, + pub deprecation_note: *const ::std::os::raw::c_char, + pub accel_cpu: *mut AccelCPUClass, + pub sysemu_ops: *const SysemuCPUOps, + pub tcg_ops: *const TCGCPUOps, + pub init_accel_cpu: ::std::option::Option< + unsafe extern "C" fn(accel_cpu: *mut AccelCPUClass, cc: *mut CPUClass), + >, + pub reset_dump_flags: ::std::os::raw::c_int, + pub gdb_num_core_regs: ::std::os::raw::c_int, + pub gdb_stop_before_watchpoint: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUClass"][::std::mem::size_of::() - 360usize]; + ["Alignment of CPUClass"][::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUClass::parent_class"] + [::std::mem::offset_of!(CPUClass, parent_class) - 0usize]; + ["Offset of field: CPUClass::class_by_name"] + [::std::mem::offset_of!(CPUClass, class_by_name) - 176usize]; + ["Offset of field: 
CPUClass::parse_features"] + [::std::mem::offset_of!(CPUClass, parse_features) - 184usize]; + ["Offset of field: CPUClass::has_work"][::std::mem::offset_of!(CPUClass, has_work) - 192usize]; + ["Offset of field: CPUClass::mmu_index"] + [::std::mem::offset_of!(CPUClass, mmu_index) - 200usize]; + ["Offset of field: CPUClass::memory_rw_debug"] + [::std::mem::offset_of!(CPUClass, memory_rw_debug) - 208usize]; + ["Offset of field: CPUClass::dump_state"] + [::std::mem::offset_of!(CPUClass, dump_state) - 216usize]; + ["Offset of field: CPUClass::query_cpu_fast"] + [::std::mem::offset_of!(CPUClass, query_cpu_fast) - 224usize]; + ["Offset of field: CPUClass::get_arch_id"] + [::std::mem::offset_of!(CPUClass, get_arch_id) - 232usize]; + ["Offset of field: CPUClass::set_pc"][::std::mem::offset_of!(CPUClass, set_pc) - 240usize]; + ["Offset of field: CPUClass::get_pc"][::std::mem::offset_of!(CPUClass, get_pc) - 248usize]; + ["Offset of field: CPUClass::gdb_read_register"] + [::std::mem::offset_of!(CPUClass, gdb_read_register) - 256usize]; + ["Offset of field: CPUClass::gdb_write_register"] + [::std::mem::offset_of!(CPUClass, gdb_write_register) - 264usize]; + ["Offset of field: CPUClass::gdb_adjust_breakpoint"] + [::std::mem::offset_of!(CPUClass, gdb_adjust_breakpoint) - 272usize]; + ["Offset of field: CPUClass::gdb_core_xml_file"] + [::std::mem::offset_of!(CPUClass, gdb_core_xml_file) - 280usize]; + ["Offset of field: CPUClass::gdb_arch_name"] + [::std::mem::offset_of!(CPUClass, gdb_arch_name) - 288usize]; + ["Offset of field: CPUClass::disas_set_info"] + [::std::mem::offset_of!(CPUClass, disas_set_info) - 296usize]; + ["Offset of field: CPUClass::deprecation_note"] + [::std::mem::offset_of!(CPUClass, deprecation_note) - 304usize]; + ["Offset of field: CPUClass::accel_cpu"] + [::std::mem::offset_of!(CPUClass, accel_cpu) - 312usize]; + ["Offset of field: CPUClass::sysemu_ops"] + [::std::mem::offset_of!(CPUClass, sysemu_ops) - 320usize]; + ["Offset of field: CPUClass::tcg_ops"][::std::mem::offset_of!(CPUClass, tcg_ops) - 328usize]; + ["Offset of field: CPUClass::init_accel_cpu"] + [::std::mem::offset_of!(CPUClass, init_accel_cpu) - 336usize]; + ["Offset of field: CPUClass::reset_dump_flags"] + [::std::mem::offset_of!(CPUClass, reset_dump_flags) - 344usize]; + ["Offset of field: CPUClass::gdb_num_core_regs"] + [::std::mem::offset_of!(CPUClass, gdb_num_core_regs) - 348usize]; + ["Offset of field: CPUClass::gdb_stop_before_watchpoint"] + [::std::mem::offset_of!(CPUClass, gdb_stop_before_watchpoint) - 352usize]; +}; +impl Default for CPUClass { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CPUTLBEntryFull { + pub xlat_section: hwaddr, + pub phys_addr: hwaddr, + pub attrs: MemTxAttrs, + pub prot: u8, + pub lg_page_size: u8, + pub tlb_fill_flags: u8, + pub slow_flags: [u8; 3usize], + pub extra: CPUTLBEntryFull__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CPUTLBEntryFull__bindgen_ty_1 { + pub arm: CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1 { + pub pte_attrs: u8, + pub shareability: u8, + pub guarded: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1"] + [::std::mem::size_of::() - 3usize]; + ["Alignment of 
CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1"] + [::std::mem::align_of::() - 1usize]; + ["Offset of field: CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1::pte_attrs"] + [::std::mem::offset_of!(CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1, pte_attrs) - 0usize]; + ["Offset of field: CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1::shareability"][::std::mem::offset_of!( + CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1, + shareability + ) - 1usize]; + ["Offset of field: CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1::guarded"] + [::std::mem::offset_of!(CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1, guarded) - 2usize]; +}; +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUTLBEntryFull__bindgen_ty_1"] + [::std::mem::size_of::() - 3usize]; + ["Alignment of CPUTLBEntryFull__bindgen_ty_1"] + [::std::mem::align_of::() - 1usize]; + ["Offset of field: CPUTLBEntryFull__bindgen_ty_1::arm"] + [::std::mem::offset_of!(CPUTLBEntryFull__bindgen_ty_1, arm) - 0usize]; +}; +impl Default for CPUTLBEntryFull__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUTLBEntryFull__bindgen_ty_1 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "CPUTLBEntryFull__bindgen_ty_1 {{ union }}") + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUTLBEntryFull"][::std::mem::size_of::() - 32usize]; + ["Alignment of CPUTLBEntryFull"][::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUTLBEntryFull::xlat_section"] + [::std::mem::offset_of!(CPUTLBEntryFull, xlat_section) - 0usize]; + ["Offset of field: CPUTLBEntryFull::phys_addr"] + [::std::mem::offset_of!(CPUTLBEntryFull, phys_addr) - 8usize]; + ["Offset of field: CPUTLBEntryFull::attrs"] + [::std::mem::offset_of!(CPUTLBEntryFull, attrs) - 16usize]; + ["Offset of field: CPUTLBEntryFull::prot"] + [::std::mem::offset_of!(CPUTLBEntryFull, prot) - 20usize]; + ["Offset of field: CPUTLBEntryFull::lg_page_size"] + [::std::mem::offset_of!(CPUTLBEntryFull, lg_page_size) - 21usize]; + ["Offset of field: CPUTLBEntryFull::tlb_fill_flags"] + [::std::mem::offset_of!(CPUTLBEntryFull, tlb_fill_flags) - 22usize]; + ["Offset of field: CPUTLBEntryFull::slow_flags"] + [::std::mem::offset_of!(CPUTLBEntryFull, slow_flags) - 23usize]; + ["Offset of field: CPUTLBEntryFull::extra"] + [::std::mem::offset_of!(CPUTLBEntryFull, extra) - 26usize]; +}; +impl Default for CPUTLBEntryFull { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUTLBEntryFull { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "CPUTLBEntryFull {{ attrs: {:?}, slow_flags: {:?}, extra: {:?} }}", + self.attrs, self.slow_flags, self.extra + ) + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CPUTLBDesc { + pub large_page_addr: vaddr, + pub large_page_mask: vaddr, + pub window_begin_ns: i64, + pub window_max_entries: usize, + pub n_used_entries: usize, + pub vindex: usize, + pub vtable: [CPUTLBEntry; 8usize], + pub vfulltlb: [CPUTLBEntryFull; 8usize], + pub fulltlb: *mut CPUTLBEntryFull, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUTLBDesc"][::std::mem::size_of::() - 568usize]; + ["Alignment of 
CPUTLBDesc"][::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUTLBDesc::large_page_addr"] + [::std::mem::offset_of!(CPUTLBDesc, large_page_addr) - 0usize]; + ["Offset of field: CPUTLBDesc::large_page_mask"] + [::std::mem::offset_of!(CPUTLBDesc, large_page_mask) - 8usize]; + ["Offset of field: CPUTLBDesc::window_begin_ns"] + [::std::mem::offset_of!(CPUTLBDesc, window_begin_ns) - 16usize]; + ["Offset of field: CPUTLBDesc::window_max_entries"] + [::std::mem::offset_of!(CPUTLBDesc, window_max_entries) - 24usize]; + ["Offset of field: CPUTLBDesc::n_used_entries"] + [::std::mem::offset_of!(CPUTLBDesc, n_used_entries) - 32usize]; + ["Offset of field: CPUTLBDesc::vindex"][::std::mem::offset_of!(CPUTLBDesc, vindex) - 40usize]; + ["Offset of field: CPUTLBDesc::vtable"][::std::mem::offset_of!(CPUTLBDesc, vtable) - 48usize]; + ["Offset of field: CPUTLBDesc::vfulltlb"] + [::std::mem::offset_of!(CPUTLBDesc, vfulltlb) - 304usize]; + ["Offset of field: CPUTLBDesc::fulltlb"] + [::std::mem::offset_of!(CPUTLBDesc, fulltlb) - 560usize]; +}; +impl Default for CPUTLBDesc { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUTLBDesc { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "CPUTLBDesc {{ vtable: {:?}, vfulltlb: {:?}, fulltlb: {:?} }}", + self.vtable, self.vfulltlb, self.fulltlb + ) + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct CPUTLBCommon { + pub lock: QemuSpin, + pub dirty: u16, + pub full_flush_count: usize, + pub part_flush_count: usize, + pub elide_flush_count: usize, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUTLBCommon"][::std::mem::size_of::() - 32usize]; + ["Alignment of CPUTLBCommon"][::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUTLBCommon::lock"][::std::mem::offset_of!(CPUTLBCommon, lock) - 0usize]; + ["Offset of field: CPUTLBCommon::dirty"][::std::mem::offset_of!(CPUTLBCommon, dirty) - 4usize]; + ["Offset of field: CPUTLBCommon::full_flush_count"] + [::std::mem::offset_of!(CPUTLBCommon, full_flush_count) - 8usize]; + ["Offset of field: CPUTLBCommon::part_flush_count"] + [::std::mem::offset_of!(CPUTLBCommon, part_flush_count) - 16usize]; + ["Offset of field: CPUTLBCommon::elide_flush_count"] + [::std::mem::offset_of!(CPUTLBCommon, elide_flush_count) - 24usize]; +}; +#[repr(C)] +#[repr(align(16))] +#[derive(Copy, Clone)] +pub struct CPUTLB { + pub c: CPUTLBCommon, + pub d: [CPUTLBDesc; 16usize], + pub f: [CPUTLBDescFast; 16usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUTLB"][::std::mem::size_of::() - 9376usize]; + ["Alignment of CPUTLB"][::std::mem::align_of::() - 16usize]; + ["Offset of field: CPUTLB::c"][::std::mem::offset_of!(CPUTLB, c) - 0usize]; + ["Offset of field: CPUTLB::d"][::std::mem::offset_of!(CPUTLB, d) - 32usize]; + ["Offset of field: CPUTLB::f"][::std::mem::offset_of!(CPUTLB, f) - 9120usize]; +}; +impl Default for CPUTLB { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUTLB { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "CPUTLB {{ c: {:?}, d: {:?}, f: {:?} }}", + self.c, self.d, self.f + ) + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union 
IcountDecr { + pub u32_: u32, + pub u16_: IcountDecr__bindgen_ty_1, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct IcountDecr__bindgen_ty_1 { + pub low: u16, + pub high: u16, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of IcountDecr__bindgen_ty_1"] + [::std::mem::size_of::() - 4usize]; + ["Alignment of IcountDecr__bindgen_ty_1"] + [::std::mem::align_of::() - 2usize]; + ["Offset of field: IcountDecr__bindgen_ty_1::low"] + [::std::mem::offset_of!(IcountDecr__bindgen_ty_1, low) - 0usize]; + ["Offset of field: IcountDecr__bindgen_ty_1::high"] + [::std::mem::offset_of!(IcountDecr__bindgen_ty_1, high) - 2usize]; +}; +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of IcountDecr"][::std::mem::size_of::() - 4usize]; + ["Alignment of IcountDecr"][::std::mem::align_of::() - 4usize]; + ["Offset of field: IcountDecr::u32_"][::std::mem::offset_of!(IcountDecr, u32_) - 0usize]; + ["Offset of field: IcountDecr::u16_"][::std::mem::offset_of!(IcountDecr, u16_) - 0usize]; +}; +impl Default for IcountDecr { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for IcountDecr { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "IcountDecr {{ union }}") + } +} +#[repr(C)] +#[repr(align(16))] +#[derive(Copy, Clone)] +pub struct CPUNegativeOffsetState { + pub tlb: CPUTLB, + pub icount_decr: IcountDecr, + pub can_do_io: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUNegativeOffsetState"][::std::mem::size_of::() - 9392usize]; + ["Alignment of CPUNegativeOffsetState"] + [::std::mem::align_of::() - 16usize]; + ["Offset of field: CPUNegativeOffsetState::tlb"] + [::std::mem::offset_of!(CPUNegativeOffsetState, tlb) - 0usize]; + ["Offset of field: CPUNegativeOffsetState::icount_decr"] + [::std::mem::offset_of!(CPUNegativeOffsetState, icount_decr) - 9376usize]; + ["Offset of field: CPUNegativeOffsetState::can_do_io"] + [::std::mem::offset_of!(CPUNegativeOffsetState, can_do_io) - 9380usize]; +}; +impl Default for CPUNegativeOffsetState { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUNegativeOffsetState { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "CPUNegativeOffsetState {{ tlb: {:?}, icount_decr: {:?}, can_do_io: {:?} }}", + self.tlb, self.icount_decr, self.can_do_io + ) + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CPUBreakpoint { + pub pc: vaddr, + pub flags: ::std::os::raw::c_int, + pub entry: CPUBreakpoint__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CPUBreakpoint__bindgen_ty_1 { + pub tqe_next: *mut CPUBreakpoint, + pub tqe_circ: QTailQLink, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUBreakpoint__bindgen_ty_1"] + [::std::mem::size_of::() - 16usize]; + ["Alignment of CPUBreakpoint__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUBreakpoint__bindgen_ty_1::tqe_next"] + [::std::mem::offset_of!(CPUBreakpoint__bindgen_ty_1, tqe_next) - 0usize]; + ["Offset of field: CPUBreakpoint__bindgen_ty_1::tqe_circ"] + [::std::mem::offset_of!(CPUBreakpoint__bindgen_ty_1, tqe_circ) - 0usize]; +}; +impl 
Default for CPUBreakpoint__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUBreakpoint__bindgen_ty_1 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "CPUBreakpoint__bindgen_ty_1 {{ union }}") + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUBreakpoint"][::std::mem::size_of::() - 32usize]; + ["Alignment of CPUBreakpoint"][::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUBreakpoint::pc"][::std::mem::offset_of!(CPUBreakpoint, pc) - 0usize]; + ["Offset of field: CPUBreakpoint::flags"] + [::std::mem::offset_of!(CPUBreakpoint, flags) - 8usize]; + ["Offset of field: CPUBreakpoint::entry"] + [::std::mem::offset_of!(CPUBreakpoint, entry) - 16usize]; +}; +impl Default for CPUBreakpoint { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUBreakpoint { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "CPUBreakpoint {{ flags: {:?}, entry: {:?} }}", + self.flags, self.entry + ) + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct CPUWatchpoint { + pub vaddr: vaddr, + pub len: vaddr, + pub hitaddr: vaddr, + pub hitattrs: MemTxAttrs, + pub flags: ::std::os::raw::c_int, + pub entry: CPUWatchpoint__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CPUWatchpoint__bindgen_ty_1 { + pub tqe_next: *mut CPUWatchpoint, + pub tqe_circ: QTailQLink, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUWatchpoint__bindgen_ty_1"] + [::std::mem::size_of::() - 16usize]; + ["Alignment of CPUWatchpoint__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUWatchpoint__bindgen_ty_1::tqe_next"] + [::std::mem::offset_of!(CPUWatchpoint__bindgen_ty_1, tqe_next) - 0usize]; + ["Offset of field: CPUWatchpoint__bindgen_ty_1::tqe_circ"] + [::std::mem::offset_of!(CPUWatchpoint__bindgen_ty_1, tqe_circ) - 0usize]; +}; +impl Default for CPUWatchpoint__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUWatchpoint__bindgen_ty_1 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "CPUWatchpoint__bindgen_ty_1 {{ union }}") + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUWatchpoint"][::std::mem::size_of::() - 48usize]; + ["Alignment of CPUWatchpoint"][::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUWatchpoint::vaddr"] + [::std::mem::offset_of!(CPUWatchpoint, vaddr) - 0usize]; + ["Offset of field: CPUWatchpoint::len"][::std::mem::offset_of!(CPUWatchpoint, len) - 8usize]; + ["Offset of field: CPUWatchpoint::hitaddr"] + [::std::mem::offset_of!(CPUWatchpoint, hitaddr) - 16usize]; + ["Offset of field: CPUWatchpoint::hitattrs"] + [::std::mem::offset_of!(CPUWatchpoint, hitattrs) - 24usize]; + ["Offset of field: CPUWatchpoint::flags"] + [::std::mem::offset_of!(CPUWatchpoint, flags) - 28usize]; + ["Offset of field: CPUWatchpoint::entry"] + [::std::mem::offset_of!(CPUWatchpoint, entry) - 32usize]; +}; +impl Default for CPUWatchpoint { + fn default() -> Self { + let 
mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUWatchpoint { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "CPUWatchpoint {{ hitattrs: {:?}, flags: {:?}, entry: {:?} }}", + self.hitattrs, self.flags, self.entry + ) + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct KVMState { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct kvm_run { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct qemu_work_item { + _unused: [u8; 0], +} +#[doc = " CPUState:\n @cpu_index: CPU index (informative).\n @cluster_index: Identifies which cluster this CPU is in.\n For boards which don't define clusters or for \"loose\" CPUs not assigned\n to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will\n be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER\n QOM parent.\n Under TCG this value is propagated to @tcg_cflags.\n See TranslationBlock::TCG CF_CLUSTER_MASK.\n @tcg_cflags: Pre-computed cflags for this cpu.\n @nr_cores: Number of cores within this CPU package.\n @nr_threads: Number of threads within this CPU core.\n @running: #true if CPU is currently running (lockless).\n @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;\n valid under cpu_list_lock.\n @created: Indicates whether the CPU thread has been successfully created.\n @interrupt_request: Indicates a pending interrupt request.\n @halted: Nonzero if the CPU is in suspended state.\n @stop: Indicates a pending stop request.\n @stopped: Indicates the CPU has been artificially stopped.\n @unplug: Indicates a pending CPU unplug request.\n @crash_occurred: Indicates the OS reported a crash (panic) for this CPU\n @singlestep_enabled: Flags for single-stepping.\n @icount_extra: Instructions until next timer event.\n @neg.can_do_io: True if memory-mapped IO is allowed.\n @cpu_ases: Pointer to array of CPUAddressSpaces (which define the\n AddressSpaces this CPU has)\n @num_ases: number of CPUAddressSpaces in @cpu_ases\n @as: Pointer to the first AddressSpace, for the convenience of targets which\n only have a single AddressSpace\n @gdb_regs: Additional GDB registers.\n @gdb_num_regs: Number of total registers accessible to GDB.\n @gdb_num_g_regs: Number of registers in GDB 'g' packets.\n @node: QTAILQ of CPUs sharing TB cache.\n @opaque: User data.\n @mem_io_pc: Host Program Counter at which the memory was accessed.\n @accel: Pointer to accelerator specific state.\n @kvm_fd: vCPU file descriptor for KVM.\n @work_mutex: Lock to prevent multiple access to @work_list.\n @work_list: List of pending asynchronous work.\n @plugin_mem_cbs: active plugin memory callbacks\n @plugin_state: per-CPU plugin state\n @ignore_memory_transaction_failures: Cached copy of the MachineState\n flag of the same name: allows the board to suppress calling of the\n CPU do_transaction_failed hook function.\n @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM dirty\n ring is enabled.\n @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU\n dirty ring structure.\n\n State of one CPU core or thread.\n\n Align, in order to match possible alignment required by CPUArchState,\n and eliminate a hole between CPUState and CPUArchState within ArchCPU."] +#[repr(C)] +#[repr(align(16))] +pub struct CPUState { + pub parent_obj: DeviceState, + pub cc: *mut CPUClass, + pub nr_cores: 
::std::os::raw::c_int, + pub nr_threads: ::std::os::raw::c_int, + pub thread: *mut QemuThread, + pub thread_id: ::std::os::raw::c_int, + pub running: bool, + pub has_waiter: bool, + pub halt_cond: *mut QemuCond, + pub thread_kicked: bool, + pub created: bool, + pub stop: bool, + pub stopped: bool, + pub start_powered_off: bool, + pub unplug: bool, + pub crash_occurred: bool, + pub exit_request: bool, + pub exclusive_context_count: ::std::os::raw::c_int, + pub cflags_next_tb: u32, + pub interrupt_request: u32, + pub singlestep_enabled: ::std::os::raw::c_int, + pub icount_budget: i64, + pub icount_extra: i64, + pub random_seed: u64, + pub jmp_env: sigjmp_buf, + pub work_mutex: QemuMutex, + pub work_list: CPUState__bindgen_ty_1, + pub cpu_ases: *mut CPUAddressSpace, + pub num_ases: ::std::os::raw::c_int, + pub as_: *mut AddressSpace, + pub memory: *mut MemoryRegion, + pub tb_jmp_cache: *mut CPUJumpCache, + pub gdb_regs: *mut GArray, + pub gdb_num_regs: ::std::os::raw::c_int, + pub gdb_num_g_regs: ::std::os::raw::c_int, + pub node: CPUState__bindgen_ty_2, + pub breakpoints: CPUState__bindgen_ty_3, + pub watchpoints: CPUState__bindgen_ty_4, + pub watchpoint_hit: *mut CPUWatchpoint, + pub opaque: *mut ::std::os::raw::c_void, + pub mem_io_pc: usize, + pub kvm_fd: ::std::os::raw::c_int, + pub kvm_state: *mut KVMState, + pub kvm_run: *mut kvm_run, + pub kvm_dirty_gfns: *mut kvm_dirty_gfn, + pub kvm_fetch_index: u32, + pub dirty_pages: u64, + pub kvm_vcpu_stats_fd: ::std::os::raw::c_int, + pub in_ioctl_lock: QemuLockCnt, + pub plugin_mem_cbs: *mut GArray, + pub plugin_state: *mut CPUPluginState, + pub cpu_index: ::std::os::raw::c_int, + pub cluster_index: ::std::os::raw::c_int, + pub tcg_cflags: u32, + pub halted: u32, + pub exception_index: i32, + pub accel: *mut AccelCPUState, + pub vcpu_dirty: bool, + pub throttle_thread_scheduled: bool, + pub throttle_us_per_full: i64, + pub ignore_memory_transaction_failures: bool, + pub prctl_unalign_sigbus: bool, + pub iommu_notifiers: *mut GArray, + pub __bindgen_padding_0: [u8; 8usize], + pub neg_align: __IncompleteArrayField<::std::os::raw::c_char>, + pub neg: CPUNegativeOffsetState, +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CPUState__bindgen_ty_1 { + pub sqh_first: *mut qemu_work_item, + pub sqh_last: *mut *mut qemu_work_item, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUState__bindgen_ty_1"][::std::mem::size_of::() - 16usize]; + ["Alignment of CPUState__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUState__bindgen_ty_1::sqh_first"] + [::std::mem::offset_of!(CPUState__bindgen_ty_1, sqh_first) - 0usize]; + ["Offset of field: CPUState__bindgen_ty_1::sqh_last"] + [::std::mem::offset_of!(CPUState__bindgen_ty_1, sqh_last) - 8usize]; +}; +impl Default for CPUState__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CPUState__bindgen_ty_2 { + pub tqe_next: *mut CPUState, + pub tqe_circ: QTailQLink, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUState__bindgen_ty_2"][::std::mem::size_of::() - 16usize]; + ["Alignment of CPUState__bindgen_ty_2"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUState__bindgen_ty_2::tqe_next"] + [::std::mem::offset_of!(CPUState__bindgen_ty_2, tqe_next) - 0usize]; + ["Offset of field: 
CPUState__bindgen_ty_2::tqe_circ"] + [::std::mem::offset_of!(CPUState__bindgen_ty_2, tqe_circ) - 0usize]; +}; +impl Default for CPUState__bindgen_ty_2 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUState__bindgen_ty_2 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "CPUState__bindgen_ty_2 {{ union }}") + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CPUState__bindgen_ty_3 { + pub tqh_first: *mut CPUBreakpoint, + pub tqh_circ: QTailQLink, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUState__bindgen_ty_3"][::std::mem::size_of::() - 16usize]; + ["Alignment of CPUState__bindgen_ty_3"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUState__bindgen_ty_3::tqh_first"] + [::std::mem::offset_of!(CPUState__bindgen_ty_3, tqh_first) - 0usize]; + ["Offset of field: CPUState__bindgen_ty_3::tqh_circ"] + [::std::mem::offset_of!(CPUState__bindgen_ty_3, tqh_circ) - 0usize]; +}; +impl Default for CPUState__bindgen_ty_3 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUState__bindgen_ty_3 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "CPUState__bindgen_ty_3 {{ union }}") + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union CPUState__bindgen_ty_4 { + pub tqh_first: *mut CPUWatchpoint, + pub tqh_circ: QTailQLink, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUState__bindgen_ty_4"][::std::mem::size_of::() - 16usize]; + ["Alignment of CPUState__bindgen_ty_4"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUState__bindgen_ty_4::tqh_first"] + [::std::mem::offset_of!(CPUState__bindgen_ty_4, tqh_first) - 0usize]; + ["Offset of field: CPUState__bindgen_ty_4::tqh_circ"] + [::std::mem::offset_of!(CPUState__bindgen_ty_4, tqh_circ) - 0usize]; +}; +impl Default for CPUState__bindgen_ty_4 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUState__bindgen_ty_4 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "CPUState__bindgen_ty_4 {{ union }}") + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUState"][::std::mem::size_of::() - 10176usize]; + ["Alignment of CPUState"][::std::mem::align_of::() - 16usize]; + ["Offset of field: CPUState::parent_obj"] + [::std::mem::offset_of!(CPUState, parent_obj) - 0usize]; + ["Offset of field: CPUState::cc"][::std::mem::offset_of!(CPUState, cc) - 160usize]; + ["Offset of field: CPUState::nr_cores"][::std::mem::offset_of!(CPUState, nr_cores) - 168usize]; + ["Offset of field: CPUState::nr_threads"] + [::std::mem::offset_of!(CPUState, nr_threads) - 172usize]; + ["Offset of field: CPUState::thread"][::std::mem::offset_of!(CPUState, thread) - 176usize]; + ["Offset of field: CPUState::thread_id"] + [::std::mem::offset_of!(CPUState, thread_id) - 184usize]; + ["Offset of field: CPUState::running"][::std::mem::offset_of!(CPUState, running) - 188usize]; + ["Offset of field: CPUState::has_waiter"] + [::std::mem::offset_of!(CPUState, has_waiter) - 189usize]; + 
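// NOTE (editorial sketch): the `parent_obj` offset of 0 asserted above is what
// makes QOM-style upcasting work from Rust: `DeviceState` (and, at its own
// offset 0, `Object`) sits at the very start of `CPUState`, so a plain pointer
// cast views a CPU as its parent type. Illustrative only, with a hypothetical
// `cpu_ptr` obtained from QEMU:
//
//     let dev = cpu_ptr as *mut DeviceState; // sound because offset_of!(CPUState, parent_obj) == 0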
["Offset of field: CPUState::halt_cond"] + [::std::mem::offset_of!(CPUState, halt_cond) - 192usize]; + ["Offset of field: CPUState::thread_kicked"] + [::std::mem::offset_of!(CPUState, thread_kicked) - 200usize]; + ["Offset of field: CPUState::created"][::std::mem::offset_of!(CPUState, created) - 201usize]; + ["Offset of field: CPUState::stop"][::std::mem::offset_of!(CPUState, stop) - 202usize]; + ["Offset of field: CPUState::stopped"][::std::mem::offset_of!(CPUState, stopped) - 203usize]; + ["Offset of field: CPUState::start_powered_off"] + [::std::mem::offset_of!(CPUState, start_powered_off) - 204usize]; + ["Offset of field: CPUState::unplug"][::std::mem::offset_of!(CPUState, unplug) - 205usize]; + ["Offset of field: CPUState::crash_occurred"] + [::std::mem::offset_of!(CPUState, crash_occurred) - 206usize]; + ["Offset of field: CPUState::exit_request"] + [::std::mem::offset_of!(CPUState, exit_request) - 207usize]; + ["Offset of field: CPUState::exclusive_context_count"] + [::std::mem::offset_of!(CPUState, exclusive_context_count) - 208usize]; + ["Offset of field: CPUState::cflags_next_tb"] + [::std::mem::offset_of!(CPUState, cflags_next_tb) - 212usize]; + ["Offset of field: CPUState::interrupt_request"] + [::std::mem::offset_of!(CPUState, interrupt_request) - 216usize]; + ["Offset of field: CPUState::singlestep_enabled"] + [::std::mem::offset_of!(CPUState, singlestep_enabled) - 220usize]; + ["Offset of field: CPUState::icount_budget"] + [::std::mem::offset_of!(CPUState, icount_budget) - 224usize]; + ["Offset of field: CPUState::icount_extra"] + [::std::mem::offset_of!(CPUState, icount_extra) - 232usize]; + ["Offset of field: CPUState::random_seed"] + [::std::mem::offset_of!(CPUState, random_seed) - 240usize]; + ["Offset of field: CPUState::jmp_env"][::std::mem::offset_of!(CPUState, jmp_env) - 248usize]; + ["Offset of field: CPUState::work_mutex"] + [::std::mem::offset_of!(CPUState, work_mutex) - 448usize]; + ["Offset of field: CPUState::work_list"] + [::std::mem::offset_of!(CPUState, work_list) - 496usize]; + ["Offset of field: CPUState::cpu_ases"][::std::mem::offset_of!(CPUState, cpu_ases) - 512usize]; + ["Offset of field: CPUState::num_ases"][::std::mem::offset_of!(CPUState, num_ases) - 520usize]; + ["Offset of field: CPUState::as_"][::std::mem::offset_of!(CPUState, as_) - 528usize]; + ["Offset of field: CPUState::memory"][::std::mem::offset_of!(CPUState, memory) - 536usize]; + ["Offset of field: CPUState::tb_jmp_cache"] + [::std::mem::offset_of!(CPUState, tb_jmp_cache) - 544usize]; + ["Offset of field: CPUState::gdb_regs"][::std::mem::offset_of!(CPUState, gdb_regs) - 552usize]; + ["Offset of field: CPUState::gdb_num_regs"] + [::std::mem::offset_of!(CPUState, gdb_num_regs) - 560usize]; + ["Offset of field: CPUState::gdb_num_g_regs"] + [::std::mem::offset_of!(CPUState, gdb_num_g_regs) - 564usize]; + ["Offset of field: CPUState::node"][::std::mem::offset_of!(CPUState, node) - 568usize]; + ["Offset of field: CPUState::breakpoints"] + [::std::mem::offset_of!(CPUState, breakpoints) - 584usize]; + ["Offset of field: CPUState::watchpoints"] + [::std::mem::offset_of!(CPUState, watchpoints) - 600usize]; + ["Offset of field: CPUState::watchpoint_hit"] + [::std::mem::offset_of!(CPUState, watchpoint_hit) - 616usize]; + ["Offset of field: CPUState::opaque"][::std::mem::offset_of!(CPUState, opaque) - 624usize]; + ["Offset of field: CPUState::mem_io_pc"] + [::std::mem::offset_of!(CPUState, mem_io_pc) - 632usize]; + ["Offset of field: CPUState::kvm_fd"][::std::mem::offset_of!(CPUState, kvm_fd) - 
640usize]; + ["Offset of field: CPUState::kvm_state"] + [::std::mem::offset_of!(CPUState, kvm_state) - 648usize]; + ["Offset of field: CPUState::kvm_run"][::std::mem::offset_of!(CPUState, kvm_run) - 656usize]; + ["Offset of field: CPUState::kvm_dirty_gfns"] + [::std::mem::offset_of!(CPUState, kvm_dirty_gfns) - 664usize]; + ["Offset of field: CPUState::kvm_fetch_index"] + [::std::mem::offset_of!(CPUState, kvm_fetch_index) - 672usize]; + ["Offset of field: CPUState::dirty_pages"] + [::std::mem::offset_of!(CPUState, dirty_pages) - 680usize]; + ["Offset of field: CPUState::kvm_vcpu_stats_fd"] + [::std::mem::offset_of!(CPUState, kvm_vcpu_stats_fd) - 688usize]; + ["Offset of field: CPUState::in_ioctl_lock"] + [::std::mem::offset_of!(CPUState, in_ioctl_lock) - 692usize]; + ["Offset of field: CPUState::plugin_mem_cbs"] + [::std::mem::offset_of!(CPUState, plugin_mem_cbs) - 696usize]; + ["Offset of field: CPUState::plugin_state"] + [::std::mem::offset_of!(CPUState, plugin_state) - 704usize]; + ["Offset of field: CPUState::cpu_index"] + [::std::mem::offset_of!(CPUState, cpu_index) - 712usize]; + ["Offset of field: CPUState::cluster_index"] + [::std::mem::offset_of!(CPUState, cluster_index) - 716usize]; + ["Offset of field: CPUState::tcg_cflags"] + [::std::mem::offset_of!(CPUState, tcg_cflags) - 720usize]; + ["Offset of field: CPUState::halted"][::std::mem::offset_of!(CPUState, halted) - 724usize]; + ["Offset of field: CPUState::exception_index"] + [::std::mem::offset_of!(CPUState, exception_index) - 728usize]; + ["Offset of field: CPUState::accel"][::std::mem::offset_of!(CPUState, accel) - 736usize]; + ["Offset of field: CPUState::vcpu_dirty"] + [::std::mem::offset_of!(CPUState, vcpu_dirty) - 744usize]; + ["Offset of field: CPUState::throttle_thread_scheduled"] + [::std::mem::offset_of!(CPUState, throttle_thread_scheduled) - 745usize]; + ["Offset of field: CPUState::throttle_us_per_full"] + [::std::mem::offset_of!(CPUState, throttle_us_per_full) - 752usize]; + ["Offset of field: CPUState::ignore_memory_transaction_failures"] + [::std::mem::offset_of!(CPUState, ignore_memory_transaction_failures) - 760usize]; + ["Offset of field: CPUState::prctl_unalign_sigbus"] + [::std::mem::offset_of!(CPUState, prctl_unalign_sigbus) - 761usize]; + ["Offset of field: CPUState::iommu_notifiers"] + [::std::mem::offset_of!(CPUState, iommu_notifiers) - 768usize]; + ["Offset of field: CPUState::neg_align"] + [::std::mem::offset_of!(CPUState, neg_align) - 784usize]; + ["Offset of field: CPUState::neg"][::std::mem::offset_of!(CPUState, neg) - 784usize]; +}; +impl Default for CPUState { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUState { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write ! 
(f , "CPUState {{ parent_obj: {:?}, cc: {:?}, nr_cores: {:?}, nr_threads: {:?}, thread: {:?}, thread_id: {:?}, running: {:?}, has_waiter: {:?}, halt_cond: {:?}, thread_kicked: {:?}, created: {:?}, stop: {:?}, stopped: {:?}, start_powered_off: {:?}, unplug: {:?}, crash_occurred: {:?}, exit_request: {:?}, exclusive_context_count: {:?}, singlestep_enabled: {:?}, jmp_env: {:?}, work_mutex: {:?}, work_list: {:?}, cpu_ases: {:?}, num_ases: {:?}, as: {:?}, memory: {:?}, tb_jmp_cache: {:?}, gdb_regs: {:?}, gdb_num_regs: {:?}, gdb_num_g_regs: {:?}, node: {:?}, breakpoints: {:?}, watchpoints: {:?}, watchpoint_hit: {:?}, opaque: {:?}, kvm_fd: {:?}, kvm_state: {:?}, kvm_run: {:?}, kvm_dirty_gfns: {:?}, kvm_vcpu_stats_fd: {:?}, in_ioctl_lock: {:?}, plugin_mem_cbs: {:?}, plugin_state: {:?}, cpu_index: {:?}, cluster_index: {:?}, accel: {:?}, vcpu_dirty: {:?}, throttle_thread_scheduled: {:?}, ignore_memory_transaction_failures: {:?}, prctl_unalign_sigbus: {:?}, iommu_notifiers: {:?}, neg_align: {:?}, neg: {:?} }}" , self . parent_obj , self . cc , self . nr_cores , self . nr_threads , self . thread , self . thread_id , self . running , self . has_waiter , self . halt_cond , self . thread_kicked , self . created , self . stop , self . stopped , self . start_powered_off , self . unplug , self . crash_occurred , self . exit_request , self . exclusive_context_count , self . singlestep_enabled , self . jmp_env , self . work_mutex , self . work_list , self . cpu_ases , self . num_ases , self . as_ , self . memory , self . tb_jmp_cache , self . gdb_regs , self . gdb_num_regs , self . gdb_num_g_regs , self . node , self . breakpoints , self . watchpoints , self . watchpoint_hit , self . opaque , self . kvm_fd , self . kvm_state , self . kvm_run , self . kvm_dirty_gfns , self . kvm_vcpu_stats_fd , self . in_ioctl_lock , self . plugin_mem_cbs , self . plugin_state , self . cpu_index , self . cluster_index , self . accel , self . vcpu_dirty , self . throttle_thread_scheduled , self . ignore_memory_transaction_failures , self . prctl_unalign_sigbus , self . iommu_notifiers , self . neg_align , self . neg) + } +} +extern "C" { + #[doc = " cpu_reset:\n @cpu: The CPU whose state is to be reset."] + pub fn cpu_reset(cpu: *mut CPUState); +} +pub type target_long = i64; +pub type target_ulong = u64; +#[doc = " Property:\n @set_default: true if the default value should be set from @defval,\n in which case @info->set_default_value must not be NULL\n (if false then no default value is set by the property system\n and the field retains whatever value it was given by instance_init).\n @defval: default value for the property. 
This is used only if @set_default\n is true."] +#[repr(C)] +#[derive(Copy, Clone)] +pub struct Property { + pub name: *const ::std::os::raw::c_char, + pub info: *const PropertyInfo, + pub offset: isize, + pub bitnr: u8, + pub bitmask: u64, + pub set_default: bool, + pub defval: Property__bindgen_ty_1, + pub arrayoffset: ::std::os::raw::c_int, + pub arrayinfo: *const PropertyInfo, + pub arrayfieldsize: ::std::os::raw::c_int, + pub link_type: *const ::std::os::raw::c_char, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union Property__bindgen_ty_1 { + pub i: i64, + pub u: u64, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of Property__bindgen_ty_1"][::std::mem::size_of::<Property__bindgen_ty_1>() - 8usize]; + ["Alignment of Property__bindgen_ty_1"] + [::std::mem::align_of::<Property__bindgen_ty_1>() - 8usize]; + ["Offset of field: Property__bindgen_ty_1::i"] + [::std::mem::offset_of!(Property__bindgen_ty_1, i) - 0usize]; + ["Offset of field: Property__bindgen_ty_1::u"] + [::std::mem::offset_of!(Property__bindgen_ty_1, u) - 0usize]; +}; +impl Default for Property__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for Property__bindgen_ty_1 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "Property__bindgen_ty_1 {{ union }}") + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of Property"][::std::mem::size_of::<Property>() - 88usize]; + ["Alignment of Property"][::std::mem::align_of::<Property>() - 8usize]; + ["Offset of field: Property::name"][::std::mem::offset_of!(Property, name) - 0usize]; + ["Offset of field: Property::info"][::std::mem::offset_of!(Property, info) - 8usize]; + ["Offset of field: Property::offset"][::std::mem::offset_of!(Property, offset) - 16usize]; + ["Offset of field: Property::bitnr"][::std::mem::offset_of!(Property, bitnr) - 24usize]; + ["Offset of field: Property::bitmask"][::std::mem::offset_of!(Property, bitmask) - 32usize]; + ["Offset of field: Property::set_default"] + [::std::mem::offset_of!(Property, set_default) - 40usize]; + ["Offset of field: Property::defval"][::std::mem::offset_of!(Property, defval) - 48usize]; + ["Offset of field: Property::arrayoffset"] + [::std::mem::offset_of!(Property, arrayoffset) - 56usize]; + ["Offset of field: Property::arrayinfo"][::std::mem::offset_of!(Property, arrayinfo) - 64usize]; + ["Offset of field: Property::arrayfieldsize"] + [::std::mem::offset_of!(Property, arrayfieldsize) - 72usize]; + ["Offset of field: Property::link_type"][::std::mem::offset_of!(Property, link_type) - 80usize]; +}; +impl Default for Property { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for Property { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write ! (f , "Property {{ name: {:?}, info: {:?}, set_default: {:?}, defval: {:?}, arrayoffset: {:?}, arrayinfo: {:?}, arrayfieldsize: {:?}, link_type: {:?} }}" , self . name , self . info , self . set_default , self . defval , self . arrayoffset , self . arrayinfo , self . arrayfieldsize , self .
link_type) + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct PropertyInfo { + pub name: *const ::std::os::raw::c_char, + pub description: *const ::std::os::raw::c_char, + pub enum_table: *const QEnumLookup, + pub realized_set_allowed: bool, + pub print: ::std::option::Option< + unsafe extern "C" fn( + obj: *mut Object, + prop: *mut Property, + dest: *mut ::std::os::raw::c_char, + len: usize, + ) -> ::std::os::raw::c_int, + >, + pub set_default_value: + ::std::option::Option, + pub create: ::std::option::Option< + unsafe extern "C" fn( + oc: *mut ObjectClass, + name: *const ::std::os::raw::c_char, + prop: *mut Property, + ) -> *mut ObjectProperty, + >, + pub get: ObjectPropertyAccessor, + pub set: ObjectPropertyAccessor, + pub release: ObjectPropertyRelease, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of PropertyInfo"][::std::mem::size_of::<PropertyInfo>() - 80usize]; + ["Alignment of PropertyInfo"][::std::mem::align_of::<PropertyInfo>() - 8usize]; + ["Offset of field: PropertyInfo::name"][::std::mem::offset_of!(PropertyInfo, name) - 0usize]; + ["Offset of field: PropertyInfo::description"] + [::std::mem::offset_of!(PropertyInfo, description) - 8usize]; + ["Offset of field: PropertyInfo::enum_table"] + [::std::mem::offset_of!(PropertyInfo, enum_table) - 16usize]; + ["Offset of field: PropertyInfo::realized_set_allowed"] + [::std::mem::offset_of!(PropertyInfo, realized_set_allowed) - 24usize]; + ["Offset of field: PropertyInfo::print"][::std::mem::offset_of!(PropertyInfo, print) - 32usize]; + ["Offset of field: PropertyInfo::set_default_value"] + [::std::mem::offset_of!(PropertyInfo, set_default_value) - 40usize]; + ["Offset of field: PropertyInfo::create"] + [::std::mem::offset_of!(PropertyInfo, create) - 48usize]; + ["Offset of field: PropertyInfo::get"][::std::mem::offset_of!(PropertyInfo, get) - 56usize]; + ["Offset of field: PropertyInfo::set"][::std::mem::offset_of!(PropertyInfo, set) - 64usize]; + ["Offset of field: PropertyInfo::release"] + [::std::mem::offset_of!(PropertyInfo, release) - 72usize]; +}; +impl Default for PropertyInfo { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[doc = " X86CPU:\n @env: #CPUX86State\n @migratable: If set, only migratable flags will be accepted when \"enforce\"\n mode is used, and only migratable flags will be included in the \"host\"\n CPU model.\n\n An x86 CPU."] +pub type X86CPU = ArchCPU; +pub const OnOffAuto_ON_OFF_AUTO_AUTO: OnOffAuto = OnOffAuto(0); +pub const OnOffAuto_ON_OFF_AUTO_ON: OnOffAuto = OnOffAuto(1); +pub const OnOffAuto_ON_OFF_AUTO_OFF: OnOffAuto = OnOffAuto(2); +pub const OnOffAuto_ON_OFF_AUTO__MAX: OnOffAuto = OnOffAuto(3); +impl ::std::ops::BitOr for OnOffAuto { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + OnOffAuto(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for OnOffAuto { + #[inline] + fn bitor_assign(&mut self, rhs: OnOffAuto) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for OnOffAuto { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + OnOffAuto(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for OnOffAuto { + #[inline] + fn bitand_assign(&mut self, rhs: OnOffAuto) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct OnOffAuto(pub ::std::os::raw::c_uint); +pub type float16 = u16; +pub type float32 = u32; +pub type float64 = u64;
+#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct floatx80 { + pub low: u64, + pub high: u16, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of floatx80"][::std::mem::size_of::() - 16usize]; + ["Alignment of floatx80"][::std::mem::align_of::() - 8usize]; + ["Offset of field: floatx80::low"][::std::mem::offset_of!(floatx80, low) - 0usize]; + ["Offset of field: floatx80::high"][::std::mem::offset_of!(floatx80, high) - 8usize]; +}; +pub const FloatRoundMode_float_round_nearest_even: FloatRoundMode = FloatRoundMode(0); +pub const FloatRoundMode_float_round_down: FloatRoundMode = FloatRoundMode(1); +pub const FloatRoundMode_float_round_up: FloatRoundMode = FloatRoundMode(2); +pub const FloatRoundMode_float_round_to_zero: FloatRoundMode = FloatRoundMode(3); +pub const FloatRoundMode_float_round_ties_away: FloatRoundMode = FloatRoundMode(4); +pub const FloatRoundMode_float_round_to_odd: FloatRoundMode = FloatRoundMode(5); +pub const FloatRoundMode_float_round_to_odd_inf: FloatRoundMode = FloatRoundMode(6); +impl ::std::ops::BitOr for FloatRoundMode { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + FloatRoundMode(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for FloatRoundMode { + #[inline] + fn bitor_assign(&mut self, rhs: FloatRoundMode) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for FloatRoundMode { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + FloatRoundMode(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for FloatRoundMode { + #[inline] + fn bitand_assign(&mut self, rhs: FloatRoundMode) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct FloatRoundMode(pub ::std::os::raw::c_uchar); +pub const FloatX80RoundPrec_floatx80_precision_x: FloatX80RoundPrec = FloatX80RoundPrec(0); +pub const FloatX80RoundPrec_floatx80_precision_d: FloatX80RoundPrec = FloatX80RoundPrec(1); +pub const FloatX80RoundPrec_floatx80_precision_s: FloatX80RoundPrec = FloatX80RoundPrec(2); +impl ::std::ops::BitOr for FloatX80RoundPrec { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + FloatX80RoundPrec(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for FloatX80RoundPrec { + #[inline] + fn bitor_assign(&mut self, rhs: FloatX80RoundPrec) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for FloatX80RoundPrec { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + FloatX80RoundPrec(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for FloatX80RoundPrec { + #[inline] + fn bitand_assign(&mut self, rhs: FloatX80RoundPrec) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct FloatX80RoundPrec(pub ::std::os::raw::c_uchar); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct float_status { + pub float_exception_flags: u16, + pub float_rounding_mode: FloatRoundMode, + pub floatx80_rounding_precision: FloatX80RoundPrec, + pub tininess_before_rounding: bool, + pub flush_to_zero: bool, + pub flush_inputs_to_zero: bool, + pub default_nan_mode: bool, + pub snan_bit_is_one: bool, + pub use_first_nan: bool, + pub no_signaling_nans: bool, + pub rebias_overflow: bool, + pub rebias_underflow: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of float_status"][::std::mem::size_of::() - 14usize]; + ["Alignment of float_status"][::std::mem::align_of::() - 2usize]; + 
["Offset of field: float_status::float_exception_flags"] + [::std::mem::offset_of!(float_status, float_exception_flags) - 0usize]; + ["Offset of field: float_status::float_rounding_mode"] + [::std::mem::offset_of!(float_status, float_rounding_mode) - 2usize]; + ["Offset of field: float_status::floatx80_rounding_precision"] + [::std::mem::offset_of!(float_status, floatx80_rounding_precision) - 3usize]; + ["Offset of field: float_status::tininess_before_rounding"] + [::std::mem::offset_of!(float_status, tininess_before_rounding) - 4usize]; + ["Offset of field: float_status::flush_to_zero"] + [::std::mem::offset_of!(float_status, flush_to_zero) - 5usize]; + ["Offset of field: float_status::flush_inputs_to_zero"] + [::std::mem::offset_of!(float_status, flush_inputs_to_zero) - 6usize]; + ["Offset of field: float_status::default_nan_mode"] + [::std::mem::offset_of!(float_status, default_nan_mode) - 7usize]; + ["Offset of field: float_status::snan_bit_is_one"] + [::std::mem::offset_of!(float_status, snan_bit_is_one) - 8usize]; + ["Offset of field: float_status::use_first_nan"] + [::std::mem::offset_of!(float_status, use_first_nan) - 9usize]; + ["Offset of field: float_status::no_signaling_nans"] + [::std::mem::offset_of!(float_status, no_signaling_nans) - 10usize]; + ["Offset of field: float_status::rebias_overflow"] + [::std::mem::offset_of!(float_status, rebias_overflow) - 11usize]; + ["Offset of field: float_status::rebias_underflow"] + [::std::mem::offset_of!(float_status, rebias_underflow) - 12usize]; +}; +impl Default for float_status { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub type FeatureWordArray = [u64; 39usize]; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct SegmentCache { + pub selector: u32, + pub base: target_ulong, + pub limit: u32, + pub flags: u32, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of SegmentCache"][::std::mem::size_of::() - 24usize]; + ["Alignment of SegmentCache"][::std::mem::align_of::() - 8usize]; + ["Offset of field: SegmentCache::selector"] + [::std::mem::offset_of!(SegmentCache, selector) - 0usize]; + ["Offset of field: SegmentCache::base"][::std::mem::offset_of!(SegmentCache, base) - 8usize]; + ["Offset of field: SegmentCache::limit"][::std::mem::offset_of!(SegmentCache, limit) - 16usize]; + ["Offset of field: SegmentCache::flags"][::std::mem::offset_of!(SegmentCache, flags) - 20usize]; +}; +#[repr(C)] +#[derive(Copy, Clone)] +pub union MMXReg { + pub _b_MMXReg: [u8; 8usize], + pub _w_MMXReg: [u16; 4usize], + pub _l_MMXReg: [u32; 2usize], + pub _q_MMXReg: [u64; 1usize], + pub _s_MMXReg: [float32; 2usize], + pub _d_MMXReg: [float64; 1usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of MMXReg"][::std::mem::size_of::() - 8usize]; + ["Alignment of MMXReg"][::std::mem::align_of::() - 8usize]; + ["Offset of field: MMXReg::_b_MMXReg"][::std::mem::offset_of!(MMXReg, _b_MMXReg) - 0usize]; + ["Offset of field: MMXReg::_w_MMXReg"][::std::mem::offset_of!(MMXReg, _w_MMXReg) - 0usize]; + ["Offset of field: MMXReg::_l_MMXReg"][::std::mem::offset_of!(MMXReg, _l_MMXReg) - 0usize]; + ["Offset of field: MMXReg::_q_MMXReg"][::std::mem::offset_of!(MMXReg, _q_MMXReg) - 0usize]; + ["Offset of field: MMXReg::_s_MMXReg"][::std::mem::offset_of!(MMXReg, _s_MMXReg) - 0usize]; + ["Offset of field: MMXReg::_d_MMXReg"][::std::mem::offset_of!(MMXReg, 
_d_MMXReg) - 0usize]; +}; +impl Default for MMXReg { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for MMXReg { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "MMXReg {{ union }}") + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union XMMReg { + pub _q_XMMReg: [u64; 2usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of XMMReg"][::std::mem::size_of::<XMMReg>() - 16usize]; + ["Alignment of XMMReg"][::std::mem::align_of::<XMMReg>() - 8usize]; + ["Offset of field: XMMReg::_q_XMMReg"][::std::mem::offset_of!(XMMReg, _q_XMMReg) - 0usize]; +}; +impl Default for XMMReg { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for XMMReg { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "XMMReg {{ union }}") + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union YMMReg { + pub _q_YMMReg: [u64; 4usize], + pub _x_YMMReg: [XMMReg; 2usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of YMMReg"][::std::mem::size_of::<YMMReg>() - 32usize]; + ["Alignment of YMMReg"][::std::mem::align_of::<YMMReg>() - 8usize]; + ["Offset of field: YMMReg::_q_YMMReg"][::std::mem::offset_of!(YMMReg, _q_YMMReg) - 0usize]; + ["Offset of field: YMMReg::_x_YMMReg"][::std::mem::offset_of!(YMMReg, _x_YMMReg) - 0usize]; +}; +impl Default for YMMReg { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for YMMReg { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "YMMReg {{ union }}") + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union ZMMReg { + pub _b_ZMMReg: [u8; 64usize], + pub _w_ZMMReg: [u16; 32usize], + pub _l_ZMMReg: [u32; 16usize], + pub _q_ZMMReg: [u64; 8usize], + pub _h_ZMMReg: [float16; 32usize], + pub _s_ZMMReg: [float32; 16usize], + pub _d_ZMMReg: [float64; 8usize], + pub _x_ZMMReg: [XMMReg; 4usize], + pub _y_ZMMReg: [YMMReg; 2usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of ZMMReg"][::std::mem::size_of::<ZMMReg>() - 64usize]; + ["Alignment of ZMMReg"][::std::mem::align_of::<ZMMReg>() - 8usize]; + ["Offset of field: ZMMReg::_b_ZMMReg"][::std::mem::offset_of!(ZMMReg, _b_ZMMReg) - 0usize]; + ["Offset of field: ZMMReg::_w_ZMMReg"][::std::mem::offset_of!(ZMMReg, _w_ZMMReg) - 0usize]; + ["Offset of field: ZMMReg::_l_ZMMReg"][::std::mem::offset_of!(ZMMReg, _l_ZMMReg) - 0usize]; + ["Offset of field: ZMMReg::_q_ZMMReg"][::std::mem::offset_of!(ZMMReg, _q_ZMMReg) - 0usize]; + ["Offset of field: ZMMReg::_h_ZMMReg"][::std::mem::offset_of!(ZMMReg, _h_ZMMReg) - 0usize]; + ["Offset of field: ZMMReg::_s_ZMMReg"][::std::mem::offset_of!(ZMMReg, _s_ZMMReg) - 0usize]; + ["Offset of field: ZMMReg::_d_ZMMReg"][::std::mem::offset_of!(ZMMReg, _d_ZMMReg) - 0usize]; + ["Offset of field: ZMMReg::_x_ZMMReg"][::std::mem::offset_of!(ZMMReg, _x_ZMMReg) - 0usize]; + ["Offset of field: ZMMReg::_y_ZMMReg"][::std::mem::offset_of!(ZMMReg, _y_ZMMReg) - 0usize]; +}; +impl Default for ZMMReg { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); +
s.assume_init() + } + } +} +impl ::std::fmt::Debug for ZMMReg { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "ZMMReg {{ union }}") + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct BNDReg { + pub lb: u64, + pub ub: u64, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of BNDReg"][::std::mem::size_of::() - 16usize]; + ["Alignment of BNDReg"][::std::mem::align_of::() - 8usize]; + ["Offset of field: BNDReg::lb"][::std::mem::offset_of!(BNDReg, lb) - 0usize]; + ["Offset of field: BNDReg::ub"][::std::mem::offset_of!(BNDReg, ub) - 8usize]; +}; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct BNDCSReg { + pub cfgu: u64, + pub sts: u64, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of BNDCSReg"][::std::mem::size_of::() - 16usize]; + ["Alignment of BNDCSReg"][::std::mem::align_of::() - 8usize]; + ["Offset of field: BNDCSReg::cfgu"][::std::mem::offset_of!(BNDCSReg, cfgu) - 0usize]; + ["Offset of field: BNDCSReg::sts"][::std::mem::offset_of!(BNDCSReg, sts) - 8usize]; +}; +#[repr(C)] +#[repr(align(16))] +#[derive(Copy, Clone)] +pub union FPReg { + pub d: floatx80, + pub mmx: MMXReg, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of FPReg"][::std::mem::size_of::() - 16usize]; + ["Alignment of FPReg"][::std::mem::align_of::() - 16usize]; + ["Offset of field: FPReg::d"][::std::mem::offset_of!(FPReg, d) - 0usize]; + ["Offset of field: FPReg::mmx"][::std::mem::offset_of!(FPReg, mmx) - 0usize]; +}; +impl Default for FPReg { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for FPReg { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "FPReg {{ union }}") + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct MTRRVar { + pub base: u64, + pub mask: u64, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of MTRRVar"][::std::mem::size_of::() - 16usize]; + ["Alignment of MTRRVar"][::std::mem::align_of::() - 8usize]; + ["Offset of field: MTRRVar::base"][::std::mem::offset_of!(MTRRVar, base) - 0usize]; + ["Offset of field: MTRRVar::mask"][::std::mem::offset_of!(MTRRVar, mask) - 8usize]; +}; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct LBREntry { + pub from: u64, + pub to: u64, + pub info: u64, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of LBREntry"][::std::mem::size_of::() - 24usize]; + ["Alignment of LBREntry"][::std::mem::align_of::() - 8usize]; + ["Offset of field: LBREntry::from"][::std::mem::offset_of!(LBREntry, from) - 0usize]; + ["Offset of field: LBREntry::to"][::std::mem::offset_of!(LBREntry, to) - 8usize]; + ["Offset of field: LBREntry::info"][::std::mem::offset_of!(LBREntry, info) - 16usize]; +}; +pub const TPRAccess_TPR_ACCESS_READ: TPRAccess = TPRAccess(0); +pub const TPRAccess_TPR_ACCESS_WRITE: TPRAccess = TPRAccess(1); +impl ::std::ops::BitOr for TPRAccess { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + TPRAccess(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for TPRAccess { + #[inline] + fn bitor_assign(&mut self, rhs: TPRAccess) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for TPRAccess { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> 
Self { + TPRAccess(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for TPRAccess { + #[inline] + fn bitand_assign(&mut self, rhs: TPRAccess) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct TPRAccess(pub ::std::os::raw::c_uint); +pub const CacheType_DATA_CACHE: CacheType = CacheType(0); +pub const CacheType_INSTRUCTION_CACHE: CacheType = CacheType(1); +pub const CacheType_UNIFIED_CACHE: CacheType = CacheType(2); +impl ::std::ops::BitOr for CacheType { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + CacheType(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for CacheType { + #[inline] + fn bitor_assign(&mut self, rhs: CacheType) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for CacheType { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + CacheType(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for CacheType { + #[inline] + fn bitand_assign(&mut self, rhs: CacheType) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct CacheType(pub ::std::os::raw::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CPUCacheInfo { + pub type_: CacheType, + pub level: u8, + pub size: u32, + pub line_size: u16, + pub associativity: u8, + pub partitions: u8, + pub sets: u32, + pub lines_per_tag: u8, + pub self_init: bool, + pub no_invd_sharing: bool, + pub inclusive: bool, + pub complex_indexing: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUCacheInfo"][::std::mem::size_of::() - 28usize]; + ["Alignment of CPUCacheInfo"][::std::mem::align_of::() - 4usize]; + ["Offset of field: CPUCacheInfo::type_"][::std::mem::offset_of!(CPUCacheInfo, type_) - 0usize]; + ["Offset of field: CPUCacheInfo::level"][::std::mem::offset_of!(CPUCacheInfo, level) - 4usize]; + ["Offset of field: CPUCacheInfo::size"][::std::mem::offset_of!(CPUCacheInfo, size) - 8usize]; + ["Offset of field: CPUCacheInfo::line_size"] + [::std::mem::offset_of!(CPUCacheInfo, line_size) - 12usize]; + ["Offset of field: CPUCacheInfo::associativity"] + [::std::mem::offset_of!(CPUCacheInfo, associativity) - 14usize]; + ["Offset of field: CPUCacheInfo::partitions"] + [::std::mem::offset_of!(CPUCacheInfo, partitions) - 15usize]; + ["Offset of field: CPUCacheInfo::sets"][::std::mem::offset_of!(CPUCacheInfo, sets) - 16usize]; + ["Offset of field: CPUCacheInfo::lines_per_tag"] + [::std::mem::offset_of!(CPUCacheInfo, lines_per_tag) - 20usize]; + ["Offset of field: CPUCacheInfo::self_init"] + [::std::mem::offset_of!(CPUCacheInfo, self_init) - 21usize]; + ["Offset of field: CPUCacheInfo::no_invd_sharing"] + [::std::mem::offset_of!(CPUCacheInfo, no_invd_sharing) - 22usize]; + ["Offset of field: CPUCacheInfo::inclusive"] + [::std::mem::offset_of!(CPUCacheInfo, inclusive) - 23usize]; + ["Offset of field: CPUCacheInfo::complex_indexing"] + [::std::mem::offset_of!(CPUCacheInfo, complex_indexing) - 24usize]; +}; +impl Default for CPUCacheInfo { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct CPUCaches { + pub l1d_cache: *mut CPUCacheInfo, + pub l1i_cache: *mut CPUCacheInfo, + pub l2_cache: *mut CPUCacheInfo, + pub l3_cache: *mut CPUCacheInfo, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size 
of CPUCaches"][::std::mem::size_of::() - 32usize]; + ["Alignment of CPUCaches"][::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUCaches::l1d_cache"] + [::std::mem::offset_of!(CPUCaches, l1d_cache) - 0usize]; + ["Offset of field: CPUCaches::l1i_cache"] + [::std::mem::offset_of!(CPUCaches, l1i_cache) - 8usize]; + ["Offset of field: CPUCaches::l2_cache"][::std::mem::offset_of!(CPUCaches, l2_cache) - 16usize]; + ["Offset of field: CPUCaches::l3_cache"][::std::mem::offset_of!(CPUCaches, l3_cache) - 24usize]; +}; +impl Default for CPUCaches { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[repr(align(16))] +#[derive(Copy, Clone)] +pub struct CPUArchState { + pub regs: [target_ulong; 16usize], + pub eip: target_ulong, + pub eflags: target_ulong, + pub cc_dst: target_ulong, + pub cc_src: target_ulong, + pub cc_src2: target_ulong, + pub cc_op: u32, + pub df: i32, + pub hflags: u32, + pub hflags2: u32, + pub segs: [SegmentCache; 6usize], + pub ldt: SegmentCache, + pub tr: SegmentCache, + pub gdt: SegmentCache, + pub idt: SegmentCache, + pub cr: [target_ulong; 5usize], + pub pdptrs_valid: bool, + pub pdptrs: [u64; 4usize], + pub a20_mask: i32, + pub bnd_regs: [BNDReg; 4usize], + pub bndcs_regs: BNDCSReg, + pub msr_bndcfgs: u64, + pub efer: u64, + pub start_init_save: CPUArchState__bindgen_ty_1, + pub fpstt: ::std::os::raw::c_uint, + pub fpus: u16, + pub fpuc: u16, + pub fptags: [u8; 8usize], + pub fpregs: [FPReg; 8usize], + pub fpop: u16, + pub fpcs: u16, + pub fpds: u16, + pub fpip: u64, + pub fpdp: u64, + pub fp_status: float_status, + pub ft0: floatx80, + pub mmx_status: float_status, + pub sse_status: float_status, + pub mxcsr: u32, + pub __bindgen_padding_0: u64, + pub xmm_regs: [ZMMReg; 32usize], + pub xmm_t0: ZMMReg, + pub mmx_t0: MMXReg, + pub opmask_regs: [u64; 8usize], + pub xtilecfg: [u8; 64usize], + pub xtiledata: [u8; 8192usize], + pub sysenter_cs: u32, + pub sysenter_esp: target_ulong, + pub sysenter_eip: target_ulong, + pub star: u64, + pub vm_hsave: u64, + pub lstar: target_ulong, + pub cstar: target_ulong, + pub fmask: target_ulong, + pub kernelgsbase: target_ulong, + pub tsc_adjust: u64, + pub tsc_deadline: u64, + pub tsc_aux: u64, + pub xcr0: u64, + pub mcg_status: u64, + pub msr_ia32_misc_enable: u64, + pub msr_ia32_feature_control: u64, + pub msr_ia32_sgxlepubkeyhash: [u64; 4usize], + pub msr_fixed_ctr_ctrl: u64, + pub msr_global_ctrl: u64, + pub msr_global_status: u64, + pub msr_global_ovf_ctrl: u64, + pub msr_fixed_counters: [u64; 3usize], + pub msr_gp_counters: [u64; 18usize], + pub msr_gp_evtsel: [u64; 18usize], + pub pat: u64, + pub smbase: u32, + pub msr_smi_count: u64, + pub pkru: u32, + pub pkrs: u32, + pub tsx_ctrl: u32, + pub spec_ctrl: u64, + pub amd_tsc_scale_msr: u64, + pub virt_ssbd: u64, + pub end_init_save: CPUArchState__bindgen_ty_2, + pub system_time_msr: u64, + pub wall_clock_msr: u64, + pub steal_time_msr: u64, + pub async_pf_en_msr: u64, + pub async_pf_int_msr: u64, + pub pv_eoi_en_msr: u64, + pub poll_control_msr: u64, + pub msr_hv_hypercall: u64, + pub msr_hv_guest_os_id: u64, + pub msr_hv_tsc: u64, + pub msr_hv_syndbg_control: u64, + pub msr_hv_syndbg_status: u64, + pub msr_hv_syndbg_send_page: u64, + pub msr_hv_syndbg_recv_page: u64, + pub msr_hv_syndbg_pending_page: u64, + pub msr_hv_syndbg_options: u64, + pub msr_hv_vapic: u64, + pub msr_hv_crash_params: [u64; 5usize], + pub msr_hv_runtime: u64, + pub 
msr_hv_synic_control: u64, + pub msr_hv_synic_evt_page: u64, + pub msr_hv_synic_msg_page: u64, + pub msr_hv_synic_sint: [u64; 16usize], + pub msr_hv_stimer_config: [u64; 4usize], + pub msr_hv_stimer_count: [u64; 4usize], + pub msr_hv_reenlightenment_control: u64, + pub msr_hv_tsc_emulation_control: u64, + pub msr_hv_tsc_emulation_status: u64, + pub msr_rtit_ctrl: u64, + pub msr_rtit_status: u64, + pub msr_rtit_output_base: u64, + pub msr_rtit_output_mask: u64, + pub msr_rtit_cr3_match: u64, + pub msr_rtit_addrs: [u64; 8usize], + pub msr_xfd: u64, + pub msr_xfd_err: u64, + pub msr_lbr_ctl: u64, + pub msr_lbr_depth: u64, + pub lbr_records: [LBREntry; 32usize], + pub error_code: ::std::os::raw::c_int, + pub exception_is_int: ::std::os::raw::c_int, + pub exception_next_eip: target_ulong, + pub dr: [target_ulong; 8usize], + pub __bindgen_anon_1: CPUArchState__bindgen_ty_3, + pub old_exception: ::std::os::raw::c_int, + pub vm_vmcb: u64, + pub tsc_offset: u64, + pub intercept: u64, + pub intercept_cr_read: u16, + pub intercept_cr_write: u16, + pub intercept_dr_read: u16, + pub intercept_dr_write: u16, + pub intercept_exceptions: u32, + pub nested_cr3: u64, + pub nested_pg_mode: u32, + pub v_tpr: u8, + pub int_ctl: u32, + pub nmi_injected: u8, + pub nmi_pending: u8, + pub retaddr: usize, + pub end_reset_fields: CPUArchState__bindgen_ty_4, + pub cpuid_level_func7: u32, + pub cpuid_min_level_func7: u32, + pub cpuid_min_level: u32, + pub cpuid_min_xlevel: u32, + pub cpuid_min_xlevel2: u32, + pub cpuid_max_level: u32, + pub cpuid_max_xlevel: u32, + pub cpuid_max_xlevel2: u32, + pub cpuid_level: u32, + pub cpuid_xlevel: u32, + pub cpuid_xlevel2: u32, + pub cpuid_vendor1: u32, + pub cpuid_vendor2: u32, + pub cpuid_vendor3: u32, + pub cpuid_version: u32, + pub features: FeatureWordArray, + pub user_features: FeatureWordArray, + pub cpuid_model: [u32; 12usize], + pub cache_info_cpuid2: CPUCaches, + pub cache_info_cpuid4: CPUCaches, + pub cache_info_amd: CPUCaches, + pub mtrr_fixed: [u64; 11usize], + pub mtrr_deftype: u64, + pub mtrr_var: [MTRRVar; 8usize], + pub mp_state: u32, + pub exception_nr: i32, + pub interrupt_injected: i32, + pub soft_interrupt: u8, + pub exception_pending: u8, + pub exception_injected: u8, + pub has_error_code: u8, + pub exception_has_payload: u8, + pub exception_payload: u64, + pub triple_fault_pending: u8, + pub ins_len: u32, + pub sipi_vector: u32, + pub tsc_valid: bool, + pub tsc_khz: i64, + pub user_tsc_khz: i64, + pub apic_bus_freq: u64, + pub tsc: u64, + pub mcg_cap: u64, + pub mcg_ctl: u64, + pub mcg_ext_ctl: u64, + pub mce_banks: [u64; 40usize], + pub xstate_bv: u64, + pub fpus_vmstate: u16, + pub fptag_vmstate: u16, + pub fpregs_format_vmstate: u16, + pub xss: u64, + pub umwait: u32, + pub tpr_access_type: TPRAccess, + pub nr_dies: ::std::os::raw::c_uint, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct CPUArchState__bindgen_ty_1 {} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUArchState__bindgen_ty_1"] + [::std::mem::size_of::() - 0usize]; + ["Alignment of CPUArchState__bindgen_ty_1"] + [::std::mem::align_of::() - 1usize]; +}; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct CPUArchState__bindgen_ty_2 {} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUArchState__bindgen_ty_2"] + [::std::mem::size_of::() - 0usize]; + ["Alignment of CPUArchState__bindgen_ty_2"] + [::std::mem::align_of::() - 1usize]; +}; +#[repr(C)] +#[derive(Copy, Clone)] +pub 
union CPUArchState__bindgen_ty_3 { + pub cpu_breakpoint: [*mut CPUBreakpoint; 4usize], + pub cpu_watchpoint: [*mut CPUWatchpoint; 4usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUArchState__bindgen_ty_3"] + [::std::mem::size_of::() - 32usize]; + ["Alignment of CPUArchState__bindgen_ty_3"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUArchState__bindgen_ty_3::cpu_breakpoint"] + [::std::mem::offset_of!(CPUArchState__bindgen_ty_3, cpu_breakpoint) - 0usize]; + ["Offset of field: CPUArchState__bindgen_ty_3::cpu_watchpoint"] + [::std::mem::offset_of!(CPUArchState__bindgen_ty_3, cpu_watchpoint) - 0usize]; +}; +impl Default for CPUArchState__bindgen_ty_3 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUArchState__bindgen_ty_3 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "CPUArchState__bindgen_ty_3 {{ union }}") + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct CPUArchState__bindgen_ty_4 {} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUArchState__bindgen_ty_4"] + [::std::mem::size_of::() - 0usize]; + ["Alignment of CPUArchState__bindgen_ty_4"] + [::std::mem::align_of::() - 1usize]; +}; +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUArchState"][::std::mem::size_of::() - 14896usize]; + ["Alignment of CPUArchState"][::std::mem::align_of::() - 16usize]; + ["Offset of field: CPUArchState::regs"][::std::mem::offset_of!(CPUArchState, regs) - 0usize]; + ["Offset of field: CPUArchState::eip"][::std::mem::offset_of!(CPUArchState, eip) - 128usize]; + ["Offset of field: CPUArchState::eflags"] + [::std::mem::offset_of!(CPUArchState, eflags) - 136usize]; + ["Offset of field: CPUArchState::cc_dst"] + [::std::mem::offset_of!(CPUArchState, cc_dst) - 144usize]; + ["Offset of field: CPUArchState::cc_src"] + [::std::mem::offset_of!(CPUArchState, cc_src) - 152usize]; + ["Offset of field: CPUArchState::cc_src2"] + [::std::mem::offset_of!(CPUArchState, cc_src2) - 160usize]; + ["Offset of field: CPUArchState::cc_op"] + [::std::mem::offset_of!(CPUArchState, cc_op) - 168usize]; + ["Offset of field: CPUArchState::df"][::std::mem::offset_of!(CPUArchState, df) - 172usize]; + ["Offset of field: CPUArchState::hflags"] + [::std::mem::offset_of!(CPUArchState, hflags) - 176usize]; + ["Offset of field: CPUArchState::hflags2"] + [::std::mem::offset_of!(CPUArchState, hflags2) - 180usize]; + ["Offset of field: CPUArchState::segs"][::std::mem::offset_of!(CPUArchState, segs) - 184usize]; + ["Offset of field: CPUArchState::ldt"][::std::mem::offset_of!(CPUArchState, ldt) - 328usize]; + ["Offset of field: CPUArchState::tr"][::std::mem::offset_of!(CPUArchState, tr) - 352usize]; + ["Offset of field: CPUArchState::gdt"][::std::mem::offset_of!(CPUArchState, gdt) - 376usize]; + ["Offset of field: CPUArchState::idt"][::std::mem::offset_of!(CPUArchState, idt) - 400usize]; + ["Offset of field: CPUArchState::cr"][::std::mem::offset_of!(CPUArchState, cr) - 424usize]; + ["Offset of field: CPUArchState::pdptrs_valid"] + [::std::mem::offset_of!(CPUArchState, pdptrs_valid) - 464usize]; + ["Offset of field: CPUArchState::pdptrs"] + [::std::mem::offset_of!(CPUArchState, pdptrs) - 472usize]; + ["Offset of field: CPUArchState::a20_mask"] + [::std::mem::offset_of!(CPUArchState, 
a20_mask) - 504usize]; + ["Offset of field: CPUArchState::bnd_regs"] + [::std::mem::offset_of!(CPUArchState, bnd_regs) - 512usize]; + ["Offset of field: CPUArchState::bndcs_regs"] + [::std::mem::offset_of!(CPUArchState, bndcs_regs) - 576usize]; + ["Offset of field: CPUArchState::msr_bndcfgs"] + [::std::mem::offset_of!(CPUArchState, msr_bndcfgs) - 592usize]; + ["Offset of field: CPUArchState::efer"][::std::mem::offset_of!(CPUArchState, efer) - 600usize]; + ["Offset of field: CPUArchState::start_init_save"] + [::std::mem::offset_of!(CPUArchState, start_init_save) - 608usize]; + ["Offset of field: CPUArchState::fpstt"] + [::std::mem::offset_of!(CPUArchState, fpstt) - 608usize]; + ["Offset of field: CPUArchState::fpus"][::std::mem::offset_of!(CPUArchState, fpus) - 612usize]; + ["Offset of field: CPUArchState::fpuc"][::std::mem::offset_of!(CPUArchState, fpuc) - 614usize]; + ["Offset of field: CPUArchState::fptags"] + [::std::mem::offset_of!(CPUArchState, fptags) - 616usize]; + ["Offset of field: CPUArchState::fpregs"] + [::std::mem::offset_of!(CPUArchState, fpregs) - 624usize]; + ["Offset of field: CPUArchState::fpop"][::std::mem::offset_of!(CPUArchState, fpop) - 752usize]; + ["Offset of field: CPUArchState::fpcs"][::std::mem::offset_of!(CPUArchState, fpcs) - 754usize]; + ["Offset of field: CPUArchState::fpds"][::std::mem::offset_of!(CPUArchState, fpds) - 756usize]; + ["Offset of field: CPUArchState::fpip"][::std::mem::offset_of!(CPUArchState, fpip) - 760usize]; + ["Offset of field: CPUArchState::fpdp"][::std::mem::offset_of!(CPUArchState, fpdp) - 768usize]; + ["Offset of field: CPUArchState::fp_status"] + [::std::mem::offset_of!(CPUArchState, fp_status) - 776usize]; + ["Offset of field: CPUArchState::ft0"][::std::mem::offset_of!(CPUArchState, ft0) - 792usize]; + ["Offset of field: CPUArchState::mmx_status"] + [::std::mem::offset_of!(CPUArchState, mmx_status) - 808usize]; + ["Offset of field: CPUArchState::sse_status"] + [::std::mem::offset_of!(CPUArchState, sse_status) - 822usize]; + ["Offset of field: CPUArchState::mxcsr"] + [::std::mem::offset_of!(CPUArchState, mxcsr) - 836usize]; + ["Offset of field: CPUArchState::xmm_regs"] + [::std::mem::offset_of!(CPUArchState, xmm_regs) - 848usize]; + ["Offset of field: CPUArchState::xmm_t0"] + [::std::mem::offset_of!(CPUArchState, xmm_t0) - 2896usize]; + ["Offset of field: CPUArchState::mmx_t0"] + [::std::mem::offset_of!(CPUArchState, mmx_t0) - 2960usize]; + ["Offset of field: CPUArchState::opmask_regs"] + [::std::mem::offset_of!(CPUArchState, opmask_regs) - 2968usize]; + ["Offset of field: CPUArchState::xtilecfg"] + [::std::mem::offset_of!(CPUArchState, xtilecfg) - 3032usize]; + ["Offset of field: CPUArchState::xtiledata"] + [::std::mem::offset_of!(CPUArchState, xtiledata) - 3096usize]; + ["Offset of field: CPUArchState::sysenter_cs"] + [::std::mem::offset_of!(CPUArchState, sysenter_cs) - 11288usize]; + ["Offset of field: CPUArchState::sysenter_esp"] + [::std::mem::offset_of!(CPUArchState, sysenter_esp) - 11296usize]; + ["Offset of field: CPUArchState::sysenter_eip"] + [::std::mem::offset_of!(CPUArchState, sysenter_eip) - 11304usize]; + ["Offset of field: CPUArchState::star"] + [::std::mem::offset_of!(CPUArchState, star) - 11312usize]; + ["Offset of field: CPUArchState::vm_hsave"] + [::std::mem::offset_of!(CPUArchState, vm_hsave) - 11320usize]; + ["Offset of field: CPUArchState::lstar"] + [::std::mem::offset_of!(CPUArchState, lstar) - 11328usize]; + ["Offset of field: CPUArchState::cstar"] + [::std::mem::offset_of!(CPUArchState, cstar) - 11336usize]; 
+ ["Offset of field: CPUArchState::fmask"] + [::std::mem::offset_of!(CPUArchState, fmask) - 11344usize]; + ["Offset of field: CPUArchState::kernelgsbase"] + [::std::mem::offset_of!(CPUArchState, kernelgsbase) - 11352usize]; + ["Offset of field: CPUArchState::tsc_adjust"] + [::std::mem::offset_of!(CPUArchState, tsc_adjust) - 11360usize]; + ["Offset of field: CPUArchState::tsc_deadline"] + [::std::mem::offset_of!(CPUArchState, tsc_deadline) - 11368usize]; + ["Offset of field: CPUArchState::tsc_aux"] + [::std::mem::offset_of!(CPUArchState, tsc_aux) - 11376usize]; + ["Offset of field: CPUArchState::xcr0"] + [::std::mem::offset_of!(CPUArchState, xcr0) - 11384usize]; + ["Offset of field: CPUArchState::mcg_status"] + [::std::mem::offset_of!(CPUArchState, mcg_status) - 11392usize]; + ["Offset of field: CPUArchState::msr_ia32_misc_enable"] + [::std::mem::offset_of!(CPUArchState, msr_ia32_misc_enable) - 11400usize]; + ["Offset of field: CPUArchState::msr_ia32_feature_control"] + [::std::mem::offset_of!(CPUArchState, msr_ia32_feature_control) - 11408usize]; + ["Offset of field: CPUArchState::msr_ia32_sgxlepubkeyhash"] + [::std::mem::offset_of!(CPUArchState, msr_ia32_sgxlepubkeyhash) - 11416usize]; + ["Offset of field: CPUArchState::msr_fixed_ctr_ctrl"] + [::std::mem::offset_of!(CPUArchState, msr_fixed_ctr_ctrl) - 11448usize]; + ["Offset of field: CPUArchState::msr_global_ctrl"] + [::std::mem::offset_of!(CPUArchState, msr_global_ctrl) - 11456usize]; + ["Offset of field: CPUArchState::msr_global_status"] + [::std::mem::offset_of!(CPUArchState, msr_global_status) - 11464usize]; + ["Offset of field: CPUArchState::msr_global_ovf_ctrl"] + [::std::mem::offset_of!(CPUArchState, msr_global_ovf_ctrl) - 11472usize]; + ["Offset of field: CPUArchState::msr_fixed_counters"] + [::std::mem::offset_of!(CPUArchState, msr_fixed_counters) - 11480usize]; + ["Offset of field: CPUArchState::msr_gp_counters"] + [::std::mem::offset_of!(CPUArchState, msr_gp_counters) - 11504usize]; + ["Offset of field: CPUArchState::msr_gp_evtsel"] + [::std::mem::offset_of!(CPUArchState, msr_gp_evtsel) - 11648usize]; + ["Offset of field: CPUArchState::pat"][::std::mem::offset_of!(CPUArchState, pat) - 11792usize]; + ["Offset of field: CPUArchState::smbase"] + [::std::mem::offset_of!(CPUArchState, smbase) - 11800usize]; + ["Offset of field: CPUArchState::msr_smi_count"] + [::std::mem::offset_of!(CPUArchState, msr_smi_count) - 11808usize]; + ["Offset of field: CPUArchState::pkru"] + [::std::mem::offset_of!(CPUArchState, pkru) - 11816usize]; + ["Offset of field: CPUArchState::pkrs"] + [::std::mem::offset_of!(CPUArchState, pkrs) - 11820usize]; + ["Offset of field: CPUArchState::tsx_ctrl"] + [::std::mem::offset_of!(CPUArchState, tsx_ctrl) - 11824usize]; + ["Offset of field: CPUArchState::spec_ctrl"] + [::std::mem::offset_of!(CPUArchState, spec_ctrl) - 11832usize]; + ["Offset of field: CPUArchState::amd_tsc_scale_msr"] + [::std::mem::offset_of!(CPUArchState, amd_tsc_scale_msr) - 11840usize]; + ["Offset of field: CPUArchState::virt_ssbd"] + [::std::mem::offset_of!(CPUArchState, virt_ssbd) - 11848usize]; + ["Offset of field: CPUArchState::end_init_save"] + [::std::mem::offset_of!(CPUArchState, end_init_save) - 11856usize]; + ["Offset of field: CPUArchState::system_time_msr"] + [::std::mem::offset_of!(CPUArchState, system_time_msr) - 11856usize]; + ["Offset of field: CPUArchState::wall_clock_msr"] + [::std::mem::offset_of!(CPUArchState, wall_clock_msr) - 11864usize]; + ["Offset of field: CPUArchState::steal_time_msr"] + 
[::std::mem::offset_of!(CPUArchState, steal_time_msr) - 11872usize]; + ["Offset of field: CPUArchState::async_pf_en_msr"] + [::std::mem::offset_of!(CPUArchState, async_pf_en_msr) - 11880usize]; + ["Offset of field: CPUArchState::async_pf_int_msr"] + [::std::mem::offset_of!(CPUArchState, async_pf_int_msr) - 11888usize]; + ["Offset of field: CPUArchState::pv_eoi_en_msr"] + [::std::mem::offset_of!(CPUArchState, pv_eoi_en_msr) - 11896usize]; + ["Offset of field: CPUArchState::poll_control_msr"] + [::std::mem::offset_of!(CPUArchState, poll_control_msr) - 11904usize]; + ["Offset of field: CPUArchState::msr_hv_hypercall"] + [::std::mem::offset_of!(CPUArchState, msr_hv_hypercall) - 11912usize]; + ["Offset of field: CPUArchState::msr_hv_guest_os_id"] + [::std::mem::offset_of!(CPUArchState, msr_hv_guest_os_id) - 11920usize]; + ["Offset of field: CPUArchState::msr_hv_tsc"] + [::std::mem::offset_of!(CPUArchState, msr_hv_tsc) - 11928usize]; + ["Offset of field: CPUArchState::msr_hv_syndbg_control"] + [::std::mem::offset_of!(CPUArchState, msr_hv_syndbg_control) - 11936usize]; + ["Offset of field: CPUArchState::msr_hv_syndbg_status"] + [::std::mem::offset_of!(CPUArchState, msr_hv_syndbg_status) - 11944usize]; + ["Offset of field: CPUArchState::msr_hv_syndbg_send_page"] + [::std::mem::offset_of!(CPUArchState, msr_hv_syndbg_send_page) - 11952usize]; + ["Offset of field: CPUArchState::msr_hv_syndbg_recv_page"] + [::std::mem::offset_of!(CPUArchState, msr_hv_syndbg_recv_page) - 11960usize]; + ["Offset of field: CPUArchState::msr_hv_syndbg_pending_page"] + [::std::mem::offset_of!(CPUArchState, msr_hv_syndbg_pending_page) - 11968usize]; + ["Offset of field: CPUArchState::msr_hv_syndbg_options"] + [::std::mem::offset_of!(CPUArchState, msr_hv_syndbg_options) - 11976usize]; + ["Offset of field: CPUArchState::msr_hv_vapic"] + [::std::mem::offset_of!(CPUArchState, msr_hv_vapic) - 11984usize]; + ["Offset of field: CPUArchState::msr_hv_crash_params"] + [::std::mem::offset_of!(CPUArchState, msr_hv_crash_params) - 11992usize]; + ["Offset of field: CPUArchState::msr_hv_runtime"] + [::std::mem::offset_of!(CPUArchState, msr_hv_runtime) - 12032usize]; + ["Offset of field: CPUArchState::msr_hv_synic_control"] + [::std::mem::offset_of!(CPUArchState, msr_hv_synic_control) - 12040usize]; + ["Offset of field: CPUArchState::msr_hv_synic_evt_page"] + [::std::mem::offset_of!(CPUArchState, msr_hv_synic_evt_page) - 12048usize]; + ["Offset of field: CPUArchState::msr_hv_synic_msg_page"] + [::std::mem::offset_of!(CPUArchState, msr_hv_synic_msg_page) - 12056usize]; + ["Offset of field: CPUArchState::msr_hv_synic_sint"] + [::std::mem::offset_of!(CPUArchState, msr_hv_synic_sint) - 12064usize]; + ["Offset of field: CPUArchState::msr_hv_stimer_config"] + [::std::mem::offset_of!(CPUArchState, msr_hv_stimer_config) - 12192usize]; + ["Offset of field: CPUArchState::msr_hv_stimer_count"] + [::std::mem::offset_of!(CPUArchState, msr_hv_stimer_count) - 12224usize]; + ["Offset of field: CPUArchState::msr_hv_reenlightenment_control"] + [::std::mem::offset_of!(CPUArchState, msr_hv_reenlightenment_control) - 12256usize]; + ["Offset of field: CPUArchState::msr_hv_tsc_emulation_control"] + [::std::mem::offset_of!(CPUArchState, msr_hv_tsc_emulation_control) - 12264usize]; + ["Offset of field: CPUArchState::msr_hv_tsc_emulation_status"] + [::std::mem::offset_of!(CPUArchState, msr_hv_tsc_emulation_status) - 12272usize]; + ["Offset of field: CPUArchState::msr_rtit_ctrl"] + [::std::mem::offset_of!(CPUArchState, msr_rtit_ctrl) - 12280usize]; + ["Offset of 
field: CPUArchState::msr_rtit_status"] + [::std::mem::offset_of!(CPUArchState, msr_rtit_status) - 12288usize]; + ["Offset of field: CPUArchState::msr_rtit_output_base"] + [::std::mem::offset_of!(CPUArchState, msr_rtit_output_base) - 12296usize]; + ["Offset of field: CPUArchState::msr_rtit_output_mask"] + [::std::mem::offset_of!(CPUArchState, msr_rtit_output_mask) - 12304usize]; + ["Offset of field: CPUArchState::msr_rtit_cr3_match"] + [::std::mem::offset_of!(CPUArchState, msr_rtit_cr3_match) - 12312usize]; + ["Offset of field: CPUArchState::msr_rtit_addrs"] + [::std::mem::offset_of!(CPUArchState, msr_rtit_addrs) - 12320usize]; + ["Offset of field: CPUArchState::msr_xfd"] + [::std::mem::offset_of!(CPUArchState, msr_xfd) - 12384usize]; + ["Offset of field: CPUArchState::msr_xfd_err"] + [::std::mem::offset_of!(CPUArchState, msr_xfd_err) - 12392usize]; + ["Offset of field: CPUArchState::msr_lbr_ctl"] + [::std::mem::offset_of!(CPUArchState, msr_lbr_ctl) - 12400usize]; + ["Offset of field: CPUArchState::msr_lbr_depth"] + [::std::mem::offset_of!(CPUArchState, msr_lbr_depth) - 12408usize]; + ["Offset of field: CPUArchState::lbr_records"] + [::std::mem::offset_of!(CPUArchState, lbr_records) - 12416usize]; + ["Offset of field: CPUArchState::error_code"] + [::std::mem::offset_of!(CPUArchState, error_code) - 13184usize]; + ["Offset of field: CPUArchState::exception_is_int"] + [::std::mem::offset_of!(CPUArchState, exception_is_int) - 13188usize]; + ["Offset of field: CPUArchState::exception_next_eip"] + [::std::mem::offset_of!(CPUArchState, exception_next_eip) - 13192usize]; + ["Offset of field: CPUArchState::dr"][::std::mem::offset_of!(CPUArchState, dr) - 13200usize]; + ["Offset of field: CPUArchState::old_exception"] + [::std::mem::offset_of!(CPUArchState, old_exception) - 13296usize]; + ["Offset of field: CPUArchState::vm_vmcb"] + [::std::mem::offset_of!(CPUArchState, vm_vmcb) - 13304usize]; + ["Offset of field: CPUArchState::tsc_offset"] + [::std::mem::offset_of!(CPUArchState, tsc_offset) - 13312usize]; + ["Offset of field: CPUArchState::intercept"] + [::std::mem::offset_of!(CPUArchState, intercept) - 13320usize]; + ["Offset of field: CPUArchState::intercept_cr_read"] + [::std::mem::offset_of!(CPUArchState, intercept_cr_read) - 13328usize]; + ["Offset of field: CPUArchState::intercept_cr_write"] + [::std::mem::offset_of!(CPUArchState, intercept_cr_write) - 13330usize]; + ["Offset of field: CPUArchState::intercept_dr_read"] + [::std::mem::offset_of!(CPUArchState, intercept_dr_read) - 13332usize]; + ["Offset of field: CPUArchState::intercept_dr_write"] + [::std::mem::offset_of!(CPUArchState, intercept_dr_write) - 13334usize]; + ["Offset of field: CPUArchState::intercept_exceptions"] + [::std::mem::offset_of!(CPUArchState, intercept_exceptions) - 13336usize]; + ["Offset of field: CPUArchState::nested_cr3"] + [::std::mem::offset_of!(CPUArchState, nested_cr3) - 13344usize]; + ["Offset of field: CPUArchState::nested_pg_mode"] + [::std::mem::offset_of!(CPUArchState, nested_pg_mode) - 13352usize]; + ["Offset of field: CPUArchState::v_tpr"] + [::std::mem::offset_of!(CPUArchState, v_tpr) - 13356usize]; + ["Offset of field: CPUArchState::int_ctl"] + [::std::mem::offset_of!(CPUArchState, int_ctl) - 13360usize]; + ["Offset of field: CPUArchState::nmi_injected"] + [::std::mem::offset_of!(CPUArchState, nmi_injected) - 13364usize]; + ["Offset of field: CPUArchState::nmi_pending"] + [::std::mem::offset_of!(CPUArchState, nmi_pending) - 13365usize]; + ["Offset of field: CPUArchState::retaddr"] + 
[::std::mem::offset_of!(CPUArchState, retaddr) - 13368usize]; + ["Offset of field: CPUArchState::end_reset_fields"] + [::std::mem::offset_of!(CPUArchState, end_reset_fields) - 13376usize]; + ["Offset of field: CPUArchState::cpuid_level_func7"] + [::std::mem::offset_of!(CPUArchState, cpuid_level_func7) - 13376usize]; + ["Offset of field: CPUArchState::cpuid_min_level_func7"] + [::std::mem::offset_of!(CPUArchState, cpuid_min_level_func7) - 13380usize]; + ["Offset of field: CPUArchState::cpuid_min_level"] + [::std::mem::offset_of!(CPUArchState, cpuid_min_level) - 13384usize]; + ["Offset of field: CPUArchState::cpuid_min_xlevel"] + [::std::mem::offset_of!(CPUArchState, cpuid_min_xlevel) - 13388usize]; + ["Offset of field: CPUArchState::cpuid_min_xlevel2"] + [::std::mem::offset_of!(CPUArchState, cpuid_min_xlevel2) - 13392usize]; + ["Offset of field: CPUArchState::cpuid_max_level"] + [::std::mem::offset_of!(CPUArchState, cpuid_max_level) - 13396usize]; + ["Offset of field: CPUArchState::cpuid_max_xlevel"] + [::std::mem::offset_of!(CPUArchState, cpuid_max_xlevel) - 13400usize]; + ["Offset of field: CPUArchState::cpuid_max_xlevel2"] + [::std::mem::offset_of!(CPUArchState, cpuid_max_xlevel2) - 13404usize]; + ["Offset of field: CPUArchState::cpuid_level"] + [::std::mem::offset_of!(CPUArchState, cpuid_level) - 13408usize]; + ["Offset of field: CPUArchState::cpuid_xlevel"] + [::std::mem::offset_of!(CPUArchState, cpuid_xlevel) - 13412usize]; + ["Offset of field: CPUArchState::cpuid_xlevel2"] + [::std::mem::offset_of!(CPUArchState, cpuid_xlevel2) - 13416usize]; + ["Offset of field: CPUArchState::cpuid_vendor1"] + [::std::mem::offset_of!(CPUArchState, cpuid_vendor1) - 13420usize]; + ["Offset of field: CPUArchState::cpuid_vendor2"] + [::std::mem::offset_of!(CPUArchState, cpuid_vendor2) - 13424usize]; + ["Offset of field: CPUArchState::cpuid_vendor3"] + [::std::mem::offset_of!(CPUArchState, cpuid_vendor3) - 13428usize]; + ["Offset of field: CPUArchState::cpuid_version"] + [::std::mem::offset_of!(CPUArchState, cpuid_version) - 13432usize]; + ["Offset of field: CPUArchState::features"] + [::std::mem::offset_of!(CPUArchState, features) - 13440usize]; + ["Offset of field: CPUArchState::user_features"] + [::std::mem::offset_of!(CPUArchState, user_features) - 13752usize]; + ["Offset of field: CPUArchState::cpuid_model"] + [::std::mem::offset_of!(CPUArchState, cpuid_model) - 14064usize]; + ["Offset of field: CPUArchState::cache_info_cpuid2"] + [::std::mem::offset_of!(CPUArchState, cache_info_cpuid2) - 14112usize]; + ["Offset of field: CPUArchState::cache_info_cpuid4"] + [::std::mem::offset_of!(CPUArchState, cache_info_cpuid4) - 14144usize]; + ["Offset of field: CPUArchState::cache_info_amd"] + [::std::mem::offset_of!(CPUArchState, cache_info_amd) - 14176usize]; + ["Offset of field: CPUArchState::mtrr_fixed"] + [::std::mem::offset_of!(CPUArchState, mtrr_fixed) - 14208usize]; + ["Offset of field: CPUArchState::mtrr_deftype"] + [::std::mem::offset_of!(CPUArchState, mtrr_deftype) - 14296usize]; + ["Offset of field: CPUArchState::mtrr_var"] + [::std::mem::offset_of!(CPUArchState, mtrr_var) - 14304usize]; + ["Offset of field: CPUArchState::mp_state"] + [::std::mem::offset_of!(CPUArchState, mp_state) - 14432usize]; + ["Offset of field: CPUArchState::exception_nr"] + [::std::mem::offset_of!(CPUArchState, exception_nr) - 14436usize]; + ["Offset of field: CPUArchState::interrupt_injected"] + [::std::mem::offset_of!(CPUArchState, interrupt_injected) - 14440usize]; + ["Offset of field: CPUArchState::soft_interrupt"] + 
[::std::mem::offset_of!(CPUArchState, soft_interrupt) - 14444usize]; + ["Offset of field: CPUArchState::exception_pending"] + [::std::mem::offset_of!(CPUArchState, exception_pending) - 14445usize]; + ["Offset of field: CPUArchState::exception_injected"] + [::std::mem::offset_of!(CPUArchState, exception_injected) - 14446usize]; + ["Offset of field: CPUArchState::has_error_code"] + [::std::mem::offset_of!(CPUArchState, has_error_code) - 14447usize]; + ["Offset of field: CPUArchState::exception_has_payload"] + [::std::mem::offset_of!(CPUArchState, exception_has_payload) - 14448usize]; + ["Offset of field: CPUArchState::exception_payload"] + [::std::mem::offset_of!(CPUArchState, exception_payload) - 14456usize]; + ["Offset of field: CPUArchState::triple_fault_pending"] + [::std::mem::offset_of!(CPUArchState, triple_fault_pending) - 14464usize]; + ["Offset of field: CPUArchState::ins_len"] + [::std::mem::offset_of!(CPUArchState, ins_len) - 14468usize]; + ["Offset of field: CPUArchState::sipi_vector"] + [::std::mem::offset_of!(CPUArchState, sipi_vector) - 14472usize]; + ["Offset of field: CPUArchState::tsc_valid"] + [::std::mem::offset_of!(CPUArchState, tsc_valid) - 14476usize]; + ["Offset of field: CPUArchState::tsc_khz"] + [::std::mem::offset_of!(CPUArchState, tsc_khz) - 14480usize]; + ["Offset of field: CPUArchState::user_tsc_khz"] + [::std::mem::offset_of!(CPUArchState, user_tsc_khz) - 14488usize]; + ["Offset of field: CPUArchState::apic_bus_freq"] + [::std::mem::offset_of!(CPUArchState, apic_bus_freq) - 14496usize]; + ["Offset of field: CPUArchState::tsc"][::std::mem::offset_of!(CPUArchState, tsc) - 14504usize]; + ["Offset of field: CPUArchState::mcg_cap"] + [::std::mem::offset_of!(CPUArchState, mcg_cap) - 14512usize]; + ["Offset of field: CPUArchState::mcg_ctl"] + [::std::mem::offset_of!(CPUArchState, mcg_ctl) - 14520usize]; + ["Offset of field: CPUArchState::mcg_ext_ctl"] + [::std::mem::offset_of!(CPUArchState, mcg_ext_ctl) - 14528usize]; + ["Offset of field: CPUArchState::mce_banks"] + [::std::mem::offset_of!(CPUArchState, mce_banks) - 14536usize]; + ["Offset of field: CPUArchState::xstate_bv"] + [::std::mem::offset_of!(CPUArchState, xstate_bv) - 14856usize]; + ["Offset of field: CPUArchState::fpus_vmstate"] + [::std::mem::offset_of!(CPUArchState, fpus_vmstate) - 14864usize]; + ["Offset of field: CPUArchState::fptag_vmstate"] + [::std::mem::offset_of!(CPUArchState, fptag_vmstate) - 14866usize]; + ["Offset of field: CPUArchState::fpregs_format_vmstate"] + [::std::mem::offset_of!(CPUArchState, fpregs_format_vmstate) - 14868usize]; + ["Offset of field: CPUArchState::xss"][::std::mem::offset_of!(CPUArchState, xss) - 14872usize]; + ["Offset of field: CPUArchState::umwait"] + [::std::mem::offset_of!(CPUArchState, umwait) - 14880usize]; + ["Offset of field: CPUArchState::tpr_access_type"] + [::std::mem::offset_of!(CPUArchState, tpr_access_type) - 14884usize]; + ["Offset of field: CPUArchState::nr_dies"] + [::std::mem::offset_of!(CPUArchState, nr_dies) - 14888usize]; +}; +impl Default for CPUArchState { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for CPUArchState { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write ! 
(f , "CPUArchState {{ regs: {:?}, segs: {:?}, ldt: {:?}, tr: {:?}, gdt: {:?}, idt: {:?}, cr: {:?}, pdptrs_valid: {:?}, pdptrs: {:?}, bnd_regs: {:?}, bndcs_regs: {:?}, start_init_save: {:?}, fpstt: {:?}, fptags: {:?}, fpregs: {:?}, fp_status: {:?}, ft0: {:?}, mmx_status: {:?}, sse_status: {:?}, xmm_regs: {:?}, xmm_t0: {:?}, mmx_t0: {:?}, opmask_regs: {:?}, xtilecfg: {:?}, xtiledata: {:?}, msr_ia32_sgxlepubkeyhash: {:?}, msr_fixed_counters: {:?}, msr_gp_counters: {:?}, msr_gp_evtsel: {:?}, end_init_save: {:?}, msr_hv_crash_params: {:?}, msr_hv_synic_sint: {:?}, msr_hv_stimer_config: {:?}, msr_hv_stimer_count: {:?}, msr_rtit_addrs: {:?}, lbr_records: {:?}, error_code: {:?}, exception_is_int: {:?}, dr: {:?}, __bindgen_anon_1: {:?}, old_exception: {:?}, end_reset_fields: {:?}, features: {:?}, user_features: {:?}, cpuid_model: {:?}, cache_info_cpuid2: {:?}, cache_info_cpuid4: {:?}, cache_info_amd: {:?}, mtrr_fixed: {:?}, mtrr_var: {:?}, tsc_valid: {:?}, mce_banks: {:?}, tpr_access_type: {:?}, nr_dies: {:?} }}" , self . regs , self . segs , self . ldt , self . tr , self . gdt , self . idt , self . cr , self . pdptrs_valid , self . pdptrs , self . bnd_regs , self . bndcs_regs , self . start_init_save , self . fpstt , self . fptags , self . fpregs , self . fp_status , self . ft0 , self . mmx_status , self . sse_status , self . xmm_regs , self . xmm_t0 , self . mmx_t0 , self . opmask_regs , self . xtilecfg , self . xtiledata , self . msr_ia32_sgxlepubkeyhash , self . msr_fixed_counters , self . msr_gp_counters , self . msr_gp_evtsel , self . end_init_save , self . msr_hv_crash_params , self . msr_hv_synic_sint , self . msr_hv_stimer_config , self . msr_hv_stimer_count , self . msr_rtit_addrs , self . lbr_records , self . error_code , self . exception_is_int , self . dr , self . __bindgen_anon_1 , self . old_exception , self . end_reset_fields , self . features , self . user_features , self . cpuid_model , self . cache_info_cpuid2 , self . cache_info_cpuid4 , self . cache_info_amd , self . mtrr_fixed , self . mtrr_var , self . tsc_valid , self . mce_banks , self . tpr_access_type , self . 
nr_dies) + } +} +pub type CPUX86State = CPUArchState; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct kvm_msrs { + _unused: [u8; 0], +} +#[doc = " X86CPU:\n @env: #CPUX86State\n @migratable: If set, only migratable flags will be accepted when \"enforce\"\n mode is used, and only migratable flags will be included in the \"host\"\n CPU model.\n\n An x86 CPU."] +#[repr(C)] +#[repr(align(16))] +pub struct ArchCPU { + pub parent_obj: CPUState, + pub env: CPUX86State, + pub vmsentry: *mut VMChangeStateEntry, + pub ucode_rev: u64, + pub hyperv_spinlock_attempts: u32, + pub hyperv_vendor: *mut ::std::os::raw::c_char, + pub hyperv_synic_kvm_only: bool, + pub hyperv_features: u64, + pub hyperv_passthrough: bool, + pub hyperv_no_nonarch_cs: OnOffAuto, + pub hyperv_vendor_id: [u32; 3usize], + pub hyperv_interface_id: [u32; 4usize], + pub hyperv_limits: [u32; 3usize], + pub hyperv_enforce_cpuid: bool, + pub hyperv_ver_id_build: u32, + pub hyperv_ver_id_major: u16, + pub hyperv_ver_id_minor: u16, + pub hyperv_ver_id_sp: u32, + pub hyperv_ver_id_sb: u8, + pub hyperv_ver_id_sn: u32, + pub check_cpuid: bool, + pub enforce_cpuid: bool, + pub force_features: bool, + pub expose_kvm: bool, + pub expose_tcg: bool, + pub migratable: bool, + pub migrate_smi_count: bool, + pub max_features: bool, + pub apic_id: u32, + pub vmware_cpuid_freq: bool, + pub cache_info_passthrough: bool, + pub mwait: ArchCPU__bindgen_ty_1, + pub filtered_features: FeatureWordArray, + pub enable_pmu: bool, + pub lbr_fmt: u64, + pub enable_lmce: bool, + pub enable_l3_cache: bool, + pub legacy_cache: bool, + pub enable_cpuid_0xb: bool, + pub full_cpuid_auto_level: bool, + pub vendor_cpuid_only: bool, + pub intel_pt_auto_level: bool, + pub fill_mtrr_mask: bool, + pub host_phys_bits: bool, + pub host_phys_bits_limit: u8, + pub kvm_no_smi_migration: bool, + pub kvm_pv_enforce_cpuid: bool, + pub phys_bits: u32, + pub apic_state: *mut DeviceState, + pub cpu_as_root: *mut MemoryRegion, + pub cpu_as_mem: *mut MemoryRegion, + pub smram: *mut MemoryRegion, + pub machine_done: Notifier, + pub kvm_msr_buf: *mut kvm_msrs, + pub node_id: i32, + pub socket_id: i32, + pub die_id: i32, + pub core_id: i32, + pub thread_id: i32, + pub hv_max_vps: i32, + pub xen_vapic: bool, +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct ArchCPU__bindgen_ty_1 { + pub eax: u32, + pub ebx: u32, + pub ecx: u32, + pub edx: u32, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of ArchCPU__bindgen_ty_1"][::std::mem::size_of::() - 16usize]; + ["Alignment of ArchCPU__bindgen_ty_1"] + [::std::mem::align_of::() - 4usize]; + ["Offset of field: ArchCPU__bindgen_ty_1::eax"] + [::std::mem::offset_of!(ArchCPU__bindgen_ty_1, eax) - 0usize]; + ["Offset of field: ArchCPU__bindgen_ty_1::ebx"] + [::std::mem::offset_of!(ArchCPU__bindgen_ty_1, ebx) - 4usize]; + ["Offset of field: ArchCPU__bindgen_ty_1::ecx"] + [::std::mem::offset_of!(ArchCPU__bindgen_ty_1, ecx) - 8usize]; + ["Offset of field: ArchCPU__bindgen_ty_1::edx"] + [::std::mem::offset_of!(ArchCPU__bindgen_ty_1, edx) - 12usize]; +}; +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of ArchCPU"][::std::mem::size_of::() - 25664usize]; + ["Alignment of ArchCPU"][::std::mem::align_of::() - 16usize]; + ["Offset of field: ArchCPU::parent_obj"][::std::mem::offset_of!(ArchCPU, parent_obj) - 0usize]; + ["Offset of field: ArchCPU::env"][::std::mem::offset_of!(ArchCPU, env) - 10176usize]; + ["Offset of field: 
ArchCPU::vmsentry"][::std::mem::offset_of!(ArchCPU, vmsentry) - 25072usize]; + ["Offset of field: ArchCPU::ucode_rev"] + [::std::mem::offset_of!(ArchCPU, ucode_rev) - 25080usize]; + ["Offset of field: ArchCPU::hyperv_spinlock_attempts"] + [::std::mem::offset_of!(ArchCPU, hyperv_spinlock_attempts) - 25088usize]; + ["Offset of field: ArchCPU::hyperv_vendor"] + [::std::mem::offset_of!(ArchCPU, hyperv_vendor) - 25096usize]; + ["Offset of field: ArchCPU::hyperv_synic_kvm_only"] + [::std::mem::offset_of!(ArchCPU, hyperv_synic_kvm_only) - 25104usize]; + ["Offset of field: ArchCPU::hyperv_features"] + [::std::mem::offset_of!(ArchCPU, hyperv_features) - 25112usize]; + ["Offset of field: ArchCPU::hyperv_passthrough"] + [::std::mem::offset_of!(ArchCPU, hyperv_passthrough) - 25120usize]; + ["Offset of field: ArchCPU::hyperv_no_nonarch_cs"] + [::std::mem::offset_of!(ArchCPU, hyperv_no_nonarch_cs) - 25124usize]; + ["Offset of field: ArchCPU::hyperv_vendor_id"] + [::std::mem::offset_of!(ArchCPU, hyperv_vendor_id) - 25128usize]; + ["Offset of field: ArchCPU::hyperv_interface_id"] + [::std::mem::offset_of!(ArchCPU, hyperv_interface_id) - 25140usize]; + ["Offset of field: ArchCPU::hyperv_limits"] + [::std::mem::offset_of!(ArchCPU, hyperv_limits) - 25156usize]; + ["Offset of field: ArchCPU::hyperv_enforce_cpuid"] + [::std::mem::offset_of!(ArchCPU, hyperv_enforce_cpuid) - 25168usize]; + ["Offset of field: ArchCPU::hyperv_ver_id_build"] + [::std::mem::offset_of!(ArchCPU, hyperv_ver_id_build) - 25172usize]; + ["Offset of field: ArchCPU::hyperv_ver_id_major"] + [::std::mem::offset_of!(ArchCPU, hyperv_ver_id_major) - 25176usize]; + ["Offset of field: ArchCPU::hyperv_ver_id_minor"] + [::std::mem::offset_of!(ArchCPU, hyperv_ver_id_minor) - 25178usize]; + ["Offset of field: ArchCPU::hyperv_ver_id_sp"] + [::std::mem::offset_of!(ArchCPU, hyperv_ver_id_sp) - 25180usize]; + ["Offset of field: ArchCPU::hyperv_ver_id_sb"] + [::std::mem::offset_of!(ArchCPU, hyperv_ver_id_sb) - 25184usize]; + ["Offset of field: ArchCPU::hyperv_ver_id_sn"] + [::std::mem::offset_of!(ArchCPU, hyperv_ver_id_sn) - 25188usize]; + ["Offset of field: ArchCPU::check_cpuid"] + [::std::mem::offset_of!(ArchCPU, check_cpuid) - 25192usize]; + ["Offset of field: ArchCPU::enforce_cpuid"] + [::std::mem::offset_of!(ArchCPU, enforce_cpuid) - 25193usize]; + ["Offset of field: ArchCPU::force_features"] + [::std::mem::offset_of!(ArchCPU, force_features) - 25194usize]; + ["Offset of field: ArchCPU::expose_kvm"] + [::std::mem::offset_of!(ArchCPU, expose_kvm) - 25195usize]; + ["Offset of field: ArchCPU::expose_tcg"] + [::std::mem::offset_of!(ArchCPU, expose_tcg) - 25196usize]; + ["Offset of field: ArchCPU::migratable"] + [::std::mem::offset_of!(ArchCPU, migratable) - 25197usize]; + ["Offset of field: ArchCPU::migrate_smi_count"] + [::std::mem::offset_of!(ArchCPU, migrate_smi_count) - 25198usize]; + ["Offset of field: ArchCPU::max_features"] + [::std::mem::offset_of!(ArchCPU, max_features) - 25199usize]; + ["Offset of field: ArchCPU::apic_id"][::std::mem::offset_of!(ArchCPU, apic_id) - 25200usize]; + ["Offset of field: ArchCPU::vmware_cpuid_freq"] + [::std::mem::offset_of!(ArchCPU, vmware_cpuid_freq) - 25204usize]; + ["Offset of field: ArchCPU::cache_info_passthrough"] + [::std::mem::offset_of!(ArchCPU, cache_info_passthrough) - 25205usize]; + ["Offset of field: ArchCPU::mwait"][::std::mem::offset_of!(ArchCPU, mwait) - 25208usize]; + ["Offset of field: ArchCPU::filtered_features"] + [::std::mem::offset_of!(ArchCPU, filtered_features) - 25224usize]; + ["Offset of 
field: ArchCPU::enable_pmu"] + [::std::mem::offset_of!(ArchCPU, enable_pmu) - 25536usize]; + ["Offset of field: ArchCPU::lbr_fmt"][::std::mem::offset_of!(ArchCPU, lbr_fmt) - 25544usize]; + ["Offset of field: ArchCPU::enable_lmce"] + [::std::mem::offset_of!(ArchCPU, enable_lmce) - 25552usize]; + ["Offset of field: ArchCPU::enable_l3_cache"] + [::std::mem::offset_of!(ArchCPU, enable_l3_cache) - 25553usize]; + ["Offset of field: ArchCPU::legacy_cache"] + [::std::mem::offset_of!(ArchCPU, legacy_cache) - 25554usize]; + ["Offset of field: ArchCPU::enable_cpuid_0xb"] + [::std::mem::offset_of!(ArchCPU, enable_cpuid_0xb) - 25555usize]; + ["Offset of field: ArchCPU::full_cpuid_auto_level"] + [::std::mem::offset_of!(ArchCPU, full_cpuid_auto_level) - 25556usize]; + ["Offset of field: ArchCPU::vendor_cpuid_only"] + [::std::mem::offset_of!(ArchCPU, vendor_cpuid_only) - 25557usize]; + ["Offset of field: ArchCPU::intel_pt_auto_level"] + [::std::mem::offset_of!(ArchCPU, intel_pt_auto_level) - 25558usize]; + ["Offset of field: ArchCPU::fill_mtrr_mask"] + [::std::mem::offset_of!(ArchCPU, fill_mtrr_mask) - 25559usize]; + ["Offset of field: ArchCPU::host_phys_bits"] + [::std::mem::offset_of!(ArchCPU, host_phys_bits) - 25560usize]; + ["Offset of field: ArchCPU::host_phys_bits_limit"] + [::std::mem::offset_of!(ArchCPU, host_phys_bits_limit) - 25561usize]; + ["Offset of field: ArchCPU::kvm_no_smi_migration"] + [::std::mem::offset_of!(ArchCPU, kvm_no_smi_migration) - 25562usize]; + ["Offset of field: ArchCPU::kvm_pv_enforce_cpuid"] + [::std::mem::offset_of!(ArchCPU, kvm_pv_enforce_cpuid) - 25563usize]; + ["Offset of field: ArchCPU::phys_bits"] + [::std::mem::offset_of!(ArchCPU, phys_bits) - 25564usize]; + ["Offset of field: ArchCPU::apic_state"] + [::std::mem::offset_of!(ArchCPU, apic_state) - 25568usize]; + ["Offset of field: ArchCPU::cpu_as_root"] + [::std::mem::offset_of!(ArchCPU, cpu_as_root) - 25576usize]; + ["Offset of field: ArchCPU::cpu_as_mem"] + [::std::mem::offset_of!(ArchCPU, cpu_as_mem) - 25584usize]; + ["Offset of field: ArchCPU::smram"][::std::mem::offset_of!(ArchCPU, smram) - 25592usize]; + ["Offset of field: ArchCPU::machine_done"] + [::std::mem::offset_of!(ArchCPU, machine_done) - 25600usize]; + ["Offset of field: ArchCPU::kvm_msr_buf"] + [::std::mem::offset_of!(ArchCPU, kvm_msr_buf) - 25624usize]; + ["Offset of field: ArchCPU::node_id"][::std::mem::offset_of!(ArchCPU, node_id) - 25632usize]; + ["Offset of field: ArchCPU::socket_id"] + [::std::mem::offset_of!(ArchCPU, socket_id) - 25636usize]; + ["Offset of field: ArchCPU::die_id"][::std::mem::offset_of!(ArchCPU, die_id) - 25640usize]; + ["Offset of field: ArchCPU::core_id"][::std::mem::offset_of!(ArchCPU, core_id) - 25644usize]; + ["Offset of field: ArchCPU::thread_id"] + [::std::mem::offset_of!(ArchCPU, thread_id) - 25648usize]; + ["Offset of field: ArchCPU::hv_max_vps"] + [::std::mem::offset_of!(ArchCPU, hv_max_vps) - 25652usize]; + ["Offset of field: ArchCPU::xen_vapic"] + [::std::mem::offset_of!(ArchCPU, xen_vapic) - 25656usize]; +}; +impl Default for ArchCPU { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for ArchCPU { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write ! 
(f , "ArchCPU {{ parent_obj: {:?}, env: {:?}, vmsentry: {:?}, hyperv_vendor: {:?}, hyperv_synic_kvm_only: {:?}, hyperv_passthrough: {:?}, hyperv_no_nonarch_cs: {:?}, hyperv_vendor_id: {:?}, hyperv_interface_id: {:?}, hyperv_limits: {:?}, hyperv_enforce_cpuid: {:?}, check_cpuid: {:?}, enforce_cpuid: {:?}, force_features: {:?}, expose_kvm: {:?}, expose_tcg: {:?}, migratable: {:?}, migrate_smi_count: {:?}, max_features: {:?}, vmware_cpuid_freq: {:?}, cache_info_passthrough: {:?}, mwait: {:?}, filtered_features: {:?}, enable_pmu: {:?}, enable_lmce: {:?}, enable_l3_cache: {:?}, legacy_cache: {:?}, enable_cpuid_0xb: {:?}, full_cpuid_auto_level: {:?}, vendor_cpuid_only: {:?}, intel_pt_auto_level: {:?}, fill_mtrr_mask: {:?}, host_phys_bits: {:?}, kvm_no_smi_migration: {:?}, kvm_pv_enforce_cpuid: {:?}, apic_state: {:?}, cpu_as_root: {:?}, cpu_as_mem: {:?}, smram: {:?}, machine_done: {:?}, kvm_msr_buf: {:?}, xen_vapic: {:?} }}" , self . parent_obj , self . env , self . vmsentry , self . hyperv_vendor , self . hyperv_synic_kvm_only , self . hyperv_passthrough , self . hyperv_no_nonarch_cs , self . hyperv_vendor_id , self . hyperv_interface_id , self . hyperv_limits , self . hyperv_enforce_cpuid , self . check_cpuid , self . enforce_cpuid , self . force_features , self . expose_kvm , self . expose_tcg , self . migratable , self . migrate_smi_count , self . max_features , self . vmware_cpuid_freq , self . cache_info_passthrough , self . mwait , self . filtered_features , self . enable_pmu , self . enable_lmce , self . enable_l3_cache , self . legacy_cache , self . enable_cpuid_0xb , self . full_cpuid_auto_level , self . vendor_cpuid_only , self . intel_pt_auto_level , self . fill_mtrr_mask , self . host_phys_bits , self . kvm_no_smi_migration , self . kvm_pv_enforce_cpuid , self . apic_state , self . cpu_as_root , self . cpu_as_mem , self . smram , self . machine_done , self . kvm_msr_buf , self . 
xen_vapic) + } +} +extern "C" { + pub fn cpu_memory_rw_debug( + cpu: *mut CPUState, + addr: vaddr, + ptr: *mut ::std::os::raw::c_void, + len: usize, + is_write: bool, + ) -> ::std::os::raw::c_int; +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct RBNode { + pub rb_parent_color: usize, + pub rb_right: *mut RBNode, + pub rb_left: *mut RBNode, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of RBNode"][::std::mem::size_of::() - 24usize]; + ["Alignment of RBNode"][::std::mem::align_of::() - 8usize]; + ["Offset of field: RBNode::rb_parent_color"] + [::std::mem::offset_of!(RBNode, rb_parent_color) - 0usize]; + ["Offset of field: RBNode::rb_right"][::std::mem::offset_of!(RBNode, rb_right) - 8usize]; + ["Offset of field: RBNode::rb_left"][::std::mem::offset_of!(RBNode, rb_left) - 16usize]; +}; +impl Default for RBNode { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct RBRoot { + pub rb_node: *mut RBNode, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of RBRoot"][::std::mem::size_of::() - 8usize]; + ["Alignment of RBRoot"][::std::mem::align_of::() - 8usize]; + ["Offset of field: RBRoot::rb_node"][::std::mem::offset_of!(RBRoot, rb_node) - 0usize]; +}; +impl Default for RBRoot { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct RBRootLeftCached { + pub rb_root: RBRoot, + pub rb_leftmost: *mut RBNode, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of RBRootLeftCached"][::std::mem::size_of::() - 16usize]; + ["Alignment of RBRootLeftCached"][::std::mem::align_of::() - 8usize]; + ["Offset of field: RBRootLeftCached::rb_root"] + [::std::mem::offset_of!(RBRootLeftCached, rb_root) - 0usize]; + ["Offset of field: RBRootLeftCached::rb_leftmost"] + [::std::mem::offset_of!(RBRootLeftCached, rb_leftmost) - 8usize]; +}; +impl Default for RBRootLeftCached { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct IntervalTreeNode { + pub rb: RBNode, + pub start: u64, + pub last: u64, + pub subtree_last: u64, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of IntervalTreeNode"][::std::mem::size_of::() - 48usize]; + ["Alignment of IntervalTreeNode"][::std::mem::align_of::() - 8usize]; + ["Offset of field: IntervalTreeNode::rb"] + [::std::mem::offset_of!(IntervalTreeNode, rb) - 0usize]; + ["Offset of field: IntervalTreeNode::start"] + [::std::mem::offset_of!(IntervalTreeNode, start) - 24usize]; + ["Offset of field: IntervalTreeNode::last"] + [::std::mem::offset_of!(IntervalTreeNode, last) - 32usize]; + ["Offset of field: IntervalTreeNode::subtree_last"] + [::std::mem::offset_of!(IntervalTreeNode, subtree_last) - 40usize]; +}; +impl Default for IntervalTreeNode { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub type IntervalTreeRoot = RBRootLeftCached; +pub type abi_ulong = target_ulong; +pub type abi_long = target_long; 
+extern "C" { + pub static mut guest_base: usize; +} +extern "C" { + #[doc = " --- Begin LibAFL code ---"] + pub fn pageflags_get_root() -> *mut IntervalTreeRoot; +} +extern "C" { + #[doc = " page_check_range\n @start: first byte of range\n @len: length of range\n @flags: flags required for each page\n\n Return true if every page in [@start, @start+@len) has @flags set.\n Return false if any page is unmapped. Thus testing flags == 0 is\n equivalent to testing for flags == PAGE_VALID."] + pub fn page_check_range( + start: target_ulong, + last: target_ulong, + flags: ::std::os::raw::c_int, + ) -> bool; +} +pub const MemOp_MO_8: MemOp = MemOp(0); +pub const MemOp_MO_16: MemOp = MemOp(1); +pub const MemOp_MO_32: MemOp = MemOp(2); +pub const MemOp_MO_64: MemOp = MemOp(3); +pub const MemOp_MO_128: MemOp = MemOp(4); +pub const MemOp_MO_256: MemOp = MemOp(5); +pub const MemOp_MO_512: MemOp = MemOp(6); +pub const MemOp_MO_1024: MemOp = MemOp(7); +pub const MemOp_MO_SIZE: MemOp = MemOp(7); +pub const MemOp_MO_SIGN: MemOp = MemOp(8); +pub const MemOp_MO_BSWAP: MemOp = MemOp(16); +pub const MemOp_MO_LE: MemOp = MemOp(0); +pub const MemOp_MO_BE: MemOp = MemOp(16); +pub const MemOp_MO_TE: MemOp = MemOp(0); +pub const MemOp_MO_ASHIFT: MemOp = MemOp(5); +pub const MemOp_MO_AMASK: MemOp = MemOp(224); +pub const MemOp_MO_UNALN: MemOp = MemOp(0); +pub const MemOp_MO_ALIGN_2: MemOp = MemOp(32); +pub const MemOp_MO_ALIGN_4: MemOp = MemOp(64); +pub const MemOp_MO_ALIGN_8: MemOp = MemOp(96); +pub const MemOp_MO_ALIGN_16: MemOp = MemOp(128); +pub const MemOp_MO_ALIGN_32: MemOp = MemOp(160); +pub const MemOp_MO_ALIGN_64: MemOp = MemOp(192); +pub const MemOp_MO_ALIGN: MemOp = MemOp(224); +pub const MemOp_MO_ATOM_SHIFT: MemOp = MemOp(8); +pub const MemOp_MO_ATOM_IFALIGN: MemOp = MemOp(0); +pub const MemOp_MO_ATOM_IFALIGN_PAIR: MemOp = MemOp(256); +pub const MemOp_MO_ATOM_WITHIN16: MemOp = MemOp(512); +pub const MemOp_MO_ATOM_WITHIN16_PAIR: MemOp = MemOp(768); +pub const MemOp_MO_ATOM_SUBALIGN: MemOp = MemOp(1024); +pub const MemOp_MO_ATOM_NONE: MemOp = MemOp(1280); +pub const MemOp_MO_ATOM_MASK: MemOp = MemOp(1792); +pub const MemOp_MO_UB: MemOp = MemOp(0); +pub const MemOp_MO_UW: MemOp = MemOp(1); +pub const MemOp_MO_UL: MemOp = MemOp(2); +pub const MemOp_MO_UQ: MemOp = MemOp(3); +pub const MemOp_MO_UO: MemOp = MemOp(4); +pub const MemOp_MO_SB: MemOp = MemOp(8); +pub const MemOp_MO_SW: MemOp = MemOp(9); +pub const MemOp_MO_SL: MemOp = MemOp(10); +pub const MemOp_MO_SQ: MemOp = MemOp(11); +pub const MemOp_MO_SO: MemOp = MemOp(12); +pub const MemOp_MO_LEUW: MemOp = MemOp(1); +pub const MemOp_MO_LEUL: MemOp = MemOp(2); +pub const MemOp_MO_LEUQ: MemOp = MemOp(3); +pub const MemOp_MO_LESW: MemOp = MemOp(9); +pub const MemOp_MO_LESL: MemOp = MemOp(10); +pub const MemOp_MO_LESQ: MemOp = MemOp(11); +pub const MemOp_MO_BEUW: MemOp = MemOp(17); +pub const MemOp_MO_BEUL: MemOp = MemOp(18); +pub const MemOp_MO_BEUQ: MemOp = MemOp(19); +pub const MemOp_MO_BESW: MemOp = MemOp(25); +pub const MemOp_MO_BESL: MemOp = MemOp(26); +pub const MemOp_MO_BESQ: MemOp = MemOp(27); +pub const MemOp_MO_TEUW: MemOp = MemOp(1); +pub const MemOp_MO_TEUL: MemOp = MemOp(2); +pub const MemOp_MO_TEUQ: MemOp = MemOp(3); +pub const MemOp_MO_TEUO: MemOp = MemOp(4); +pub const MemOp_MO_TESW: MemOp = MemOp(9); +pub const MemOp_MO_TESL: MemOp = MemOp(10); +pub const MemOp_MO_TESQ: MemOp = MemOp(11); +pub const MemOp_MO_SSIZE: MemOp = MemOp(15); +impl ::std::ops::BitOr for MemOp { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + 
MemOp(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for MemOp { + #[inline] + fn bitor_assign(&mut self, rhs: MemOp) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for MemOp { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + MemOp(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for MemOp { + #[inline] + fn bitand_assign(&mut self, rhs: MemOp) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct MemOp(pub ::std::os::raw::c_uint); +pub type MemOpIdx = u32; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct image_info { + pub load_bias: abi_ulong, + pub load_addr: abi_ulong, + pub start_code: abi_ulong, + pub end_code: abi_ulong, + pub start_data: abi_ulong, + pub end_data: abi_ulong, + pub brk: abi_ulong, + pub start_stack: abi_ulong, + pub stack_limit: abi_ulong, + pub vdso: abi_ulong, + pub entry: abi_ulong, + pub code_offset: abi_ulong, + pub data_offset: abi_ulong, + pub saved_auxv: abi_ulong, + pub auxv_len: abi_ulong, + pub argc: abi_ulong, + pub argv: abi_ulong, + pub envc: abi_ulong, + pub envp: abi_ulong, + pub file_string: abi_ulong, + pub elf_flags: u32, + pub personality: ::std::os::raw::c_int, + pub alignment: abi_ulong, + pub exec_stack: bool, + pub arg_strings: abi_ulong, + pub env_strings: abi_ulong, + pub loadmap_addr: abi_ulong, + pub nsegs: u16, + pub loadsegs: *mut ::std::os::raw::c_void, + pub pt_dynamic_addr: abi_ulong, + pub interpreter_loadmap_addr: abi_ulong, + pub interpreter_pt_dynamic_addr: abi_ulong, + pub other_info: *mut image_info, + pub note_flags: u32, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of image_info"][::std::mem::size_of::() - 264usize]; + ["Alignment of image_info"][::std::mem::align_of::() - 8usize]; + ["Offset of field: image_info::load_bias"] + [::std::mem::offset_of!(image_info, load_bias) - 0usize]; + ["Offset of field: image_info::load_addr"] + [::std::mem::offset_of!(image_info, load_addr) - 8usize]; + ["Offset of field: image_info::start_code"] + [::std::mem::offset_of!(image_info, start_code) - 16usize]; + ["Offset of field: image_info::end_code"] + [::std::mem::offset_of!(image_info, end_code) - 24usize]; + ["Offset of field: image_info::start_data"] + [::std::mem::offset_of!(image_info, start_data) - 32usize]; + ["Offset of field: image_info::end_data"] + [::std::mem::offset_of!(image_info, end_data) - 40usize]; + ["Offset of field: image_info::brk"][::std::mem::offset_of!(image_info, brk) - 48usize]; + ["Offset of field: image_info::start_stack"] + [::std::mem::offset_of!(image_info, start_stack) - 56usize]; + ["Offset of field: image_info::stack_limit"] + [::std::mem::offset_of!(image_info, stack_limit) - 64usize]; + ["Offset of field: image_info::vdso"][::std::mem::offset_of!(image_info, vdso) - 72usize]; + ["Offset of field: image_info::entry"][::std::mem::offset_of!(image_info, entry) - 80usize]; + ["Offset of field: image_info::code_offset"] + [::std::mem::offset_of!(image_info, code_offset) - 88usize]; + ["Offset of field: image_info::data_offset"] + [::std::mem::offset_of!(image_info, data_offset) - 96usize]; + ["Offset of field: image_info::saved_auxv"] + [::std::mem::offset_of!(image_info, saved_auxv) - 104usize]; + ["Offset of field: image_info::auxv_len"] + [::std::mem::offset_of!(image_info, auxv_len) - 112usize]; + ["Offset of field: image_info::argc"][::std::mem::offset_of!(image_info, argc) - 120usize]; + ["Offset of field: 
image_info::argv"][::std::mem::offset_of!(image_info, argv) - 128usize]; + ["Offset of field: image_info::envc"][::std::mem::offset_of!(image_info, envc) - 136usize]; + ["Offset of field: image_info::envp"][::std::mem::offset_of!(image_info, envp) - 144usize]; + ["Offset of field: image_info::file_string"] + [::std::mem::offset_of!(image_info, file_string) - 152usize]; + ["Offset of field: image_info::elf_flags"] + [::std::mem::offset_of!(image_info, elf_flags) - 160usize]; + ["Offset of field: image_info::personality"] + [::std::mem::offset_of!(image_info, personality) - 164usize]; + ["Offset of field: image_info::alignment"] + [::std::mem::offset_of!(image_info, alignment) - 168usize]; + ["Offset of field: image_info::exec_stack"] + [::std::mem::offset_of!(image_info, exec_stack) - 176usize]; + ["Offset of field: image_info::arg_strings"] + [::std::mem::offset_of!(image_info, arg_strings) - 184usize]; + ["Offset of field: image_info::env_strings"] + [::std::mem::offset_of!(image_info, env_strings) - 192usize]; + ["Offset of field: image_info::loadmap_addr"] + [::std::mem::offset_of!(image_info, loadmap_addr) - 200usize]; + ["Offset of field: image_info::nsegs"][::std::mem::offset_of!(image_info, nsegs) - 208usize]; + ["Offset of field: image_info::loadsegs"] + [::std::mem::offset_of!(image_info, loadsegs) - 216usize]; + ["Offset of field: image_info::pt_dynamic_addr"] + [::std::mem::offset_of!(image_info, pt_dynamic_addr) - 224usize]; + ["Offset of field: image_info::interpreter_loadmap_addr"] + [::std::mem::offset_of!(image_info, interpreter_loadmap_addr) - 232usize]; + ["Offset of field: image_info::interpreter_pt_dynamic_addr"] + [::std::mem::offset_of!(image_info, interpreter_pt_dynamic_addr) - 240usize]; + ["Offset of field: image_info::other_info"] + [::std::mem::offset_of!(image_info, other_info) - 248usize]; + ["Offset of field: image_info::note_flags"] + [::std::mem::offset_of!(image_info, note_flags) - 256usize]; +}; +impl Default for image_info { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct tb_tc { + pub ptr: *const ::std::os::raw::c_void, + pub size: usize, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of tb_tc"][::std::mem::size_of::() - 16usize]; + ["Alignment of tb_tc"][::std::mem::align_of::() - 8usize]; + ["Offset of field: tb_tc::ptr"][::std::mem::offset_of!(tb_tc, ptr) - 0usize]; + ["Offset of field: tb_tc::size"][::std::mem::offset_of!(tb_tc, size) - 8usize]; +}; +impl Default for tb_tc { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct TranslationBlock { + pub pc: vaddr, + pub cs_base: u64, + pub flags: u32, + pub cflags: u32, + pub size: u16, + pub icount: u16, + pub tc: tb_tc, + pub itree: IntervalTreeNode, + pub jmp_lock: QemuSpin, + pub jmp_reset_offset: [u16; 2usize], + pub jmp_insn_offset: [u16; 2usize], + pub jmp_target_addr: [usize; 2usize], + pub jmp_list_head: usize, + pub jmp_list_next: [usize; 2usize], + pub jmp_dest: [usize; 2usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of TranslationBlock"][::std::mem::size_of::() - 168usize]; + ["Alignment of TranslationBlock"][::std::mem::align_of::() - 8usize]; + ["Offset of 
field: TranslationBlock::pc"] + [::std::mem::offset_of!(TranslationBlock, pc) - 0usize]; + ["Offset of field: TranslationBlock::cs_base"] + [::std::mem::offset_of!(TranslationBlock, cs_base) - 8usize]; + ["Offset of field: TranslationBlock::flags"] + [::std::mem::offset_of!(TranslationBlock, flags) - 16usize]; + ["Offset of field: TranslationBlock::cflags"] + [::std::mem::offset_of!(TranslationBlock, cflags) - 20usize]; + ["Offset of field: TranslationBlock::size"] + [::std::mem::offset_of!(TranslationBlock, size) - 24usize]; + ["Offset of field: TranslationBlock::icount"] + [::std::mem::offset_of!(TranslationBlock, icount) - 26usize]; + ["Offset of field: TranslationBlock::tc"] + [::std::mem::offset_of!(TranslationBlock, tc) - 32usize]; + ["Offset of field: TranslationBlock::itree"] + [::std::mem::offset_of!(TranslationBlock, itree) - 48usize]; + ["Offset of field: TranslationBlock::jmp_lock"] + [::std::mem::offset_of!(TranslationBlock, jmp_lock) - 96usize]; + ["Offset of field: TranslationBlock::jmp_reset_offset"] + [::std::mem::offset_of!(TranslationBlock, jmp_reset_offset) - 100usize]; + ["Offset of field: TranslationBlock::jmp_insn_offset"] + [::std::mem::offset_of!(TranslationBlock, jmp_insn_offset) - 104usize]; + ["Offset of field: TranslationBlock::jmp_target_addr"] + [::std::mem::offset_of!(TranslationBlock, jmp_target_addr) - 112usize]; + ["Offset of field: TranslationBlock::jmp_list_head"] + [::std::mem::offset_of!(TranslationBlock, jmp_list_head) - 128usize]; + ["Offset of field: TranslationBlock::jmp_list_next"] + [::std::mem::offset_of!(TranslationBlock, jmp_list_next) - 136usize]; + ["Offset of field: TranslationBlock::jmp_dest"] + [::std::mem::offset_of!(TranslationBlock, jmp_dest) - 152usize]; +}; +impl Default for TranslationBlock { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +extern "C" { + pub static mut exec_path: *mut ::std::os::raw::c_char; +} +extern "C" { + pub static mut mmap_next_start: abi_ulong; +} +extern "C" { + pub fn target_mprotect( + start: abi_ulong, + len: abi_ulong, + prot: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn target_mmap( + start: abi_ulong, + len: abi_ulong, + prot: ::std::os::raw::c_int, + flags: ::std::os::raw::c_int, + fd: ::std::os::raw::c_int, + offset: off_t, + ) -> abi_long; +} +extern "C" { + pub fn target_munmap(start: abi_ulong, len: abi_ulong) -> ::std::os::raw::c_int; +} +extern "C" { + #[doc = " read_self_maps:\n\n Read /proc/self/maps and return a tree of MapInfo structures."] + pub fn read_self_maps() -> *mut IntervalTreeRoot; +} +extern "C" { + #[doc = " free_self_maps:\n @info: an interval tree\n\n Free a tree of MapInfo structures."] + pub fn free_self_maps(root: *mut IntervalTreeRoot); +} +extern "C" { + pub fn libafl_qemu_set_breakpoint(pc: target_ulong) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_remove_breakpoint(pc: target_ulong) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_trigger_breakpoint(cpu: *mut CPUState); +} +extern "C" { + pub fn libafl_qemu_breakpoint_run(pc_next: vaddr); +} +pub const libafl_exit_reason_kind_INTERNAL: libafl_exit_reason_kind = libafl_exit_reason_kind(0); +pub const libafl_exit_reason_kind_BREAKPOINT: libafl_exit_reason_kind = libafl_exit_reason_kind(1); +pub const libafl_exit_reason_kind_SYNC_EXIT: libafl_exit_reason_kind = libafl_exit_reason_kind(2); +pub const libafl_exit_reason_kind_TIMEOUT: 
libafl_exit_reason_kind = libafl_exit_reason_kind(3); +impl ::std::ops::BitOr for libafl_exit_reason_kind { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + libafl_exit_reason_kind(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for libafl_exit_reason_kind { + #[inline] + fn bitor_assign(&mut self, rhs: libafl_exit_reason_kind) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for libafl_exit_reason_kind { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + libafl_exit_reason_kind(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for libafl_exit_reason_kind { + #[inline] + fn bitand_assign(&mut self, rhs: libafl_exit_reason_kind) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct libafl_exit_reason_kind(pub ::std::os::raw::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct libafl_exit_reason_internal { + pub cause: ShutdownCause, + pub signal: ::std::os::raw::c_int, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of libafl_exit_reason_internal"] + [::std::mem::size_of::() - 8usize]; + ["Alignment of libafl_exit_reason_internal"] + [::std::mem::align_of::() - 4usize]; + ["Offset of field: libafl_exit_reason_internal::cause"] + [::std::mem::offset_of!(libafl_exit_reason_internal, cause) - 0usize]; + ["Offset of field: libafl_exit_reason_internal::signal"] + [::std::mem::offset_of!(libafl_exit_reason_internal, signal) - 4usize]; +}; +impl Default for libafl_exit_reason_internal { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct libafl_exit_reason_breakpoint { + pub addr: target_ulong, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of libafl_exit_reason_breakpoint"] + [::std::mem::size_of::() - 8usize]; + ["Alignment of libafl_exit_reason_breakpoint"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: libafl_exit_reason_breakpoint::addr"] + [::std::mem::offset_of!(libafl_exit_reason_breakpoint, addr) - 0usize]; +}; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct libafl_exit_reason_sync_exit {} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of libafl_exit_reason_sync_exit"] + [::std::mem::size_of::() - 0usize]; + ["Alignment of libafl_exit_reason_sync_exit"] + [::std::mem::align_of::() - 1usize]; +}; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct libafl_exit_reason_timeout {} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of libafl_exit_reason_timeout"] + [::std::mem::size_of::() - 0usize]; + ["Alignment of libafl_exit_reason_timeout"] + [::std::mem::align_of::() - 1usize]; +}; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct libafl_exit_reason { + pub kind: libafl_exit_reason_kind, + pub cpu: *mut CPUState, + pub next_pc: vaddr, + pub data: libafl_exit_reason__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union libafl_exit_reason__bindgen_ty_1 { + pub internal: libafl_exit_reason_internal, + pub breakpoint: libafl_exit_reason_breakpoint, + pub sync_exit: libafl_exit_reason_sync_exit, + pub timeout: libafl_exit_reason_timeout, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of libafl_exit_reason__bindgen_ty_1"] + 
[::std::mem::size_of::() - 8usize]; + ["Alignment of libafl_exit_reason__bindgen_ty_1"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: libafl_exit_reason__bindgen_ty_1::internal"] + [::std::mem::offset_of!(libafl_exit_reason__bindgen_ty_1, internal) - 0usize]; + ["Offset of field: libafl_exit_reason__bindgen_ty_1::breakpoint"] + [::std::mem::offset_of!(libafl_exit_reason__bindgen_ty_1, breakpoint) - 0usize]; + ["Offset of field: libafl_exit_reason__bindgen_ty_1::sync_exit"] + [::std::mem::offset_of!(libafl_exit_reason__bindgen_ty_1, sync_exit) - 0usize]; + ["Offset of field: libafl_exit_reason__bindgen_ty_1::timeout"] + [::std::mem::offset_of!(libafl_exit_reason__bindgen_ty_1, timeout) - 0usize]; +}; +impl Default for libafl_exit_reason__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for libafl_exit_reason__bindgen_ty_1 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "libafl_exit_reason__bindgen_ty_1 {{ union }}") + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of libafl_exit_reason"][::std::mem::size_of::() - 32usize]; + ["Alignment of libafl_exit_reason"][::std::mem::align_of::() - 8usize]; + ["Offset of field: libafl_exit_reason::kind"] + [::std::mem::offset_of!(libafl_exit_reason, kind) - 0usize]; + ["Offset of field: libafl_exit_reason::cpu"] + [::std::mem::offset_of!(libafl_exit_reason, cpu) - 8usize]; + ["Offset of field: libafl_exit_reason::next_pc"] + [::std::mem::offset_of!(libafl_exit_reason, next_pc) - 16usize]; + ["Offset of field: libafl_exit_reason::data"] + [::std::mem::offset_of!(libafl_exit_reason, data) - 24usize]; +}; +impl Default for libafl_exit_reason { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for libafl_exit_reason { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "libafl_exit_reason {{ kind: {:?}, cpu: {:?}, data: {:?} }}", + self.kind, self.cpu, self.data + ) + } +} +extern "C" { + pub fn libafl_last_exit_cpu() -> *mut CPUState; +} +extern "C" { + pub fn libafl_exit_signal_vm_start(); +} +extern "C" { + pub fn libafl_exit_asap() -> bool; +} +extern "C" { + pub fn libafl_sync_exit_cpu(); +} +extern "C" { + pub fn libafl_exit_request_internal( + cpu: *mut CPUState, + pc: u64, + cause: ShutdownCause, + signal: ::std::os::raw::c_int, + ); +} +extern "C" { + pub fn libafl_exit_request_breakpoint(cpu: *mut CPUState, pc: target_ulong); +} +extern "C" { + pub fn libafl_exit_request_sync_backdoor(cpu: *mut CPUState, pc: target_ulong); +} +extern "C" { + pub fn libafl_get_exit_reason() -> *mut libafl_exit_reason; +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct libafl_mapinfo { + pub start: target_ulong, + pub end: target_ulong, + pub offset: target_ulong, + pub path: *const ::std::os::raw::c_char, + pub flags: ::std::os::raw::c_int, + pub is_priv: ::std::os::raw::c_int, + pub is_valid: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of libafl_mapinfo"][::std::mem::size_of::() - 48usize]; + ["Alignment of libafl_mapinfo"][::std::mem::align_of::() - 8usize]; + ["Offset of field: libafl_mapinfo::start"] + [::std::mem::offset_of!(libafl_mapinfo, start) - 0usize]; + ["Offset of 
field: libafl_mapinfo::end"][::std::mem::offset_of!(libafl_mapinfo, end) - 8usize]; + ["Offset of field: libafl_mapinfo::offset"] + [::std::mem::offset_of!(libafl_mapinfo, offset) - 16usize]; + ["Offset of field: libafl_mapinfo::path"] + [::std::mem::offset_of!(libafl_mapinfo, path) - 24usize]; + ["Offset of field: libafl_mapinfo::flags"] + [::std::mem::offset_of!(libafl_mapinfo, flags) - 32usize]; + ["Offset of field: libafl_mapinfo::is_priv"] + [::std::mem::offset_of!(libafl_mapinfo, is_priv) - 36usize]; + ["Offset of field: libafl_mapinfo::is_valid"] + [::std::mem::offset_of!(libafl_mapinfo, is_valid) - 40usize]; +}; +impl Default for libafl_mapinfo { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +extern "C" { + pub static mut libafl_dump_core_hook: + ::std::option::Option; +} +extern "C" { + pub static mut libafl_force_dfl: ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_dump_core_exec(signal: ::std::os::raw::c_int); +} +extern "C" { + pub fn libafl_qemu_handle_crash( + host_sig: ::std::os::raw::c_int, + info: *mut siginfo_t, + puc: *mut ::std::os::raw::c_void, + ); +} +extern "C" { + pub fn libafl_maps_first(map_info: *mut IntervalTreeRoot) -> *mut IntervalTreeNode; +} +extern "C" { + pub fn libafl_maps_next( + pageflags_maps_node: *mut IntervalTreeNode, + proc_maps_node: *mut IntervalTreeRoot, + ret: *mut libafl_mapinfo, + ) -> *mut IntervalTreeNode; +} +extern "C" { + pub fn libafl_load_addr() -> u64; +} +extern "C" { + pub fn libafl_get_image_info() -> *mut image_info; +} +extern "C" { + pub fn libafl_get_brk() -> u64; +} +extern "C" { + pub fn libafl_set_brk(new_brk: u64) -> u64; +} +extern "C" { + pub fn libafl_qemu_init(argc: ::std::os::raw::c_int, argv: *mut *mut ::std::os::raw::c_char); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct AccelCPUClass { + pub parent_class: ObjectClass, + pub cpu_class_init: ::std::option::Option, + pub cpu_instance_init: ::std::option::Option, + pub cpu_target_realize: ::std::option::Option< + unsafe extern "C" fn(cpu: *mut CPUState, errp: *mut *mut Error) -> bool, + >, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of AccelCPUClass"][::std::mem::size_of::() - 120usize]; + ["Alignment of AccelCPUClass"][::std::mem::align_of::() - 8usize]; + ["Offset of field: AccelCPUClass::parent_class"] + [::std::mem::offset_of!(AccelCPUClass, parent_class) - 0usize]; + ["Offset of field: AccelCPUClass::cpu_class_init"] + [::std::mem::offset_of!(AccelCPUClass, cpu_class_init) - 96usize]; + ["Offset of field: AccelCPUClass::cpu_instance_init"] + [::std::mem::offset_of!(AccelCPUClass, cpu_instance_init) - 104usize]; + ["Offset of field: AccelCPUClass::cpu_target_realize"] + [::std::mem::offset_of!(AccelCPUClass, cpu_target_realize) - 112usize]; +}; +impl Default for AccelCPUClass { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub const qemu_plugin_mem_rw_QEMU_PLUGIN_MEM_R: qemu_plugin_mem_rw = qemu_plugin_mem_rw(1); +pub const qemu_plugin_mem_rw_QEMU_PLUGIN_MEM_W: qemu_plugin_mem_rw = qemu_plugin_mem_rw(2); +pub const qemu_plugin_mem_rw_QEMU_PLUGIN_MEM_RW: qemu_plugin_mem_rw = qemu_plugin_mem_rw(3); +impl ::std::ops::BitOr for qemu_plugin_mem_rw { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + qemu_plugin_mem_rw(self.0 | other.0) + } +} 
+impl ::std::ops::BitOrAssign for qemu_plugin_mem_rw { + #[inline] + fn bitor_assign(&mut self, rhs: qemu_plugin_mem_rw) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for qemu_plugin_mem_rw { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + qemu_plugin_mem_rw(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for qemu_plugin_mem_rw { + #[inline] + fn bitand_assign(&mut self, rhs: qemu_plugin_mem_rw) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct qemu_plugin_mem_rw(pub ::std::os::raw::c_uint); +#[doc = " typedef qemu_plugin_meminfo_t - opaque memory transaction handle\n\n This can be further queried using the qemu_plugin_mem_* query\n functions."] +pub type qemu_plugin_meminfo_t = u32; +extern "C" { + #[doc = " qemu_plugin_get_hwaddr() - return handle for memory operation\n @info: opaque memory info structure\n @vaddr: the virtual address of the memory operation\n\n For system emulation returns a qemu_plugin_hwaddr handle to query\n details about the actual physical address backing the virtual\n address. For linux-user guests it just returns NULL.\n\n This handle is *only* valid for the duration of the callback. Any\n information about the handle should be recovered before the\n callback returns."] + pub fn qemu_plugin_get_hwaddr( + info: qemu_plugin_meminfo_t, + vaddr: u64, + ) -> *mut qemu_plugin_hwaddr; +} +extern "C" { + #[doc = " qemu_plugin_hwaddr_phys_addr() - query physical address for memory operation\n @haddr: address handle from qemu_plugin_get_hwaddr()\n\n Returns the physical address associated with the memory operation\n\n Note that the returned physical address may not be unique if you are dealing\n with multiple address spaces."] + pub fn qemu_plugin_hwaddr_phys_addr(haddr: *const qemu_plugin_hwaddr) -> u64; +} +#[doc = " struct CPUPluginState - per-CPU state for plugins\n @event_mask: plugin event bitmap. 
Modified only via async work."] +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct CPUPluginState { + pub event_mask: [::std::os::raw::c_ulong; 1usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of CPUPluginState"][::std::mem::size_of::() - 8usize]; + ["Alignment of CPUPluginState"][::std::mem::align_of::() - 8usize]; + ["Offset of field: CPUPluginState::event_mask"] + [::std::mem::offset_of!(CPUPluginState, event_mask) - 0usize]; +}; +pub const TCGReg_TCG_REG_EAX: TCGReg = TCGReg(0); +pub const TCGReg_TCG_REG_ECX: TCGReg = TCGReg(1); +pub const TCGReg_TCG_REG_EDX: TCGReg = TCGReg(2); +pub const TCGReg_TCG_REG_EBX: TCGReg = TCGReg(3); +pub const TCGReg_TCG_REG_ESP: TCGReg = TCGReg(4); +pub const TCGReg_TCG_REG_EBP: TCGReg = TCGReg(5); +pub const TCGReg_TCG_REG_ESI: TCGReg = TCGReg(6); +pub const TCGReg_TCG_REG_EDI: TCGReg = TCGReg(7); +pub const TCGReg_TCG_REG_R8: TCGReg = TCGReg(8); +pub const TCGReg_TCG_REG_R9: TCGReg = TCGReg(9); +pub const TCGReg_TCG_REG_R10: TCGReg = TCGReg(10); +pub const TCGReg_TCG_REG_R11: TCGReg = TCGReg(11); +pub const TCGReg_TCG_REG_R12: TCGReg = TCGReg(12); +pub const TCGReg_TCG_REG_R13: TCGReg = TCGReg(13); +pub const TCGReg_TCG_REG_R14: TCGReg = TCGReg(14); +pub const TCGReg_TCG_REG_R15: TCGReg = TCGReg(15); +pub const TCGReg_TCG_REG_XMM0: TCGReg = TCGReg(16); +pub const TCGReg_TCG_REG_XMM1: TCGReg = TCGReg(17); +pub const TCGReg_TCG_REG_XMM2: TCGReg = TCGReg(18); +pub const TCGReg_TCG_REG_XMM3: TCGReg = TCGReg(19); +pub const TCGReg_TCG_REG_XMM4: TCGReg = TCGReg(20); +pub const TCGReg_TCG_REG_XMM5: TCGReg = TCGReg(21); +pub const TCGReg_TCG_REG_XMM6: TCGReg = TCGReg(22); +pub const TCGReg_TCG_REG_XMM7: TCGReg = TCGReg(23); +pub const TCGReg_TCG_REG_XMM8: TCGReg = TCGReg(24); +pub const TCGReg_TCG_REG_XMM9: TCGReg = TCGReg(25); +pub const TCGReg_TCG_REG_XMM10: TCGReg = TCGReg(26); +pub const TCGReg_TCG_REG_XMM11: TCGReg = TCGReg(27); +pub const TCGReg_TCG_REG_XMM12: TCGReg = TCGReg(28); +pub const TCGReg_TCG_REG_XMM13: TCGReg = TCGReg(29); +pub const TCGReg_TCG_REG_XMM14: TCGReg = TCGReg(30); +pub const TCGReg_TCG_REG_XMM15: TCGReg = TCGReg(31); +pub const TCGReg_TCG_REG_RAX: TCGReg = TCGReg(0); +pub const TCGReg_TCG_REG_RCX: TCGReg = TCGReg(1); +pub const TCGReg_TCG_REG_RDX: TCGReg = TCGReg(2); +pub const TCGReg_TCG_REG_RBX: TCGReg = TCGReg(3); +pub const TCGReg_TCG_REG_RSP: TCGReg = TCGReg(4); +pub const TCGReg_TCG_REG_RBP: TCGReg = TCGReg(5); +pub const TCGReg_TCG_REG_RSI: TCGReg = TCGReg(6); +pub const TCGReg_TCG_REG_RDI: TCGReg = TCGReg(7); +pub const TCGReg_TCG_AREG0: TCGReg = TCGReg(5); +pub const TCGReg_TCG_REG_CALL_STACK: TCGReg = TCGReg(4); +impl ::std::ops::BitOr for TCGReg { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + TCGReg(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for TCGReg { + #[inline] + fn bitor_assign(&mut self, rhs: TCGReg) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for TCGReg { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + TCGReg(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for TCGReg { + #[inline] + fn bitand_assign(&mut self, rhs: TCGReg) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct TCGReg(pub ::std::os::raw::c_uint); +pub const TCGType_TCG_TYPE_I32: TCGType = TCGType(0); +pub const TCGType_TCG_TYPE_I64: TCGType = TCGType(1); +pub const TCGType_TCG_TYPE_I128: TCGType = TCGType(2); +pub const TCGType_TCG_TYPE_V64: 
TCGType = TCGType(3); +pub const TCGType_TCG_TYPE_V128: TCGType = TCGType(4); +pub const TCGType_TCG_TYPE_V256: TCGType = TCGType(5); +pub const TCGType_TCG_TYPE_REG: TCGType = TCGType(1); +pub const TCGType_TCG_TYPE_PTR: TCGType = TCGType(1); +impl ::std::ops::BitOr for TCGType { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + TCGType(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for TCGType { + #[inline] + fn bitor_assign(&mut self, rhs: TCGType) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for TCGType { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + TCGType(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for TCGType { + #[inline] + fn bitand_assign(&mut self, rhs: TCGType) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct TCGType(pub ::std::os::raw::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct TCGv_i64_d { + _unused: [u8; 0], +} +pub type TCGv_i64 = *mut TCGv_i64_d; +pub const TCGTempVal_TEMP_VAL_DEAD: TCGTempVal = TCGTempVal(0); +pub const TCGTempVal_TEMP_VAL_REG: TCGTempVal = TCGTempVal(1); +pub const TCGTempVal_TEMP_VAL_MEM: TCGTempVal = TCGTempVal(2); +pub const TCGTempVal_TEMP_VAL_CONST: TCGTempVal = TCGTempVal(3); +impl ::std::ops::BitOr for TCGTempVal { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + TCGTempVal(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for TCGTempVal { + #[inline] + fn bitor_assign(&mut self, rhs: TCGTempVal) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for TCGTempVal { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + TCGTempVal(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for TCGTempVal { + #[inline] + fn bitand_assign(&mut self, rhs: TCGTempVal) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct TCGTempVal(pub ::std::os::raw::c_uint); +pub const TCGTempKind_TEMP_EBB: TCGTempKind = TCGTempKind(0); +pub const TCGTempKind_TEMP_TB: TCGTempKind = TCGTempKind(1); +pub const TCGTempKind_TEMP_GLOBAL: TCGTempKind = TCGTempKind(2); +pub const TCGTempKind_TEMP_FIXED: TCGTempKind = TCGTempKind(3); +pub const TCGTempKind_TEMP_CONST: TCGTempKind = TCGTempKind(4); +impl ::std::ops::BitOr for TCGTempKind { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + TCGTempKind(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for TCGTempKind { + #[inline] + fn bitor_assign(&mut self, rhs: TCGTempKind) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for TCGTempKind { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + TCGTempKind(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for TCGTempKind { + #[inline] + fn bitand_assign(&mut self, rhs: TCGTempKind) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct TCGTempKind(pub ::std::os::raw::c_uint); +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct TCGTemp { + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 6usize]>, + pub val: i64, + pub mem_base: *mut TCGTemp, + pub mem_offset: isize, + pub name: *const ::std::os::raw::c_char, + pub state: usize, + pub state_ptr: *mut ::std::os::raw::c_void, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of TCGTemp"][::std::mem::size_of::() - 56usize]; + ["Alignment of 
TCGTemp"][::std::mem::align_of::() - 8usize]; + ["Offset of field: TCGTemp::val"][::std::mem::offset_of!(TCGTemp, val) - 8usize]; + ["Offset of field: TCGTemp::mem_base"][::std::mem::offset_of!(TCGTemp, mem_base) - 16usize]; + ["Offset of field: TCGTemp::mem_offset"][::std::mem::offset_of!(TCGTemp, mem_offset) - 24usize]; + ["Offset of field: TCGTemp::name"][::std::mem::offset_of!(TCGTemp, name) - 32usize]; + ["Offset of field: TCGTemp::state"][::std::mem::offset_of!(TCGTemp, state) - 40usize]; + ["Offset of field: TCGTemp::state_ptr"][::std::mem::offset_of!(TCGTemp, state_ptr) - 48usize]; +}; +impl Default for TCGTemp { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl TCGTemp { + #[inline] + pub fn reg(&self) -> TCGReg { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 8u8) as u32) } + } + #[inline] + pub fn set_reg(&mut self, val: TCGReg) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 8u8, val as u64) + } + } + #[inline] + pub fn val_type(&self) -> TCGTempVal { + unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 8u8) as u32) } + } + #[inline] + pub fn set_val_type(&mut self, val: TCGTempVal) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(8usize, 8u8, val as u64) + } + } + #[inline] + pub fn base_type(&self) -> TCGType { + unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u32) } + } + #[inline] + pub fn set_base_type(&mut self, val: TCGType) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(16usize, 8u8, val as u64) + } + } + #[inline] + pub fn type_(&self) -> TCGType { + unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 8u8) as u32) } + } + #[inline] + pub fn set_type(&mut self, val: TCGType) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(24usize, 8u8, val as u64) + } + } + #[inline] + pub fn kind(&self) -> TCGTempKind { + unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 3u8) as u32) } + } + #[inline] + pub fn set_kind(&mut self, val: TCGTempKind) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(32usize, 3u8, val as u64) + } + } + #[inline] + pub fn indirect_reg(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(35usize, 1u8) as u32) } + } + #[inline] + pub fn set_indirect_reg(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(35usize, 1u8, val as u64) + } + } + #[inline] + pub fn indirect_base(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(36usize, 1u8) as u32) } + } + #[inline] + pub fn set_indirect_base(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(36usize, 1u8, val as u64) + } + } + #[inline] + pub fn mem_coherent(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(37usize, 1u8) as u32) } + } + #[inline] + pub fn set_mem_coherent(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(37usize, 1u8, val as u64) + } + } + #[inline] + pub fn mem_allocated(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(38usize, 1u8) as u32) } + } + #[inline] + pub fn set_mem_allocated(&mut 
self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(38usize, 1u8, val as u64) + } + } + #[inline] + pub fn temp_allocated(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(39usize, 1u8) as u32) } + } + #[inline] + pub fn set_temp_allocated(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(39usize, 1u8, val as u64) + } + } + #[inline] + pub fn temp_subindex(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(40usize, 2u8) as u32) } + } + #[inline] + pub fn set_temp_subindex(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(40usize, 2u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1( + reg: TCGReg, + val_type: TCGTempVal, + base_type: TCGType, + type_: TCGType, + kind: TCGTempKind, + indirect_reg: ::std::os::raw::c_uint, + indirect_base: ::std::os::raw::c_uint, + mem_coherent: ::std::os::raw::c_uint, + mem_allocated: ::std::os::raw::c_uint, + temp_allocated: ::std::os::raw::c_uint, + temp_subindex: ::std::os::raw::c_uint, + ) -> __BindgenBitfieldUnit<[u8; 6usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 6usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 8u8, { + let reg: u32 = unsafe { ::std::mem::transmute(reg) }; + reg as u64 + }); + __bindgen_bitfield_unit.set(8usize, 8u8, { + let val_type: u32 = unsafe { ::std::mem::transmute(val_type) }; + val_type as u64 + }); + __bindgen_bitfield_unit.set(16usize, 8u8, { + let base_type: u32 = unsafe { ::std::mem::transmute(base_type) }; + base_type as u64 + }); + __bindgen_bitfield_unit.set(24usize, 8u8, { + let type_: u32 = unsafe { ::std::mem::transmute(type_) }; + type_ as u64 + }); + __bindgen_bitfield_unit.set(32usize, 3u8, { + let kind: u32 = unsafe { ::std::mem::transmute(kind) }; + kind as u64 + }); + __bindgen_bitfield_unit.set(35usize, 1u8, { + let indirect_reg: u32 = unsafe { ::std::mem::transmute(indirect_reg) }; + indirect_reg as u64 + }); + __bindgen_bitfield_unit.set(36usize, 1u8, { + let indirect_base: u32 = unsafe { ::std::mem::transmute(indirect_base) }; + indirect_base as u64 + }); + __bindgen_bitfield_unit.set(37usize, 1u8, { + let mem_coherent: u32 = unsafe { ::std::mem::transmute(mem_coherent) }; + mem_coherent as u64 + }); + __bindgen_bitfield_unit.set(38usize, 1u8, { + let mem_allocated: u32 = unsafe { ::std::mem::transmute(mem_allocated) }; + mem_allocated as u64 + }); + __bindgen_bitfield_unit.set(39usize, 1u8, { + let temp_allocated: u32 = unsafe { ::std::mem::transmute(temp_allocated) }; + temp_allocated as u64 + }); + __bindgen_bitfield_unit.set(40usize, 2u8, { + let temp_subindex: u32 = unsafe { ::std::mem::transmute(temp_subindex) }; + temp_subindex as u64 + }); + __bindgen_bitfield_unit + } +} +pub const TCGCallReturnKind_TCG_CALL_RET_NORMAL: TCGCallReturnKind = TCGCallReturnKind(0); +pub const TCGCallReturnKind_TCG_CALL_RET_BY_REF: TCGCallReturnKind = TCGCallReturnKind(1); +pub const TCGCallReturnKind_TCG_CALL_RET_BY_VEC: TCGCallReturnKind = TCGCallReturnKind(2); +impl ::std::ops::BitOr for TCGCallReturnKind { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + TCGCallReturnKind(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for TCGCallReturnKind { + #[inline] + fn bitor_assign(&mut self, rhs: TCGCallReturnKind) { + self.0 |= rhs.0; + } +} +impl 
::std::ops::BitAnd for TCGCallReturnKind { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + TCGCallReturnKind(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for TCGCallReturnKind { + #[inline] + fn bitand_assign(&mut self, rhs: TCGCallReturnKind) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct TCGCallReturnKind(pub ::std::os::raw::c_uint); +pub const TCGCallArgumentKind_TCG_CALL_ARG_NORMAL: TCGCallArgumentKind = TCGCallArgumentKind(0); +pub const TCGCallArgumentKind_TCG_CALL_ARG_EVEN: TCGCallArgumentKind = TCGCallArgumentKind(1); +pub const TCGCallArgumentKind_TCG_CALL_ARG_EXTEND: TCGCallArgumentKind = TCGCallArgumentKind(2); +pub const TCGCallArgumentKind_TCG_CALL_ARG_EXTEND_U: TCGCallArgumentKind = TCGCallArgumentKind(3); +pub const TCGCallArgumentKind_TCG_CALL_ARG_EXTEND_S: TCGCallArgumentKind = TCGCallArgumentKind(4); +pub const TCGCallArgumentKind_TCG_CALL_ARG_BY_REF: TCGCallArgumentKind = TCGCallArgumentKind(5); +pub const TCGCallArgumentKind_TCG_CALL_ARG_BY_REF_N: TCGCallArgumentKind = TCGCallArgumentKind(6); +impl ::std::ops::BitOr for TCGCallArgumentKind { + type Output = Self; + #[inline] + fn bitor(self, other: Self) -> Self { + TCGCallArgumentKind(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for TCGCallArgumentKind { + #[inline] + fn bitor_assign(&mut self, rhs: TCGCallArgumentKind) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for TCGCallArgumentKind { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + TCGCallArgumentKind(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for TCGCallArgumentKind { + #[inline] + fn bitand_assign(&mut self, rhs: TCGCallArgumentKind) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct TCGCallArgumentKind(pub ::std::os::raw::c_uint); +#[repr(C)] +#[repr(align(4))] +#[derive(Debug, Copy, Clone)] +pub struct TCGCallArgumentLoc { + pub _bitfield_align_1: [u8; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of TCGCallArgumentLoc"][::std::mem::size_of::() - 4usize]; + ["Alignment of TCGCallArgumentLoc"][::std::mem::align_of::() - 4usize]; +}; +impl Default for TCGCallArgumentLoc { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl TCGCallArgumentLoc { + #[inline] + pub fn kind(&self) -> TCGCallArgumentKind { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 8u8) as u32) } + } + #[inline] + pub fn set_kind(&mut self, val: TCGCallArgumentKind) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 8u8, val as u64) + } + } + #[inline] + pub fn arg_slot(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 8u8) as u32) } + } + #[inline] + pub fn set_arg_slot(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(8usize, 8u8, val as u64) + } + } + #[inline] + pub fn ref_slot(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u32) } + } + #[inline] + pub fn set_ref_slot(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(16usize, 8u8, val as u64) + } + } + 
#[inline] + pub fn arg_idx(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 4u8) as u32) } + } + #[inline] + pub fn set_arg_idx(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(24usize, 4u8, val as u64) + } + } + #[inline] + pub fn tmp_subindex(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(28usize, 2u8) as u32) } + } + #[inline] + pub fn set_tmp_subindex(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(28usize, 2u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1( + kind: TCGCallArgumentKind, + arg_slot: ::std::os::raw::c_uint, + ref_slot: ::std::os::raw::c_uint, + arg_idx: ::std::os::raw::c_uint, + tmp_subindex: ::std::os::raw::c_uint, + ) -> __BindgenBitfieldUnit<[u8; 4usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 8u8, { + let kind: u32 = unsafe { ::std::mem::transmute(kind) }; + kind as u64 + }); + __bindgen_bitfield_unit.set(8usize, 8u8, { + let arg_slot: u32 = unsafe { ::std::mem::transmute(arg_slot) }; + arg_slot as u64 + }); + __bindgen_bitfield_unit.set(16usize, 8u8, { + let ref_slot: u32 = unsafe { ::std::mem::transmute(ref_slot) }; + ref_slot as u64 + }); + __bindgen_bitfield_unit.set(24usize, 4u8, { + let arg_idx: u32 = unsafe { ::std::mem::transmute(arg_idx) }; + arg_idx as u64 + }); + __bindgen_bitfield_unit.set(28usize, 2u8, { + let tmp_subindex: u32 = unsafe { ::std::mem::transmute(tmp_subindex) }; + tmp_subindex as u64 + }); + __bindgen_bitfield_unit + } +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct TCGHelperInfo { + pub func: *mut ::std::os::raw::c_void, + pub name: *const ::std::os::raw::c_char, + pub init: usize, + pub _bitfield_align_1: [u32; 0], + pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, + pub in_: [TCGCallArgumentLoc; 14usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of TCGHelperInfo"][::std::mem::size_of::() - 88usize]; + ["Alignment of TCGHelperInfo"][::std::mem::align_of::() - 8usize]; + ["Offset of field: TCGHelperInfo::func"][::std::mem::offset_of!(TCGHelperInfo, func) - 0usize]; + ["Offset of field: TCGHelperInfo::name"][::std::mem::offset_of!(TCGHelperInfo, name) - 8usize]; + ["Offset of field: TCGHelperInfo::init"][::std::mem::offset_of!(TCGHelperInfo, init) - 16usize]; + ["Offset of field: TCGHelperInfo::in_"][::std::mem::offset_of!(TCGHelperInfo, in_) - 32usize]; +}; +impl Default for TCGHelperInfo { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl TCGHelperInfo { + #[inline] + pub fn typemask(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 32u8) as u32) } + } + #[inline] + pub fn set_typemask(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(0usize, 32u8, val as u64) + } + } + #[inline] + pub fn flags(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 8u8) as u32) } + } + #[inline] + pub fn set_flags(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(32usize, 8u8, val as u64) + } + } + #[inline] + 
pub fn nr_in(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(40usize, 8u8) as u32) } + } + #[inline] + pub fn set_nr_in(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(40usize, 8u8, val as u64) + } + } + #[inline] + pub fn nr_out(&self) -> ::std::os::raw::c_uint { + unsafe { ::std::mem::transmute(self._bitfield_1.get(48usize, 8u8) as u32) } + } + #[inline] + pub fn set_nr_out(&mut self, val: ::std::os::raw::c_uint) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(48usize, 8u8, val as u64) + } + } + #[inline] + pub fn out_kind(&self) -> TCGCallReturnKind { + unsafe { ::std::mem::transmute(self._bitfield_1.get(56usize, 8u8) as u32) } + } + #[inline] + pub fn set_out_kind(&mut self, val: TCGCallReturnKind) { + unsafe { + let val: u32 = ::std::mem::transmute(val); + self._bitfield_1.set(56usize, 8u8, val as u64) + } + } + #[inline] + pub fn new_bitfield_1( + typemask: ::std::os::raw::c_uint, + flags: ::std::os::raw::c_uint, + nr_in: ::std::os::raw::c_uint, + nr_out: ::std::os::raw::c_uint, + out_kind: TCGCallReturnKind, + ) -> __BindgenBitfieldUnit<[u8; 8usize]> { + let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); + __bindgen_bitfield_unit.set(0usize, 32u8, { + let typemask: u32 = unsafe { ::std::mem::transmute(typemask) }; + typemask as u64 + }); + __bindgen_bitfield_unit.set(32usize, 8u8, { + let flags: u32 = unsafe { ::std::mem::transmute(flags) }; + flags as u64 + }); + __bindgen_bitfield_unit.set(40usize, 8u8, { + let nr_in: u32 = unsafe { ::std::mem::transmute(nr_in) }; + nr_in as u64 + }); + __bindgen_bitfield_unit.set(48usize, 8u8, { + let nr_out: u32 = unsafe { ::std::mem::transmute(nr_out) }; + nr_out as u64 + }); + __bindgen_bitfield_unit.set(56usize, 8u8, { + let out_kind: u32 = unsafe { ::std::mem::transmute(out_kind) }; + out_kind as u64 + }); + __bindgen_bitfield_unit + } +} +pub type TCGv = TCGv_i64; +#[doc = " struct qemu_plugin_hwaddr - opaque hw address handle"] +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct qemu_plugin_hwaddr { + pub is_io: bool, + pub is_store: bool, + pub phys_addr: hwaddr, + pub mr: *mut MemoryRegion, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of qemu_plugin_hwaddr"][::std::mem::size_of::() - 24usize]; + ["Alignment of qemu_plugin_hwaddr"][::std::mem::align_of::() - 8usize]; + ["Offset of field: qemu_plugin_hwaddr::is_io"] + [::std::mem::offset_of!(qemu_plugin_hwaddr, is_io) - 0usize]; + ["Offset of field: qemu_plugin_hwaddr::is_store"] + [::std::mem::offset_of!(qemu_plugin_hwaddr, is_store) - 1usize]; + ["Offset of field: qemu_plugin_hwaddr::phys_addr"] + [::std::mem::offset_of!(qemu_plugin_hwaddr, phys_addr) - 8usize]; + ["Offset of field: qemu_plugin_hwaddr::mr"] + [::std::mem::offset_of!(qemu_plugin_hwaddr, mr) - 16usize]; +}; +impl Default for qemu_plugin_hwaddr { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +extern "C" { + #[doc = " tlb_plugin_lookup: query last TLB lookup\n @cpu: cpu environment\n\n This function can be used directly after a memory operation to\n query information about the access. 
It is used by the plugin\n infrastructure to expose more information about the address.\n\n It would only fail if not called from an instrumented memory access\n which would be an abuse of the API."] + pub fn tlb_plugin_lookup( + cpu: *mut CPUState, + addr: vaddr, + mmu_idx: ::std::os::raw::c_int, + is_store: bool, + data: *mut qemu_plugin_hwaddr, + ) -> bool; +} +extern "C" { + pub fn libafl_page_from_addr(addr: target_ulong) -> target_ulong; +} +extern "C" { + pub fn libafl_qemu_get_cpu(cpu_index: ::std::os::raw::c_int) -> *mut CPUState; +} +extern "C" { + pub fn libafl_qemu_num_cpus() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_current_cpu() -> *mut CPUState; +} +extern "C" { + pub fn libafl_qemu_cpu_index(arg1: *mut CPUState) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_write_reg( + cpu: *mut CPUState, + reg: ::std::os::raw::c_int, + val: *mut u8, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_read_reg( + cpu: *mut CPUState, + reg: ::std::os::raw::c_int, + val: *mut u8, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_num_regs(cpu: *mut CPUState) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_flush_jit(); +} +extern "C" { + pub fn libafl_breakpoint_invalidate(cpu: *mut CPUState, pc: target_ulong); +} +extern "C" { + pub fn libafl_qemu_main() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_run() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_set_qemu_env(env: *mut CPUArchState); +} +extern "C" { + pub fn libafl_qemu_add_gdb_cmd( + callback: ::std::option::Option< + unsafe extern "C" fn( + arg1: *mut ::std::os::raw::c_void, + arg2: *mut u8, + arg3: usize, + ) -> bool, + >, + data: *mut ::std::os::raw::c_void, + ); +} +extern "C" { + pub fn libafl_qemu_gdb_reply(buf: *const u8, len: usize); +} +extern "C" { + pub fn libafl_qemu_gdb_exec() -> bool; +} +extern "C" { + pub fn libafl_jit_trace_edge_hitcount(data: u64, id: u64) -> usize; +} +extern "C" { + pub fn libafl_jit_trace_edge_single(data: u64, id: u64) -> usize; +} +extern "C" { + pub fn libafl_jit_trace_block_hitcount(data: u64, id: u64) -> usize; +} +extern "C" { + pub fn libafl_jit_trace_block_single(data: u64, id: u64) -> usize; +} +extern "C" { + pub fn libafl_qemu_host_page_size() -> usize; +} +extern "C" { + pub fn libafl_tcg_gen_asan(addr: *mut TCGTemp, size: usize); +} +extern "C" { + pub fn libafl_gen_backdoor(pc: target_ulong); +} +extern "C" { + pub fn libafl_add_backdoor_hook( + exec: ::std::option::Option< + unsafe extern "C" fn(data: u64, cpu: *mut CPUArchState, pc: target_ulong), + >, + data: u64, + ) -> usize; +} +extern "C" { + pub fn libafl_qemu_remove_backdoor_hook( + num: usize, + invalidate: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_hook_backdoor_run(pc_next: vaddr); +} +extern "C" { + pub fn libafl_qemu_hook_block_post_gen(tb: *mut TranslationBlock, pc: vaddr); +} +extern "C" { + pub fn libafl_qemu_hook_block_run(pc: target_ulong); +} +extern "C" { + pub fn libafl_qemu_block_hook_set_jit( + num: usize, + jit: ::std::option::Option usize>, + ) -> bool; +} +extern "C" { + pub fn libafl_qemu_remove_block_hook( + num: usize, + invalidate: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_add_block_hook( + gen: ::std::option::Option u64>, + post_gen: ::std::option::Option< + unsafe extern "C" fn(data: u64, pc: target_ulong, block_length: target_ulong), + >, + exec: ::std::option::Option, + data: u64, + ) -> usize; +} 
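(Not part of the diff.) The `extern "C"` declarations above form the raw hook and CPU-introspection surface that the rest of `libafl_qemu_sys` re-exports. As a rough, hedged illustration of how these symbols are meant to be driven from Rust, the sketch below registers a backdoor hook and walks the emulated CPUs using only functions declared in this hunk (`libafl_add_backdoor_hook`, `libafl_qemu_num_cpus`, `libafl_qemu_get_cpu`, `libafl_qemu_read_reg`). The register index, the 16-byte buffer, and the crate path `libafl_qemu_sys` are illustrative assumptions; in practice the higher-level `libafl_qemu` crate (mentioned in the crate docs further down) wraps these unsafe calls, and the program only links when built against the LibAFL QEMU fork.

```rust
// Hypothetical usage sketch, assuming the libafl_qemu_sys bindings from this
// diff are available and linked against the LibAFL QEMU build.
// Register index 0 and the 16-byte buffer are placeholders; real register
// numbering and widths depend on the target architecture.
use libafl_qemu_sys::{
    libafl_add_backdoor_hook, libafl_qemu_get_cpu, libafl_qemu_num_cpus,
    libafl_qemu_read_reg, target_ulong, CPUArchState,
};

// Callback signature matches the `exec` parameter of libafl_add_backdoor_hook.
unsafe extern "C" fn on_backdoor(data: u64, _cpu: *mut CPUArchState, pc: target_ulong) {
    // `data` is the opaque value supplied at registration time.
    println!("backdoor hit at {pc:#x} (data = {data})");
}

fn main() {
    unsafe {
        // Register the callback; the returned id could later be passed to
        // libafl_qemu_remove_backdoor_hook.
        let _hook_id = libafl_add_backdoor_hook(Some(on_backdoor), 0);

        // Walk every emulated CPU and dump one register as raw bytes.
        for idx in 0..libafl_qemu_num_cpus() {
            let cpu = libafl_qemu_get_cpu(idx);
            let mut buf = [0u8; 16];
            let ret = libafl_qemu_read_reg(cpu, 0, buf.as_mut_ptr());
            println!("cpu {idx}: libafl_qemu_read_reg returned {ret}, buf = {buf:?}");
        }
    }
}
```

The design choice visible here is that every hook registration returns a numeric id and takes an opaque `data: u64`, so the safe wrappers in `libafl_qemu` can stash a pointer or index in `data` and unregister hooks individually without global state in the C side.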
+extern "C" { + pub fn libafl_gen_cmp(pc: target_ulong, op0: TCGv, op1: TCGv, ot: MemOp); +} +extern "C" { + pub fn libafl_add_cmp_hook( + gen: ::std::option::Option< + unsafe extern "C" fn(data: u64, pc: target_ulong, size: usize) -> u64, + >, + exec1: ::std::option::Option, + exec2: ::std::option::Option, + exec4: ::std::option::Option, + exec8: ::std::option::Option, + data: u64, + ) -> usize; +} +extern "C" { + pub fn libafl_qemu_remove_cmp_hook( + num: usize, + invalidate: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_gen_edge( + cpu: *mut CPUState, + src_block: target_ulong, + dst_block: target_ulong, + exit_n: ::std::os::raw::c_int, + cs_base: target_ulong, + flags: u32, + cflags: ::std::os::raw::c_int, + ) -> *mut TranslationBlock; +} +extern "C" { + pub fn libafl_add_edge_hook( + gen: ::std::option::Option< + unsafe extern "C" fn(data: u64, src: target_ulong, dst: target_ulong) -> u64, + >, + exec: ::std::option::Option, + data: u64, + ) -> usize; +} +extern "C" { + pub fn libafl_qemu_edge_hook_set_jit( + num: usize, + jit: ::std::option::Option usize>, + ) -> bool; +} +extern "C" { + pub fn libafl_qemu_remove_edge_hook( + num: usize, + invalidate: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_hook_edge_gen(src_block: target_ulong, dst_block: target_ulong) -> bool; +} +extern "C" { + pub fn libafl_qemu_hook_edge_run(); +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct libafl_instruction_hook { + pub data: u64, + pub num: usize, + pub addr: target_ulong, + pub helper_info: TCGHelperInfo, + pub next: *mut libafl_instruction_hook, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of libafl_instruction_hook"] + [::std::mem::size_of::() - 120usize]; + ["Alignment of libafl_instruction_hook"] + [::std::mem::align_of::() - 8usize]; + ["Offset of field: libafl_instruction_hook::data"] + [::std::mem::offset_of!(libafl_instruction_hook, data) - 0usize]; + ["Offset of field: libafl_instruction_hook::num"] + [::std::mem::offset_of!(libafl_instruction_hook, num) - 8usize]; + ["Offset of field: libafl_instruction_hook::addr"] + [::std::mem::offset_of!(libafl_instruction_hook, addr) - 16usize]; + ["Offset of field: libafl_instruction_hook::helper_info"] + [::std::mem::offset_of!(libafl_instruction_hook, helper_info) - 24usize]; + ["Offset of field: libafl_instruction_hook::next"] + [::std::mem::offset_of!(libafl_instruction_hook, next) - 112usize]; +}; +impl Default for libafl_instruction_hook { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +extern "C" { + pub fn libafl_qemu_add_instruction_hooks( + pc: target_ulong, + callback: ::std::option::Option, + data: u64, + invalidate: ::std::os::raw::c_int, + ) -> usize; +} +extern "C" { + pub fn libafl_qemu_remove_instruction_hook( + num: usize, + invalidate: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_remove_instruction_hooks_at( + addr: target_ulong, + invalidate: ::std::os::raw::c_int, + ) -> usize; +} +extern "C" { + pub fn libafl_search_instruction_hook(addr: target_ulong) -> *mut libafl_instruction_hook; +} +extern "C" { + pub fn libafl_qemu_hook_instruction_run(pc_next: vaddr); +} +extern "C" { + pub fn libafl_gen_read(addr: *mut TCGTemp, oi: MemOpIdx); +} +extern "C" { + pub fn libafl_gen_write(addr: *mut TCGTemp, oi: MemOpIdx); +} +extern "C" { + 
pub fn libafl_add_read_hook( + gen: ::std::option::Option< + unsafe extern "C" fn( + data: u64, + pc: target_ulong, + addr: *mut TCGTemp, + oi: MemOpIdx, + ) -> u64, + >, + exec1: ::std::option::Option, + exec2: ::std::option::Option, + exec4: ::std::option::Option, + exec8: ::std::option::Option, + execN: ::std::option::Option< + unsafe extern "C" fn(data: u64, id: u64, addr: target_ulong, size: usize), + >, + data: u64, + ) -> usize; +} +extern "C" { + pub fn libafl_add_write_hook( + gen: ::std::option::Option< + unsafe extern "C" fn( + data: u64, + pc: target_ulong, + addr: *mut TCGTemp, + oi: MemOpIdx, + ) -> u64, + >, + exec1: ::std::option::Option, + exec2: ::std::option::Option, + exec4: ::std::option::Option, + exec8: ::std::option::Option, + execN: ::std::option::Option< + unsafe extern "C" fn(data: u64, id: u64, addr: target_ulong, size: usize), + >, + data: u64, + ) -> usize; +} +extern "C" { + pub fn libafl_qemu_remove_read_hook( + num: usize, + invalidate: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_remove_write_hook( + num: usize, + invalidate: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +pub type libafl_cpu_run_fn = + ::std::option::Option; +extern "C" { + pub fn libafl_hook_cpu_run_add( + pre_cpu_run: libafl_cpu_run_fn, + post_cpu_run: libafl_cpu_run_fn, + data: u64, + ) -> usize; +} +extern "C" { + pub fn libafl_hook_cpu_run_remove(num: usize) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_remove_cpu_run_hook(num: usize) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_hook_cpu_run_pre_exec(cpu: *mut CPUState); +} +extern "C" { + pub fn libafl_hook_cpu_run_post_exec(cpu: *mut CPUState); +} +extern "C" { + pub fn libafl_add_new_thread_hook( + callback: ::std::option::Option< + unsafe extern "C" fn(data: u64, env: *mut CPUArchState, tid: u32) -> bool, + >, + data: u64, + ) -> usize; +} +extern "C" { + pub fn libafl_qemu_remove_new_thread_hook(num: usize) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_hook_new_thread_run(env: *mut CPUArchState, tid: u32) -> bool; +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct syshook_ret { + pub retval: target_ulong, + pub skip_syscall: bool, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of syshook_ret"][::std::mem::size_of::() - 16usize]; + ["Alignment of syshook_ret"][::std::mem::align_of::() - 8usize]; + ["Offset of field: syshook_ret::retval"][::std::mem::offset_of!(syshook_ret, retval) - 0usize]; + ["Offset of field: syshook_ret::skip_syscall"] + [::std::mem::offset_of!(syshook_ret, skip_syscall) - 8usize]; +}; +extern "C" { + pub fn libafl_add_pre_syscall_hook( + callback: ::std::option::Option< + unsafe extern "C" fn( + data: u64, + sys_num: ::std::os::raw::c_int, + arg0: target_ulong, + arg1: target_ulong, + arg2: target_ulong, + arg3: target_ulong, + arg4: target_ulong, + arg5: target_ulong, + arg6: target_ulong, + arg7: target_ulong, + ) -> syshook_ret, + >, + data: u64, + ) -> usize; +} +extern "C" { + pub fn libafl_add_post_syscall_hook( + callback: ::std::option::Option< + unsafe extern "C" fn( + data: u64, + ret: target_ulong, + sys_num: ::std::os::raw::c_int, + arg0: target_ulong, + arg1: target_ulong, + arg2: target_ulong, + arg3: target_ulong, + arg4: target_ulong, + arg5: target_ulong, + arg6: target_ulong, + arg7: target_ulong, + ) -> target_ulong, + >, + data: u64, + ) -> usize; +} +extern "C" { + pub fn libafl_qemu_remove_pre_syscall_hook(num: usize) -> 
::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_qemu_remove_post_syscall_hook(num: usize) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn libafl_hook_syscall_pre_run( + env: *mut CPUArchState, + num: ::std::os::raw::c_int, + arg1: abi_long, + arg2: abi_long, + arg3: abi_long, + arg4: abi_long, + arg5: abi_long, + arg6: abi_long, + arg7: abi_long, + arg8: abi_long, + ret: *mut abi_long, + ) -> bool; +} +extern "C" { + pub fn libafl_hook_syscall_post_run( + num: ::std::os::raw::c_int, + arg1: abi_long, + arg2: abi_long, + arg3: abi_long, + arg4: abi_long, + arg5: abi_long, + arg6: abi_long, + arg7: abi_long, + arg8: abi_long, + ret: *mut abi_long, + ); +} +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct kvm_dirty_gfn { + pub _address: u8, +} diff --git a/libafl_qemu/libafl_qemu_sys/src/lib.rs b/libafl_qemu/libafl_qemu_sys/src/lib.rs index ab2bcb917a..d72dcf14f4 100644 --- a/libafl_qemu/libafl_qemu_sys/src/lib.rs +++ b/libafl_qemu/libafl_qemu_sys/src/lib.rs @@ -5,42 +5,30 @@ Have a look at `libafl_qemu` for higher-level abstractions. __Warning__: The documentation is built by default for `x86_64` in `usermode`. To access the documentation of other architectures or systemmode, the documentation must be rebuilt with the right features. */ -#![forbid(unexpected_cfgs)] -#![allow(non_upper_case_globals)] -#![allow(non_camel_case_types)] -#![allow(non_snake_case)] -#![allow(improper_ctypes)] -#![allow(unused_mut)] -#![allow(clippy::all)] -#![allow(clippy::pedantic)] #![cfg_attr(nightly, feature(used_with_arg))] +#[cfg(target_os = "linux")] +use core::ops::BitAnd; +use std::ffi::c_void; + use num_enum::{IntoPrimitive, TryFromPrimitive}; -use paste::paste; use strum_macros::EnumIter; -#[cfg(all(not(feature = "clippy"), target_os = "linux"))] -mod bindings { - include!(concat!(env!("OUT_DIR"), "/bindings.rs")); -} -#[cfg(all(not(feature = "clippy"), target_os = "linux"))] +mod bindings; pub use bindings::*; -#[cfg(any(feature = "clippy", not(target_os = "linux")))] -#[rustfmt::skip] -mod x86_64_stub_bindings; - -#[cfg(emulation_mode = "usermode")] +#[cfg(feature = "usermode")] mod usermode; -#[cfg(emulation_mode = "usermode")] +#[cfg(feature = "usermode")] pub use usermode::*; -#[cfg(emulation_mode = "systemmode")] +#[cfg(feature = "systemmode")] mod systemmode; -#[cfg(emulation_mode = "systemmode")] +#[cfg(feature = "systemmode")] pub use systemmode::*; /// Safe linking with of extern "C" functions. +/// /// This macro makes sure the declared symbol is defined *at link time*, avoiding declaring non-existant symbols /// that could be silently ignored during linking if unused. /// @@ -72,7 +60,8 @@ macro_rules! extern_c_checked { unsafe impl Sync for [<__ $c_var:upper _STRUCT__>] {} #[cfg_attr(nightly, used(linker))] - static [<__ $c_var:upper __>]: [<__ $c_var:upper _STRUCT__>] = unsafe { [<__ $c_var:upper _STRUCT__>] { member: core::ptr::addr_of!($c_var) } }; + #[allow(unused_unsafe)] + static [<__ $c_var:upper __>]: [<__ $c_var:upper _STRUCT__>] = unsafe { [<__ $c_var:upper _STRUCT__>] { member: &raw const $c_var } }; } extern "C" { @@ -91,7 +80,8 @@ macro_rules! 
extern_c_checked { unsafe impl Sync for [<__ $c_var:upper _STRUCT__>] {} #[cfg_attr(nightly, used(linker))] - static mut [<__ $c_var:upper __>]: [<__ $c_var:upper _STRUCT__>] = unsafe { [<__ $c_var:upper _STRUCT__>] { member: core::ptr::addr_of!($c_var) } }; + #[allow(unused_unsafe)] + static mut [<__ $c_var:upper __>]: [<__ $c_var:upper _STRUCT__>] = unsafe { [<__ $c_var:upper _STRUCT__>] { member: &raw const $c_var } }; } extern "C" { @@ -102,39 +92,18 @@ macro_rules! extern_c_checked { }; } -#[cfg(target_os = "linux")] -use core::ops::BitAnd; -use std::ffi::c_void; +pub type CPUStatePtr = *mut CPUState; +pub type CPUArchStatePtr = *mut CPUArchState; +pub type ExitReasonPtr = *mut libafl_exit_reason; -#[cfg(feature = "python")] -use pyo3::{pyclass, pymethods, IntoPy, PyObject, Python}; -#[cfg(any(feature = "clippy", not(target_os = "linux")))] -pub use x86_64_stub_bindings::*; +pub type GuestUsize = target_ulong; +pub type GuestIsize = target_long; -pub type CPUStatePtr = *mut crate::CPUState; -pub type CPUArchStatePtr = *mut crate::CPUArchState; -pub type ExitReasonPtr = *mut crate::libafl_exit_reason; +pub type GuestAddr = target_ulong; +pub type GuestPhysAddr = hwaddr; +pub type GuestVirtAddr = vaddr; -pub type GuestUsize = crate::target_ulong; -pub type GuestIsize = crate::target_long; - -pub type GuestAddr = crate::target_ulong; -pub type GuestPhysAddr = crate::hwaddr; -pub type GuestVirtAddr = crate::vaddr; - -pub type GuestHwAddrInfo = crate::qemu_plugin_hwaddr; - -#[derive(Debug)] -#[repr(C)] -#[cfg_attr(feature = "python", pyclass(unsendable))] -pub struct MapInfo { - start: GuestAddr, - end: GuestAddr, - offset: GuestAddr, - path: Option, - flags: i32, - is_priv: i32, -} +pub type GuestHwAddrInfo = qemu_plugin_hwaddr; #[repr(C)] #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -156,11 +125,13 @@ pub enum MmapPerms { // from include/exec/memop.h #[cfg(target_os = "linux")] +#[must_use] pub fn memop_size(op: MemOp) -> u32 { 1 << op.bitand(MemOp_MO_SIZE).0 } #[cfg(target_os = "linux")] +#[must_use] pub fn memop_big_endian(op: MemOp) -> bool { op.bitand(MemOp_MO_BSWAP) == MemOp_MO_BE } @@ -168,123 +139,16 @@ pub fn memop_big_endian(op: MemOp) -> bool { // from include/qemu/plugin.h #[cfg(target_os = "linux")] +#[must_use] pub fn make_plugin_meminfo(oi: MemOpIdx, rw: qemu_plugin_mem_rw) -> qemu_plugin_meminfo_t { oi | (rw.0 << 16) } // from include/hw/core/cpu.h +/// # Safety +/// Will dereference the `cpu` pointer. #[cfg(target_os = "linux")] -pub fn cpu_env(cpu: *mut CPUState) -> *mut CPUArchState { +pub unsafe fn cpu_env(cpu: *mut CPUState) -> *mut CPUArchState { unsafe { cpu.add(1) as *mut CPUArchState } } - -extern_c_checked! 
{ - //static libafl_page_size: GuestUsize; - pub fn libafl_page_from_addr(addr: GuestAddr) -> GuestAddr; - - // CPUState* libafl_qemu_get_cpu(int cpu_index); - pub fn libafl_qemu_get_cpu(cpu_index: i32) -> CPUStatePtr; - // int libafl_qemu_num_cpus(void); - pub fn libafl_qemu_num_cpus() -> i32; - // CPUState* libafl_qemu_current_cpu(void); - pub fn libafl_qemu_current_cpu() -> CPUStatePtr; - - // struct libafl_exit_reason* libafl_get_exit_reason(void); - // fn libafl_get_exit_reason() -> ExitReasonPtr; - - pub fn libafl_qemu_cpu_index(cpu: CPUStatePtr) -> i32; - - pub fn libafl_qemu_write_reg(cpu: CPUStatePtr, reg: i32, val: *const u8) -> i32; - pub fn libafl_qemu_read_reg(cpu: CPUStatePtr, reg: i32, val: *mut u8) -> i32; - pub fn libafl_qemu_num_regs(cpu: CPUStatePtr) -> i32; - - // fn libafl_qemu_set_breakpoint(addr: u64) -> i32; - // fn libafl_qemu_remove_breakpoint(addr: u64) -> i32; - pub fn libafl_flush_jit(); - // fn libafl_qemu_trigger_breakpoint(cpu: CPUStatePtr); - - pub fn strlen(s: *const u8) -> usize; - - pub fn libafl_qemu_add_gdb_cmd( - callback: extern "C" fn(*const (), *const u8, usize) -> i32, - data: *const () - ); - pub fn libafl_qemu_gdb_reply(buf: *const u8, len: usize); -} - -#[cfg_attr(feature = "python", pymethods)] -impl MapInfo { - #[must_use] - pub fn start(&self) -> GuestAddr { - self.start - } - - #[must_use] - pub fn end(&self) -> GuestAddr { - self.end - } - - #[must_use] - pub fn offset(&self) -> GuestAddr { - self.offset - } - - #[must_use] - pub fn path(&self) -> Option<&String> { - self.path.as_ref() - } - - #[must_use] - pub fn flags(&self) -> MmapPerms { - MmapPerms::try_from(self.flags).unwrap() - } - - #[must_use] - pub fn is_priv(&self) -> bool { - self.is_priv != 0 - } -} - -impl MmapPerms { - #[must_use] - pub fn readable(&self) -> bool { - matches!( - self, - MmapPerms::Read - | MmapPerms::ReadWrite - | MmapPerms::ReadExecute - | MmapPerms::ReadWriteExecute - ) - } - - #[must_use] - pub fn writable(&self) -> bool { - matches!( - self, - MmapPerms::Write - | MmapPerms::ReadWrite - | MmapPerms::WriteExecute - | MmapPerms::ReadWriteExecute - ) - } - - #[must_use] - pub fn executable(&self) -> bool { - matches!( - self, - MmapPerms::Execute - | MmapPerms::ReadExecute - | MmapPerms::WriteExecute - | MmapPerms::ReadWriteExecute - ) - } -} - -#[cfg(feature = "python")] -impl IntoPy for MmapPerms { - fn into_py(self, py: Python) -> PyObject { - let n: i32 = self.into(); - n.into_py(py) - } -} diff --git a/libafl_qemu/libafl_qemu_sys/src/usermode.rs b/libafl_qemu/libafl_qemu_sys/src/usermode.rs index a378a96f5c..50b526c5b1 100644 --- a/libafl_qemu/libafl_qemu_sys/src/usermode.rs +++ b/libafl_qemu/libafl_qemu_sys/src/usermode.rs @@ -1,27 +1,18 @@ +#[cfg(target_os = "linux")] use core::{slice::from_raw_parts, str::from_utf8_unchecked}; +#[cfg(feature = "python")] +use std::convert::Infallible; +#[cfg(target_os = "linux")] +use libc::{c_char, strlen}; use num_enum::{IntoPrimitive, TryFromPrimitive}; -use paste::paste; +#[cfg(feature = "python")] +use pyo3::{pyclass, pymethods, types::PyInt, Bound, IntoPyObject, Python}; use strum_macros::EnumIter; -use crate::{extern_c_checked, libafl_mapinfo, strlen, GuestAddr, MapInfo}; - -extern_c_checked! 
{ - pub fn qemu_user_init(argc: i32, argv: *const *const u8, envp: *const *const u8) -> i32; - - pub fn libafl_qemu_run() -> i32; - - pub fn libafl_load_addr() -> u64; - pub fn libafl_get_brk() -> u64; - pub fn libafl_set_brk(brk: u64) -> u64; - - pub static exec_path: *const u8; - pub static guest_base: usize; - pub static mut mmap_next_start: GuestAddr; - - pub static mut libafl_dump_core_hook: unsafe extern "C" fn(i32); - pub static mut libafl_force_dfl: i32; -} +use crate::MmapPerms; +#[cfg(target_os = "linux")] +use crate::{libafl_mapinfo, GuestAddr}; #[derive(IntoPrimitive, TryFromPrimitive, Debug, Clone, Copy, EnumIter, PartialEq, Eq)] #[repr(i32)] @@ -30,6 +21,101 @@ pub enum VerifyAccess { Write = libc::PROT_READ | libc::PROT_WRITE, } +#[derive(Debug)] +#[repr(C)] +#[cfg(target_os = "linux")] +#[cfg_attr(feature = "python", pyclass(unsendable))] +pub struct MapInfo { + start: GuestAddr, + end: GuestAddr, + offset: GuestAddr, + path: Option, + flags: i32, + is_priv: i32, +} + +#[cfg(target_os = "linux")] +#[cfg_attr(feature = "python", pymethods)] +impl MapInfo { + #[must_use] + pub fn start(&self) -> GuestAddr { + self.start + } + + #[must_use] + pub fn end(&self) -> GuestAddr { + self.end + } + + #[must_use] + pub fn offset(&self) -> GuestAddr { + self.offset + } + + #[must_use] + pub fn path(&self) -> Option<&String> { + self.path.as_ref() + } + + #[must_use] + pub fn flags(&self) -> MmapPerms { + MmapPerms::try_from(self.flags).unwrap() + } + + #[must_use] + pub fn is_priv(&self) -> bool { + self.is_priv != 0 + } +} + +impl MmapPerms { + #[must_use] + pub fn readable(&self) -> bool { + matches!( + self, + MmapPerms::Read + | MmapPerms::ReadWrite + | MmapPerms::ReadExecute + | MmapPerms::ReadWriteExecute + ) + } + + #[must_use] + pub fn writable(&self) -> bool { + matches!( + self, + MmapPerms::Write + | MmapPerms::ReadWrite + | MmapPerms::WriteExecute + | MmapPerms::ReadWriteExecute + ) + } + + #[must_use] + pub fn executable(&self) -> bool { + matches!( + self, + MmapPerms::Execute + | MmapPerms::ReadExecute + | MmapPerms::WriteExecute + | MmapPerms::ReadWriteExecute + ) + } +} + +#[cfg(feature = "python")] +impl<'py> IntoPyObject<'py> for MmapPerms { + type Target = PyInt; + type Output = Bound<'py, Self::Target>; + type Error = Infallible; + + fn into_pyobject(self, py: Python<'py>) -> Result { + let n: i32 = self.into(); + n.into_pyobject(py) + } +} + +#[cfg(target_os = "linux")] impl From for MapInfo { fn from(map_info: libafl_mapinfo) -> Self { let path: Option = if map_info.path.is_null() { @@ -39,7 +125,7 @@ impl From for MapInfo { Some( from_utf8_unchecked(from_raw_parts( map_info.path as *const u8, - strlen(map_info.path as *const u8), + strlen(map_info.path as *const c_char), )) .to_string(), ) diff --git a/libafl_qemu/libafl_qemu_sys/src/x86_64_stub_bindings.rs b/libafl_qemu/libafl_qemu_sys/src/x86_64_stub_bindings.rs deleted file mode 100644 index 138fd3a954..0000000000 --- a/libafl_qemu/libafl_qemu_sys/src/x86_64_stub_bindings.rs +++ /dev/null @@ -1,13896 +0,0 @@ -/* 1.80.0-nightly */ -/* automatically generated by rust-bindgen 0.69.4 */ - -#[repr(C)] -#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)] -pub struct __BindgenBitfieldUnit { - storage: Storage, -} -impl __BindgenBitfieldUnit { - #[inline] - pub const fn new(storage: Storage) -> Self { - Self { storage } - } -} -impl __BindgenBitfieldUnit -where - Storage: AsRef<[u8]> + AsMut<[u8]>, -{ - #[inline] - pub fn get_bit(&self, index: usize) -> bool { - debug_assert!(index / 8 < 
self.storage.as_ref().len()); - let byte_index = index / 8; - let byte = self.storage.as_ref()[byte_index]; - let bit_index = if cfg!(target_endian = "big") { - 7 - (index % 8) - } else { - index % 8 - }; - let mask = 1 << bit_index; - byte & mask == mask - } - #[inline] - pub fn set_bit(&mut self, index: usize, val: bool) { - debug_assert!(index / 8 < self.storage.as_ref().len()); - let byte_index = index / 8; - let byte = &mut self.storage.as_mut()[byte_index]; - let bit_index = if cfg!(target_endian = "big") { - 7 - (index % 8) - } else { - index % 8 - }; - let mask = 1 << bit_index; - if val { - *byte |= mask; - } else { - *byte &= !mask; - } - } - #[inline] - pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); - debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); - let mut val = 0; - for i in 0..(bit_width as usize) { - if self.get_bit(i + bit_offset) { - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - val |= 1 << index; - } - } - val - } - #[inline] - pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) { - debug_assert!(bit_width <= 64); - debug_assert!(bit_offset / 8 < self.storage.as_ref().len()); - debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len()); - for i in 0..(bit_width as usize) { - let mask = 1 << i; - let val_bit_is_set = val & mask == mask; - let index = if cfg!(target_endian = "big") { - bit_width as usize - 1 - i - } else { - i - }; - self.set_bit(index + bit_offset, val_bit_is_set); - } - } -} -#[repr(C)] -#[derive(Default)] -pub struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]); -impl __IncompleteArrayField { - #[inline] - pub const fn new() -> Self { - __IncompleteArrayField(::std::marker::PhantomData, []) - } - #[inline] - pub fn as_ptr(&self) -> *const T { - self as *const _ as *const T - } - #[inline] - pub fn as_mut_ptr(&mut self) -> *mut T { - self as *mut _ as *mut T - } - #[inline] - pub unsafe fn as_slice(&self, len: usize) -> &[T] { - ::std::slice::from_raw_parts(self.as_ptr(), len) - } - #[inline] - pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { - ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) - } -} -impl ::std::fmt::Debug for __IncompleteArrayField { - fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - fmt.write_str("__IncompleteArrayField") - } -} -pub type __off_t = ::std::os::raw::c_long; -pub type __off64_t = ::std::os::raw::c_long; -pub type off_t = __off64_t; -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct __sigset_t { - pub __val: [::std::os::raw::c_ulong; 16usize], -} -#[test] -fn bindgen_test_layout___sigset_t() { - const UNINIT: ::std::mem::MaybeUninit<__sigset_t> = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<__sigset_t>(), - 128usize, - concat!("Size of: ", stringify!(__sigset_t)) - ); - assert_eq!( - ::std::mem::align_of::<__sigset_t>(), - 8usize, - concat!("Alignment of ", stringify!(__sigset_t)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__val) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__sigset_t), - "::", - stringify!(__val) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union __atomic_wide_counter { - pub __value64: ::std::os::raw::c_ulonglong, - pub __value32: __atomic_wide_counter__bindgen_ty_1, -} 
-#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct __atomic_wide_counter__bindgen_ty_1 { - pub __low: ::std::os::raw::c_uint, - pub __high: ::std::os::raw::c_uint, -} -#[test] -fn bindgen_test_layout___atomic_wide_counter__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit<__atomic_wide_counter__bindgen_ty_1> = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<__atomic_wide_counter__bindgen_ty_1>(), - 8usize, - concat!("Size of: ", stringify!(__atomic_wide_counter__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::<__atomic_wide_counter__bindgen_ty_1>(), - 4usize, - concat!( - "Alignment of ", - stringify!(__atomic_wide_counter__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__low) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__atomic_wide_counter__bindgen_ty_1), - "::", - stringify!(__low) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__high) as usize - ptr as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(__atomic_wide_counter__bindgen_ty_1), - "::", - stringify!(__high) - ) - ); -} -#[test] -fn bindgen_test_layout___atomic_wide_counter() { - const UNINIT: ::std::mem::MaybeUninit<__atomic_wide_counter> = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<__atomic_wide_counter>(), - 8usize, - concat!("Size of: ", stringify!(__atomic_wide_counter)) - ); - assert_eq!( - ::std::mem::align_of::<__atomic_wide_counter>(), - 8usize, - concat!("Alignment of ", stringify!(__atomic_wide_counter)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__value64) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__atomic_wide_counter), - "::", - stringify!(__value64) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__value32) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__atomic_wide_counter), - "::", - stringify!(__value32) - ) - ); -} -impl Default for __atomic_wide_counter { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for __atomic_wide_counter { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "__atomic_wide_counter {{ union }}") - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct __pthread_internal_list { - pub __prev: *mut __pthread_internal_list, - pub __next: *mut __pthread_internal_list, -} -#[test] -fn bindgen_test_layout___pthread_internal_list() { - const UNINIT: ::std::mem::MaybeUninit<__pthread_internal_list> = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<__pthread_internal_list>(), - 16usize, - concat!("Size of: ", stringify!(__pthread_internal_list)) - ); - assert_eq!( - ::std::mem::align_of::<__pthread_internal_list>(), - 8usize, - concat!("Alignment of ", stringify!(__pthread_internal_list)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__prev) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__pthread_internal_list), - "::", - stringify!(__prev) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__next) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__pthread_internal_list), - "::", - stringify!(__next) - ) - ); -} -impl 
Default for __pthread_internal_list { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -pub type __pthread_list_t = __pthread_internal_list; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct __pthread_mutex_s { - pub __lock: ::std::os::raw::c_int, - pub __count: ::std::os::raw::c_uint, - pub __owner: ::std::os::raw::c_int, - pub __nusers: ::std::os::raw::c_uint, - pub __kind: ::std::os::raw::c_int, - pub __spins: ::std::os::raw::c_short, - pub __elision: ::std::os::raw::c_short, - pub __list: __pthread_list_t, -} -#[test] -fn bindgen_test_layout___pthread_mutex_s() { - const UNINIT: ::std::mem::MaybeUninit<__pthread_mutex_s> = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<__pthread_mutex_s>(), - 40usize, - concat!("Size of: ", stringify!(__pthread_mutex_s)) - ); - assert_eq!( - ::std::mem::align_of::<__pthread_mutex_s>(), - 8usize, - concat!("Alignment of ", stringify!(__pthread_mutex_s)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__lock) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__pthread_mutex_s), - "::", - stringify!(__lock) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__count) as usize - ptr as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(__pthread_mutex_s), - "::", - stringify!(__count) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__owner) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__pthread_mutex_s), - "::", - stringify!(__owner) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__nusers) as usize - ptr as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(__pthread_mutex_s), - "::", - stringify!(__nusers) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__kind) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__pthread_mutex_s), - "::", - stringify!(__kind) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__spins) as usize - ptr as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(__pthread_mutex_s), - "::", - stringify!(__spins) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__elision) as usize - ptr as usize }, - 22usize, - concat!( - "Offset of field: ", - stringify!(__pthread_mutex_s), - "::", - stringify!(__elision) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__list) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__pthread_mutex_s), - "::", - stringify!(__list) - ) - ); -} -impl Default for __pthread_mutex_s { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct __pthread_cond_s { - pub __wseq: __atomic_wide_counter, - pub __g1_start: __atomic_wide_counter, - pub __g_refs: [::std::os::raw::c_uint; 2usize], - pub __g_size: [::std::os::raw::c_uint; 2usize], - pub __g1_orig_size: ::std::os::raw::c_uint, - pub __wrefs: ::std::os::raw::c_uint, - pub __g_signals: [::std::os::raw::c_uint; 2usize], -} -#[test] -fn bindgen_test_layout___pthread_cond_s() { - const UNINIT: ::std::mem::MaybeUninit<__pthread_cond_s> = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - 
::std::mem::size_of::<__pthread_cond_s>(), - 48usize, - concat!("Size of: ", stringify!(__pthread_cond_s)) - ); - assert_eq!( - ::std::mem::align_of::<__pthread_cond_s>(), - 8usize, - concat!("Alignment of ", stringify!(__pthread_cond_s)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__wseq) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__pthread_cond_s), - "::", - stringify!(__wseq) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__g1_start) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(__pthread_cond_s), - "::", - stringify!(__g1_start) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__g_refs) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(__pthread_cond_s), - "::", - stringify!(__g_refs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__g_size) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(__pthread_cond_s), - "::", - stringify!(__g_size) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__g1_orig_size) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(__pthread_cond_s), - "::", - stringify!(__g1_orig_size) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__wrefs) as usize - ptr as usize }, - 36usize, - concat!( - "Offset of field: ", - stringify!(__pthread_cond_s), - "::", - stringify!(__wrefs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__g_signals) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(__pthread_cond_s), - "::", - stringify!(__g_signals) - ) - ); -} -impl Default for __pthread_cond_s { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for __pthread_cond_s { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write ! (f , "__pthread_cond_s {{ __wseq: {:?}, __g1_start: {:?}, __g_refs: {:?}, __g_size: {:?}, __g1_orig_size: {:?}, __wrefs: {:?}, __g_signals: {:?} }}" , self . __wseq , self . __g1_start , self . __g_refs , self . __g_size , self . __g1_orig_size , self . __wrefs , self . 
__g_signals) - } -} -pub type pthread_t = ::std::os::raw::c_ulong; -#[repr(C)] -#[derive(Copy, Clone)] -pub union pthread_mutex_t { - pub __data: __pthread_mutex_s, - pub __size: [::std::os::raw::c_char; 40usize], - pub __align: ::std::os::raw::c_long, -} -#[test] -fn bindgen_test_layout_pthread_mutex_t() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(pthread_mutex_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(pthread_mutex_t)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__data) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(pthread_mutex_t), - "::", - stringify!(__data) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__size) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(pthread_mutex_t), - "::", - stringify!(__size) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__align) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(pthread_mutex_t), - "::", - stringify!(__align) - ) - ); -} -impl Default for pthread_mutex_t { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for pthread_mutex_t { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "pthread_mutex_t {{ union }}") - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union pthread_cond_t { - pub __data: __pthread_cond_s, - pub __size: [::std::os::raw::c_char; 48usize], - pub __align: ::std::os::raw::c_longlong, -} -#[test] -fn bindgen_test_layout_pthread_cond_t() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 48usize, - concat!("Size of: ", stringify!(pthread_cond_t)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(pthread_cond_t)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__data) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(pthread_cond_t), - "::", - stringify!(__data) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__size) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(pthread_cond_t), - "::", - stringify!(__size) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__align) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(pthread_cond_t), - "::", - stringify!(__align) - ) - ); -} -impl Default for pthread_cond_t { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for pthread_cond_t { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "pthread_cond_t {{ union }}") - } -} -pub type FILE = _IO_FILE; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _IO_marker { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _IO_codecvt { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _IO_wide_data { - _unused: [u8; 0], -} -pub type _IO_lock_t = ::std::os::raw::c_void; -#[repr(C)] -#[derive(Debug, Copy, Clone)] 
-pub struct _IO_FILE { - pub _flags: ::std::os::raw::c_int, - pub _IO_read_ptr: *mut ::std::os::raw::c_char, - pub _IO_read_end: *mut ::std::os::raw::c_char, - pub _IO_read_base: *mut ::std::os::raw::c_char, - pub _IO_write_base: *mut ::std::os::raw::c_char, - pub _IO_write_ptr: *mut ::std::os::raw::c_char, - pub _IO_write_end: *mut ::std::os::raw::c_char, - pub _IO_buf_base: *mut ::std::os::raw::c_char, - pub _IO_buf_end: *mut ::std::os::raw::c_char, - pub _IO_save_base: *mut ::std::os::raw::c_char, - pub _IO_backup_base: *mut ::std::os::raw::c_char, - pub _IO_save_end: *mut ::std::os::raw::c_char, - pub _markers: *mut _IO_marker, - pub _chain: *mut _IO_FILE, - pub _fileno: ::std::os::raw::c_int, - pub _flags2: ::std::os::raw::c_int, - pub _old_offset: __off_t, - pub _cur_column: ::std::os::raw::c_ushort, - pub _vtable_offset: ::std::os::raw::c_schar, - pub _shortbuf: [::std::os::raw::c_char; 1usize], - pub _lock: *mut _IO_lock_t, - pub _offset: __off64_t, - pub _codecvt: *mut _IO_codecvt, - pub _wide_data: *mut _IO_wide_data, - pub _freeres_list: *mut _IO_FILE, - pub _freeres_buf: *mut ::std::os::raw::c_void, - pub __pad5: usize, - pub _mode: ::std::os::raw::c_int, - pub _unused2: [::std::os::raw::c_char; 20usize], -} -#[test] -fn bindgen_test_layout__IO_FILE() { - const UNINIT: ::std::mem::MaybeUninit<_IO_FILE> = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<_IO_FILE>(), - 216usize, - concat!("Size of: ", stringify!(_IO_FILE)) - ); - assert_eq!( - ::std::mem::align_of::<_IO_FILE>(), - 8usize, - concat!("Alignment of ", stringify!(_IO_FILE)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._flags) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_flags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._IO_read_ptr) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_read_ptr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._IO_read_end) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_read_end) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._IO_read_base) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_read_base) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._IO_write_base) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_write_base) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._IO_write_ptr) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_write_ptr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._IO_write_end) as usize - ptr as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_write_end) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._IO_buf_base) as usize - ptr as usize }, - 56usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_buf_base) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._IO_buf_end) as usize - ptr as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_buf_end) - ) - ); - assert_eq!( - unsafe { 
::std::ptr::addr_of!((*ptr)._IO_save_base) as usize - ptr as usize }, - 72usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_save_base) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._IO_backup_base) as usize - ptr as usize }, - 80usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_backup_base) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._IO_save_end) as usize - ptr as usize }, - 88usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_IO_save_end) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._markers) as usize - ptr as usize }, - 96usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_markers) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._chain) as usize - ptr as usize }, - 104usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_chain) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._fileno) as usize - ptr as usize }, - 112usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_fileno) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._flags2) as usize - ptr as usize }, - 116usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_flags2) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._old_offset) as usize - ptr as usize }, - 120usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_old_offset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._cur_column) as usize - ptr as usize }, - 128usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_cur_column) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._vtable_offset) as usize - ptr as usize }, - 130usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_vtable_offset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._shortbuf) as usize - ptr as usize }, - 131usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_shortbuf) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._lock) as usize - ptr as usize }, - 136usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_lock) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._offset) as usize - ptr as usize }, - 144usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_offset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._codecvt) as usize - ptr as usize }, - 152usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_codecvt) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._wide_data) as usize - ptr as usize }, - 160usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_wide_data) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._freeres_list) as usize - ptr as usize }, - 168usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_freeres_list) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._freeres_buf) as usize - ptr as usize }, - 176usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_freeres_buf) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__pad5) as usize - ptr as usize }, - 184usize, - concat!( - 
"Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(__pad5) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._mode) as usize - ptr as usize }, - 192usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_mode) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._unused2) as usize - ptr as usize }, - 196usize, - concat!( - "Offset of field: ", - stringify!(_IO_FILE), - "::", - stringify!(_unused2) - ) - ); -} -impl Default for _IO_FILE { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -pub type __jmp_buf = [::std::os::raw::c_long; 8usize]; -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct __jmp_buf_tag { - pub __jmpbuf: __jmp_buf, - pub __mask_was_saved: ::std::os::raw::c_int, - pub __saved_mask: __sigset_t, -} -#[test] -fn bindgen_test_layout___jmp_buf_tag() { - const UNINIT: ::std::mem::MaybeUninit<__jmp_buf_tag> = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<__jmp_buf_tag>(), - 200usize, - concat!("Size of: ", stringify!(__jmp_buf_tag)) - ); - assert_eq!( - ::std::mem::align_of::<__jmp_buf_tag>(), - 8usize, - concat!("Alignment of ", stringify!(__jmp_buf_tag)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__jmpbuf) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__jmp_buf_tag), - "::", - stringify!(__jmpbuf) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__mask_was_saved) as usize - ptr as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(__jmp_buf_tag), - "::", - stringify!(__mask_was_saved) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__saved_mask) as usize - ptr as usize }, - 72usize, - concat!( - "Offset of field: ", - stringify!(__jmp_buf_tag), - "::", - stringify!(__saved_mask) - ) - ); -} -pub type sigjmp_buf = [__jmp_buf_tag; 1usize]; -pub type guint8 = ::std::os::raw::c_uchar; -pub type gchar = ::std::os::raw::c_char; -pub type guint = ::std::os::raw::c_uint; -pub type gpointer = *mut ::std::os::raw::c_void; -pub type GArray = _GArray; -pub type GByteArray = _GByteArray; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _GArray { - pub data: *mut gchar, - pub len: guint, -} -#[test] -fn bindgen_test_layout__GArray() { - const UNINIT: ::std::mem::MaybeUninit<_GArray> = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<_GArray>(), - 16usize, - concat!("Size of: ", stringify!(_GArray)) - ); - assert_eq!( - ::std::mem::align_of::<_GArray>(), - 8usize, - concat!("Alignment of ", stringify!(_GArray)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_GArray), - "::", - stringify!(data) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).len) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_GArray), - "::", - stringify!(len) - ) - ); -} -impl Default for _GArray { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _GByteArray { - pub data: *mut guint8, - pub len: guint, -} -#[test] -fn bindgen_test_layout__GByteArray() { - const UNINIT: ::std::mem::MaybeUninit<_GByteArray> = 
::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<_GByteArray>(), - 16usize, - concat!("Size of: ", stringify!(_GByteArray)) - ); - assert_eq!( - ::std::mem::align_of::<_GByteArray>(), - 8usize, - concat!("Alignment of ", stringify!(_GByteArray)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_GByteArray), - "::", - stringify!(data) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).len) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_GByteArray), - "::", - stringify!(len) - ) - ); -} -impl Default for _GByteArray { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _GHashTable { - _unused: [u8; 0], -} -pub type GHashTable = _GHashTable; -pub type GSList = _GSList; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct _GSList { - pub data: gpointer, - pub next: *mut GSList, -} -#[test] -fn bindgen_test_layout__GSList() { - const UNINIT: ::std::mem::MaybeUninit<_GSList> = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<_GSList>(), - 16usize, - concat!("Size of: ", stringify!(_GSList)) - ); - assert_eq!( - ::std::mem::align_of::<_GSList>(), - 8usize, - concat!("Alignment of ", stringify!(_GSList)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(_GSList), - "::", - stringify!(data) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).next) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(_GSList), - "::", - stringify!(next) - ) - ); -} -impl Default for _GSList { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct AccelCPUState { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct AddressSpace { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct Clock { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct CPUAddressSpace { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct CpuInfoFast { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct CPUJumpCache { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct Error { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct MemoryRegion { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct QDict { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct QObject { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct RAMBlock { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct TCGCPUOps { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct Visitor { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct VMChangeStateEntry { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct VMStateDescription { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct IRQState { 
- _unused: [u8; 0], -} -pub type qemu_irq = *mut IRQState; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct QEnumLookup { - pub array: *const *const ::std::os::raw::c_char, - pub special_features: *const ::std::os::raw::c_uchar, - pub size: ::std::os::raw::c_int, -} -#[test] -fn bindgen_test_layout_QEnumLookup() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(QEnumLookup)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(QEnumLookup)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).array) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(QEnumLookup), - "::", - stringify!(array) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).special_features) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(QEnumLookup), - "::", - stringify!(special_features) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).size) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(QEnumLookup), - "::", - stringify!(size) - ) - ); -} -impl Default for QEnumLookup { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -extern "C" { - pub fn qemu_target_page_size() -> usize; -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct QemuMutex { - pub lock: pthread_mutex_t, - pub initialized: bool, -} -#[test] -fn bindgen_test_layout_QemuMutex() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 48usize, - concat!("Size of: ", stringify!(QemuMutex)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(QemuMutex)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lock) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(QemuMutex), - "::", - stringify!(lock) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).initialized) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(QemuMutex), - "::", - stringify!(initialized) - ) - ); -} -impl Default for QemuMutex { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for QemuMutex { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "QemuMutex {{ lock: {:?}, initialized: {:?} }}", - self.lock, self.initialized - ) - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct QemuCond { - pub cond: pthread_cond_t, - pub initialized: bool, -} -#[test] -fn bindgen_test_layout_QemuCond() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 56usize, - concat!("Size of: ", stringify!(QemuCond)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(QemuCond)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cond) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(QemuCond), - "::", - stringify!(cond) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).initialized) as usize - ptr as 
usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(QemuCond), - "::", - stringify!(initialized) - ) - ); -} -impl Default for QemuCond { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for QemuCond { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "QemuCond {{ cond: {:?}, initialized: {:?} }}", - self.cond, self.initialized - ) - } -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct QemuThread { - pub thread: pthread_t, -} -#[test] -fn bindgen_test_layout_QemuThread() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(QemuThread)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(QemuThread)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).thread) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(QemuThread), - "::", - stringify!(thread) - ) - ); -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct QemuSpin { - pub value: ::std::os::raw::c_int, -} -#[test] -fn bindgen_test_layout_QemuSpin() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 4usize, - concat!("Size of: ", stringify!(QemuSpin)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(QemuSpin)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).value) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(QemuSpin), - "::", - stringify!(value) - ) - ); -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct QemuLockCnt { - pub count: ::std::os::raw::c_uint, -} -#[test] -fn bindgen_test_layout_QemuLockCnt() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 4usize, - concat!("Size of: ", stringify!(QemuLockCnt)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(QemuLockCnt)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).count) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(QemuLockCnt), - "::", - stringify!(count) - ) - ); -} -#[repr(C)] -#[repr(align(4))] -#[derive(Debug, Default, Copy, Clone)] -pub struct MemTxAttrs { - pub _bitfield_align_1: [u16; 0], - pub _bitfield_1: __BindgenBitfieldUnit<[u8; 3usize]>, -} -#[test] -fn bindgen_test_layout_MemTxAttrs() { - assert_eq!( - ::std::mem::size_of::(), - 4usize, - concat!("Size of: ", stringify!(MemTxAttrs)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(MemTxAttrs)) - ); -} -impl MemTxAttrs { - #[inline] - pub fn unspecified(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u32) } - } - #[inline] - pub fn set_unspecified(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 1u8, val as u64) - } - } - #[inline] - pub fn secure(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u32) } - } - #[inline] - pub fn set_secure(&mut 
self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(1usize, 1u8, val as u64) - } - } - #[inline] - pub fn space(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 2u8) as u32) } - } - #[inline] - pub fn set_space(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(2usize, 2u8, val as u64) - } - } - #[inline] - pub fn user(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u32) } - } - #[inline] - pub fn set_user(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(4usize, 1u8, val as u64) - } - } - #[inline] - pub fn memory(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u32) } - } - #[inline] - pub fn set_memory(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(5usize, 1u8, val as u64) - } - } - #[inline] - pub fn requester_id(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 16u8) as u32) } - } - #[inline] - pub fn set_requester_id(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(6usize, 16u8, val as u64) - } - } - #[inline] - pub fn new_bitfield_1( - unspecified: ::std::os::raw::c_uint, - secure: ::std::os::raw::c_uint, - space: ::std::os::raw::c_uint, - user: ::std::os::raw::c_uint, - memory: ::std::os::raw::c_uint, - requester_id: ::std::os::raw::c_uint, - ) -> __BindgenBitfieldUnit<[u8; 3usize]> { - let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 3usize]> = Default::default(); - __bindgen_bitfield_unit.set(0usize, 1u8, { - let unspecified: u32 = unsafe { ::std::mem::transmute(unspecified) }; - unspecified as u64 - }); - __bindgen_bitfield_unit.set(1usize, 1u8, { - let secure: u32 = unsafe { ::std::mem::transmute(secure) }; - secure as u64 - }); - __bindgen_bitfield_unit.set(2usize, 2u8, { - let space: u32 = unsafe { ::std::mem::transmute(space) }; - space as u64 - }); - __bindgen_bitfield_unit.set(4usize, 1u8, { - let user: u32 = unsafe { ::std::mem::transmute(user) }; - user as u64 - }); - __bindgen_bitfield_unit.set(5usize, 1u8, { - let memory: u32 = unsafe { ::std::mem::transmute(memory) }; - memory as u64 - }); - __bindgen_bitfield_unit.set(6usize, 16u8, { - let requester_id: u32 = unsafe { ::std::mem::transmute(requester_id) }; - requester_id as u64 - }); - __bindgen_bitfield_unit - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct QTailQLink { - pub tql_next: *mut ::std::os::raw::c_void, - pub tql_prev: *mut QTailQLink, -} -#[test] -fn bindgen_test_layout_QTailQLink() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(QTailQLink)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(QTailQLink)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tql_next) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(QTailQLink), - "::", - stringify!(tql_next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tql_prev) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - 
stringify!(QTailQLink), - "::", - stringify!(tql_prev) - ) - ); -} -impl Default for QTailQLink { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct Notifier { - pub notify: ::std::option::Option< - unsafe extern "C" fn(notifier: *mut Notifier, data: *mut ::std::os::raw::c_void), - >, - pub node: Notifier__bindgen_ty_1, -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct Notifier__bindgen_ty_1 { - pub le_next: *mut Notifier, - pub le_prev: *mut *mut Notifier, -} -#[test] -fn bindgen_test_layout_Notifier__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(Notifier__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(Notifier__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).le_next) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(Notifier__bindgen_ty_1), - "::", - stringify!(le_next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).le_prev) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(Notifier__bindgen_ty_1), - "::", - stringify!(le_prev) - ) - ); -} -impl Default for Notifier__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[test] -fn bindgen_test_layout_Notifier() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(Notifier)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(Notifier)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).notify) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(Notifier), - "::", - stringify!(notify) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).node) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(Notifier), - "::", - stringify!(node) - ) - ); -} -impl Default for Notifier { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -pub type RCUCBFunc = ::std::option::Option; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct rcu_head { - pub next: *mut rcu_head, - pub func: RCUCBFunc, -} -#[test] -fn bindgen_test_layout_rcu_head() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(rcu_head)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(rcu_head)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).next) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(rcu_head), - "::", - stringify!(next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).func) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(rcu_head), - "::", - stringify!(func) - ) - ); -} -impl Default for rcu_head { 
- fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct TypeImpl { - _unused: [u8; 0], -} -pub type Type = *mut TypeImpl; -#[doc = " typedef ObjectPropertyAccessor:\n @obj: the object that owns the property\n @v: the visitor that contains the property data\n @name: the name of the property\n @opaque: the object property opaque\n @errp: a pointer to an Error that is filled if getting/setting fails.\n\n Called when trying to get/set a property."] -pub type ObjectPropertyAccessor = ::std::option::Option< - unsafe extern "C" fn( - obj: *mut Object, - v: *mut Visitor, - name: *const ::std::os::raw::c_char, - opaque: *mut ::std::os::raw::c_void, - errp: *mut *mut Error, - ), ->; -#[doc = " typedef ObjectPropertyResolve:\n @obj: the object that owns the property\n @opaque: the opaque registered with the property\n @part: the name of the property\n\n Resolves the #Object corresponding to property @part.\n\n The returned object can also be used as a starting point\n to resolve a relative path starting with \"@part\".\n\n Returns: If @path is the path that led to @obj, the function\n returns the #Object corresponding to \"@path/@part\".\n If \"@path/@part\" is not a valid object path, it returns #NULL."] -pub type ObjectPropertyResolve = ::std::option::Option< - unsafe extern "C" fn( - obj: *mut Object, - opaque: *mut ::std::os::raw::c_void, - part: *const ::std::os::raw::c_char, - ) -> *mut Object, ->; -#[doc = " typedef ObjectPropertyRelease:\n @obj: the object that owns the property\n @name: the name of the property\n @opaque: the opaque registered with the property\n\n Called when a property is removed from a object."] -pub type ObjectPropertyRelease = ::std::option::Option< - unsafe extern "C" fn( - obj: *mut Object, - name: *const ::std::os::raw::c_char, - opaque: *mut ::std::os::raw::c_void, - ), ->; -#[doc = " typedef ObjectPropertyInit:\n @obj: the object that owns the property\n @prop: the property to set\n\n Called when a property is initialized."] -pub type ObjectPropertyInit = - ::std::option::Option; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct ObjectProperty { - pub name: *mut ::std::os::raw::c_char, - pub type_: *mut ::std::os::raw::c_char, - pub description: *mut ::std::os::raw::c_char, - pub get: ObjectPropertyAccessor, - pub set: ObjectPropertyAccessor, - pub resolve: ObjectPropertyResolve, - pub release: ObjectPropertyRelease, - pub init: ObjectPropertyInit, - pub opaque: *mut ::std::os::raw::c_void, - pub defval: *mut QObject, -} -#[test] -fn bindgen_test_layout_ObjectProperty() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 80usize, - concat!("Size of: ", stringify!(ObjectProperty)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(ObjectProperty)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).name) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ObjectProperty), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).type_) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(ObjectProperty), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).description) as usize - ptr as usize }, - 16usize, - concat!( - 
"Offset of field: ", - stringify!(ObjectProperty), - "::", - stringify!(description) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).get) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(ObjectProperty), - "::", - stringify!(get) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).set) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(ObjectProperty), - "::", - stringify!(set) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).resolve) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(ObjectProperty), - "::", - stringify!(resolve) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).release) as usize - ptr as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(ObjectProperty), - "::", - stringify!(release) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).init) as usize - ptr as usize }, - 56usize, - concat!( - "Offset of field: ", - stringify!(ObjectProperty), - "::", - stringify!(init) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).opaque) as usize - ptr as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(ObjectProperty), - "::", - stringify!(opaque) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).defval) as usize - ptr as usize }, - 72usize, - concat!( - "Offset of field: ", - stringify!(ObjectProperty), - "::", - stringify!(defval) - ) - ); -} -impl Default for ObjectProperty { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[doc = " typedef ObjectUnparent:\n @obj: the object that is being removed from the composition tree\n\n Called when an object is being removed from the QOM composition tree.\n The function should remove any backlinks from children objects to @obj."] -pub type ObjectUnparent = ::std::option::Option; -#[doc = " typedef ObjectFree:\n @obj: the object being freed\n\n Called when an object's last reference is removed."] -pub type ObjectFree = ::std::option::Option; -#[doc = " struct ObjectClass:\n\n The base for all classes. 
The only thing that #ObjectClass contains is an\n integer type handle."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct ObjectClass { - pub type_: Type, - pub interfaces: *mut GSList, - pub object_cast_cache: [*const ::std::os::raw::c_char; 4usize], - pub class_cast_cache: [*const ::std::os::raw::c_char; 4usize], - pub unparent: ObjectUnparent, - pub properties: *mut GHashTable, -} -#[test] -fn bindgen_test_layout_ObjectClass() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 96usize, - concat!("Size of: ", stringify!(ObjectClass)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(ObjectClass)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).type_) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ObjectClass), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).interfaces) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(ObjectClass), - "::", - stringify!(interfaces) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).object_cast_cache) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(ObjectClass), - "::", - stringify!(object_cast_cache) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).class_cast_cache) as usize - ptr as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(ObjectClass), - "::", - stringify!(class_cast_cache) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).unparent) as usize - ptr as usize }, - 80usize, - concat!( - "Offset of field: ", - stringify!(ObjectClass), - "::", - stringify!(unparent) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).properties) as usize - ptr as usize }, - 88usize, - concat!( - "Offset of field: ", - stringify!(ObjectClass), - "::", - stringify!(properties) - ) - ); -} -impl Default for ObjectClass { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[doc = " struct Object:\n\n The base for all objects. The first member of this object is a pointer to\n a #ObjectClass. Since C guarantees that the first member of a structure\n always begins at byte 0 of that structure, as long as any sub-object places\n its parent as the first member, we can cast directly to a #Object.\n\n As a result, #Object contains a reference to the objects type as its\n first member. 
This allows identification of the real type of the object at\n run time."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct Object { - pub class: *mut ObjectClass, - pub free: ObjectFree, - pub properties: *mut GHashTable, - pub ref_: u32, - pub parent: *mut Object, -} -#[test] -fn bindgen_test_layout_Object() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(Object)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(Object)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).class) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(Object), - "::", - stringify!(class) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).free) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(Object), - "::", - stringify!(free) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).properties) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(Object), - "::", - stringify!(properties) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).ref_) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(Object), - "::", - stringify!(ref_) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).parent) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(Object), - "::", - stringify!(parent) - ) - ); -} -impl Default for Object { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct HotplugHandler { - _unused: [u8; 0], -} -#[doc = " ResettableState:\n Structure holding reset related state. The fields should not be accessed\n directly; the definition is here to allow further inclusion into other\n objects.\n\n @count: Number of reset level the object is into. 
It is incremented when\n the reset operation starts and decremented when it finishes.\n @hold_phase_pending: flag which indicates that we need to invoke the 'hold'\n phase handler for this object.\n @exit_phase_in_progress: true if we are currently in the exit phase"] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct ResettableState { - pub count: ::std::os::raw::c_uint, - pub hold_phase_pending: bool, - pub exit_phase_in_progress: bool, -} -#[test] -fn bindgen_test_layout_ResettableState() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(ResettableState)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(ResettableState)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).count) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ResettableState), - "::", - stringify!(count) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hold_phase_pending) as usize - ptr as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(ResettableState), - "::", - stringify!(hold_phase_pending) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).exit_phase_in_progress) as usize - ptr as usize }, - 5usize, - concat!( - "Offset of field: ", - stringify!(ResettableState), - "::", - stringify!(exit_phase_in_progress) - ) - ); -} -pub type DeviceRealize = - ::std::option::Option; -pub type DeviceUnrealize = ::std::option::Option; -pub type DeviceReset = ::std::option::Option; -#[doc = " struct DeviceClass - The base class for all devices.\n @props: Properties accessing state fields.\n @realize: Callback function invoked when the #DeviceState:realized\n property is changed to %true.\n @unrealize: Callback function invoked when the #DeviceState:realized\n property is changed to %false.\n @hotpluggable: indicates if #DeviceClass is hotpluggable, available\n as readonly \"hotpluggable\" property of #DeviceState instance\n"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct DeviceClass { - pub parent_class: ObjectClass, - pub categories: [::std::os::raw::c_ulong; 1usize], - #[doc = " @fw_name: name used to identify device to firmware interfaces"] - pub fw_name: *const ::std::os::raw::c_char, - #[doc = " @desc: human readable description of device"] - pub desc: *const ::std::os::raw::c_char, - #[doc = " @props_: properties associated with device, should only be\n assigned by using device_class_set_props(). The underscore\n ensures a compile-time error if someone attempts to assign\n dc->props directly."] - pub props_: *mut Property, - #[doc = " @user_creatable: Can user instantiate with -device / device_add?\n\n All devices should support instantiation with device_add, and\n this flag should not exist. But we're not there, yet. Some\n devices fail to instantiate with cryptic error messages.\n Others instantiate, but don't work. 
Exposing users to such\n behavior would be cruel; clearing this flag will protect them.\n It should never be cleared without a comment explaining why it\n is cleared.\n\n TODO remove once we're there"] - pub user_creatable: bool, - pub hotpluggable: bool, - #[doc = " @reset: deprecated device reset method pointer\n\n Modern code should use the ResettableClass interface to\n implement a multi-phase reset.\n\n TODO: remove once every reset callback is unused"] - pub reset: DeviceReset, - pub realize: DeviceRealize, - pub unrealize: DeviceUnrealize, - #[doc = " @vmsd: device state serialisation description for\n migration/save/restore"] - pub vmsd: *const VMStateDescription, - #[doc = " @bus_type: bus type\n private: to qdev / bus."] - pub bus_type: *const ::std::os::raw::c_char, -} -#[test] -fn bindgen_test_layout_DeviceClass() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 176usize, - concat!("Size of: ", stringify!(DeviceClass)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(DeviceClass)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).parent_class) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(parent_class) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).categories) as usize - ptr as usize }, - 96usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(categories) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fw_name) as usize - ptr as usize }, - 104usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(fw_name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).desc) as usize - ptr as usize }, - 112usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(desc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).props_) as usize - ptr as usize }, - 120usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(props_) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).user_creatable) as usize - ptr as usize }, - 128usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(user_creatable) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hotpluggable) as usize - ptr as usize }, - 129usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(hotpluggable) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).reset) as usize - ptr as usize }, - 136usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(reset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).realize) as usize - ptr as usize }, - 144usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(realize) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).unrealize) as usize - ptr as usize }, - 152usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(unrealize) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vmsd) as usize - ptr as usize }, - 160usize, - concat!( - "Offset of field: ", - stringify!(DeviceClass), - "::", - stringify!(vmsd) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).bus_type) as usize - ptr as usize }, - 168usize, - concat!( - "Offset of field: ", - 
stringify!(DeviceClass), - "::", - stringify!(bus_type) - ) - ); -} -impl Default for DeviceClass { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct NamedGPIOList { - pub name: *mut ::std::os::raw::c_char, - pub in_: *mut qemu_irq, - pub num_in: ::std::os::raw::c_int, - pub num_out: ::std::os::raw::c_int, - pub node: NamedGPIOList__bindgen_ty_1, -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct NamedGPIOList__bindgen_ty_1 { - pub le_next: *mut NamedGPIOList, - pub le_prev: *mut *mut NamedGPIOList, -} -#[test] -fn bindgen_test_layout_NamedGPIOList__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(NamedGPIOList__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(NamedGPIOList__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).le_next) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(NamedGPIOList__bindgen_ty_1), - "::", - stringify!(le_next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).le_prev) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(NamedGPIOList__bindgen_ty_1), - "::", - stringify!(le_prev) - ) - ); -} -impl Default for NamedGPIOList__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[test] -fn bindgen_test_layout_NamedGPIOList() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(NamedGPIOList)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(NamedGPIOList)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).name) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(NamedGPIOList), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).in_) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(NamedGPIOList), - "::", - stringify!(in_) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).num_in) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(NamedGPIOList), - "::", - stringify!(num_in) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).num_out) as usize - ptr as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(NamedGPIOList), - "::", - stringify!(num_out) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).node) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(NamedGPIOList), - "::", - stringify!(node) - ) - ); -} -impl Default for NamedGPIOList { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct NamedClockList { - pub name: *mut ::std::os::raw::c_char, - pub clock: *mut Clock, - pub output: bool, - pub alias: bool, - pub node: NamedClockList__bindgen_ty_1, -} -#[repr(C)] 
-#[derive(Debug, Copy, Clone)] -pub struct NamedClockList__bindgen_ty_1 { - pub le_next: *mut NamedClockList, - pub le_prev: *mut *mut NamedClockList, -} -#[test] -fn bindgen_test_layout_NamedClockList__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(NamedClockList__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(NamedClockList__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).le_next) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(NamedClockList__bindgen_ty_1), - "::", - stringify!(le_next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).le_prev) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(NamedClockList__bindgen_ty_1), - "::", - stringify!(le_prev) - ) - ); -} -impl Default for NamedClockList__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[test] -fn bindgen_test_layout_NamedClockList() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 40usize, - concat!("Size of: ", stringify!(NamedClockList)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(NamedClockList)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).name) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(NamedClockList), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).clock) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(NamedClockList), - "::", - stringify!(clock) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).output) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(NamedClockList), - "::", - stringify!(output) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).alias) as usize - ptr as usize }, - 17usize, - concat!( - "Offset of field: ", - stringify!(NamedClockList), - "::", - stringify!(alias) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).node) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(NamedClockList), - "::", - stringify!(node) - ) - ); -} -impl Default for NamedClockList { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct MemReentrancyGuard { - pub engaged_in_io: bool, -} -#[test] -fn bindgen_test_layout_MemReentrancyGuard() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 1usize, - concat!("Size of: ", stringify!(MemReentrancyGuard)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!("Alignment of ", stringify!(MemReentrancyGuard)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).engaged_in_io) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(MemReentrancyGuard), - "::", - stringify!(engaged_in_io) - ) - ); -} -#[repr(C)] -#[derive(Debug, 
Copy, Clone)] -pub struct NamedGPIOListHead { - pub lh_first: *mut NamedGPIOList, -} -#[test] -fn bindgen_test_layout_NamedGPIOListHead() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(NamedGPIOListHead)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(NamedGPIOListHead)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lh_first) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(NamedGPIOListHead), - "::", - stringify!(lh_first) - ) - ); -} -impl Default for NamedGPIOListHead { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct NamedClockListHead { - pub lh_first: *mut NamedClockList, -} -#[test] -fn bindgen_test_layout_NamedClockListHead() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(NamedClockListHead)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(NamedClockListHead)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lh_first) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(NamedClockListHead), - "::", - stringify!(lh_first) - ) - ); -} -impl Default for NamedClockListHead { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct BusStateHead { - pub lh_first: *mut BusState, -} -#[test] -fn bindgen_test_layout_BusStateHead() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(BusStateHead)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(BusStateHead)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lh_first) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(BusStateHead), - "::", - stringify!(lh_first) - ) - ); -} -impl Default for BusStateHead { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[doc = " struct DeviceState - common device state, accessed with qdev helpers\n\n This structure should not be accessed directly. 
We declare it here\n so that it can be embedded in individual device state structures."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct DeviceState { - pub parent_obj: Object, - #[doc = " @id: global device id"] - pub id: *mut ::std::os::raw::c_char, - #[doc = " @canonical_path: canonical path of realized device in the QOM tree"] - pub canonical_path: *mut ::std::os::raw::c_char, - #[doc = " @realized: has device been realized?"] - pub realized: bool, - #[doc = " @pending_deleted_event: track pending deletion events during unplug"] - pub pending_deleted_event: bool, - #[doc = " @pending_deleted_expires_ms: optional timeout for deletion events"] - pub pending_deleted_expires_ms: i64, - #[doc = " @opts: QDict of options for the device"] - pub opts: *mut QDict, - #[doc = " @hotplugged: was device added after PHASE_MACHINE_READY?"] - pub hotplugged: ::std::os::raw::c_int, - #[doc = " @allow_unplug_during_migration: can device be unplugged during migration"] - pub allow_unplug_during_migration: bool, - #[doc = " @parent_bus: bus this device belongs to"] - pub parent_bus: *mut BusState, - #[doc = " @gpios: QLIST of named GPIOs the device provides."] - pub gpios: NamedGPIOListHead, - #[doc = " @clocks: QLIST of named clocks the device provides."] - pub clocks: NamedClockListHead, - #[doc = " @child_bus: QLIST of child buses"] - pub child_bus: BusStateHead, - #[doc = " @num_child_bus: number of @child_bus entries"] - pub num_child_bus: ::std::os::raw::c_int, - #[doc = " @instance_id_alias: device alias for handling legacy migration setups"] - pub instance_id_alias: ::std::os::raw::c_int, - #[doc = " @alias_required_for_version: indicates @instance_id_alias is\n needed for migration"] - pub alias_required_for_version: ::std::os::raw::c_int, - #[doc = " @reset: ResettableState for the device; handled by Resettable interface."] - pub reset: ResettableState, - #[doc = " @unplug_blockers: list of reasons to block unplugging of device"] - pub unplug_blockers: *mut GSList, - #[doc = " @mem_reentrancy_guard: Is the device currently in mmio/pio/dma?\n\n Used to prevent re-entrancy confusing things."] - pub mem_reentrancy_guard: MemReentrancyGuard, -} -#[test] -fn bindgen_test_layout_DeviceState() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 160usize, - concat!("Size of: ", stringify!(DeviceState)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(DeviceState)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).parent_obj) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(parent_obj) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).id) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).canonical_path) as usize - ptr as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(canonical_path) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).realized) as usize - ptr as usize }, - 56usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(realized) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pending_deleted_event) as usize - ptr as usize }, - 57usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - 
stringify!(pending_deleted_event) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pending_deleted_expires_ms) as usize - ptr as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(pending_deleted_expires_ms) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).opts) as usize - ptr as usize }, - 72usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(opts) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hotplugged) as usize - ptr as usize }, - 80usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(hotplugged) - ) - ); - assert_eq!( - unsafe { - ::std::ptr::addr_of!((*ptr).allow_unplug_during_migration) as usize - ptr as usize - }, - 84usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(allow_unplug_during_migration) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).parent_bus) as usize - ptr as usize }, - 88usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(parent_bus) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gpios) as usize - ptr as usize }, - 96usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(gpios) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).clocks) as usize - ptr as usize }, - 104usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(clocks) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).child_bus) as usize - ptr as usize }, - 112usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(child_bus) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).num_child_bus) as usize - ptr as usize }, - 120usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(num_child_bus) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).instance_id_alias) as usize - ptr as usize }, - 124usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(instance_id_alias) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).alias_required_for_version) as usize - ptr as usize }, - 128usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(alias_required_for_version) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).reset) as usize - ptr as usize }, - 132usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(reset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).unplug_blockers) as usize - ptr as usize }, - 144usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(unplug_blockers) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mem_reentrancy_guard) as usize - ptr as usize }, - 152usize, - concat!( - "Offset of field: ", - stringify!(DeviceState), - "::", - stringify!(mem_reentrancy_guard) - ) - ); -} -impl Default for DeviceState { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct BusChild { - pub rcu: rcu_head, - pub child: *mut DeviceState, - pub index: ::std::os::raw::c_int, - pub sibling: BusChild__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union BusChild__bindgen_ty_1 { - pub tqe_next: *mut BusChild, - pub 
tqe_circ: QTailQLink, -} -#[test] -fn bindgen_test_layout_BusChild__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(BusChild__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(BusChild__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqe_next) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(BusChild__bindgen_ty_1), - "::", - stringify!(tqe_next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqe_circ) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(BusChild__bindgen_ty_1), - "::", - stringify!(tqe_circ) - ) - ); -} -impl Default for BusChild__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for BusChild__bindgen_ty_1 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "BusChild__bindgen_ty_1 {{ union }}") - } -} -#[test] -fn bindgen_test_layout_BusChild() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 48usize, - concat!("Size of: ", stringify!(BusChild)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(BusChild)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).rcu) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(BusChild), - "::", - stringify!(rcu) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).child) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(BusChild), - "::", - stringify!(child) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).index) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(BusChild), - "::", - stringify!(index) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sibling) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(BusChild), - "::", - stringify!(sibling) - ) - ); -} -impl Default for BusChild { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for BusChild { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "BusChild {{ rcu: {:?}, child: {:?}, index: {:?}, sibling: {:?} }}", - self.rcu, self.child, self.index, self.sibling - ) - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union BusChildHead { - pub tqh_first: *mut BusChild, - pub tqh_circ: QTailQLink, -} -#[test] -fn bindgen_test_layout_BusChildHead() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(BusChildHead)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(BusChildHead)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqh_first) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(BusChildHead), - "::", - stringify!(tqh_first) - ) - ); - assert_eq!( - 
unsafe { ::std::ptr::addr_of!((*ptr).tqh_circ) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(BusChildHead), - "::", - stringify!(tqh_circ) - ) - ); -} -impl Default for BusChildHead { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for BusChildHead { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "BusChildHead {{ union }}") - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct BusStateEntry { - pub le_next: *mut BusState, - pub le_prev: *mut *mut BusState, -} -#[test] -fn bindgen_test_layout_BusStateEntry() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(BusStateEntry)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(BusStateEntry)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).le_next) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(BusStateEntry), - "::", - stringify!(le_next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).le_prev) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(BusStateEntry), - "::", - stringify!(le_prev) - ) - ); -} -impl Default for BusStateEntry { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[doc = " struct BusState:\n @obj: parent object\n @parent: parent Device\n @name: name of bus\n @hotplug_handler: link to a hotplug handler associated with bus.\n @max_index: max number of child buses\n @realized: is the bus itself realized?\n @full: is the bus full?\n @num_children: current number of child buses"] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct BusState { - pub obj: Object, - pub parent: *mut DeviceState, - pub name: *mut ::std::os::raw::c_char, - pub hotplug_handler: *mut HotplugHandler, - pub max_index: ::std::os::raw::c_int, - pub realized: bool, - pub full: bool, - pub num_children: ::std::os::raw::c_int, - #[doc = " @children: an RCU protected QTAILQ, thus readers must use RCU\n to access it, and writers must hold the big qemu lock"] - pub children: BusChildHead, - #[doc = " @sibling: next bus"] - pub sibling: BusStateEntry, - #[doc = " @reset: ResettableState for the bus; handled by Resettable interface."] - pub reset: ResettableState, -} -#[test] -fn bindgen_test_layout_BusState() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 120usize, - concat!("Size of: ", stringify!(BusState)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(BusState)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).obj) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(BusState), - "::", - stringify!(obj) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).parent) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(BusState), - "::", - stringify!(parent) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).name) as usize - ptr as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(BusState), - 
"::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hotplug_handler) as usize - ptr as usize }, - 56usize, - concat!( - "Offset of field: ", - stringify!(BusState), - "::", - stringify!(hotplug_handler) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).max_index) as usize - ptr as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(BusState), - "::", - stringify!(max_index) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).realized) as usize - ptr as usize }, - 68usize, - concat!( - "Offset of field: ", - stringify!(BusState), - "::", - stringify!(realized) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).full) as usize - ptr as usize }, - 69usize, - concat!( - "Offset of field: ", - stringify!(BusState), - "::", - stringify!(full) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).num_children) as usize - ptr as usize }, - 72usize, - concat!( - "Offset of field: ", - stringify!(BusState), - "::", - stringify!(num_children) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).children) as usize - ptr as usize }, - 80usize, - concat!( - "Offset of field: ", - stringify!(BusState), - "::", - stringify!(children) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sibling) as usize - ptr as usize }, - 96usize, - concat!( - "Offset of field: ", - stringify!(BusState), - "::", - stringify!(sibling) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).reset) as usize - ptr as usize }, - 112usize, - concat!( - "Offset of field: ", - stringify!(BusState), - "::", - stringify!(reset) - ) - ); -} -impl Default for BusState { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for BusState { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write ! (f , "BusState {{ obj: {:?}, parent: {:?}, name: {:?}, hotplug_handler: {:?}, max_index: {:?}, realized: {:?}, full: {:?}, num_children: {:?}, children: {:?}, sibling: {:?}, reset: {:?} }}" , self . obj , self . parent , self . name , self . hotplug_handler , self . max_index , self . realized , self . full , self . num_children , self . children , self . sibling , self . 
reset) - } -} -pub type PTR = *mut ::std::os::raw::c_void; -pub type bfd_vma = u64; -pub type bfd_byte = u8; -pub const bfd_flavour_bfd_target_unknown_flavour: bfd_flavour = bfd_flavour(0); -pub const bfd_flavour_bfd_target_aout_flavour: bfd_flavour = bfd_flavour(1); -pub const bfd_flavour_bfd_target_coff_flavour: bfd_flavour = bfd_flavour(2); -pub const bfd_flavour_bfd_target_ecoff_flavour: bfd_flavour = bfd_flavour(3); -pub const bfd_flavour_bfd_target_elf_flavour: bfd_flavour = bfd_flavour(4); -pub const bfd_flavour_bfd_target_ieee_flavour: bfd_flavour = bfd_flavour(5); -pub const bfd_flavour_bfd_target_nlm_flavour: bfd_flavour = bfd_flavour(6); -pub const bfd_flavour_bfd_target_oasys_flavour: bfd_flavour = bfd_flavour(7); -pub const bfd_flavour_bfd_target_tekhex_flavour: bfd_flavour = bfd_flavour(8); -pub const bfd_flavour_bfd_target_srec_flavour: bfd_flavour = bfd_flavour(9); -pub const bfd_flavour_bfd_target_ihex_flavour: bfd_flavour = bfd_flavour(10); -pub const bfd_flavour_bfd_target_som_flavour: bfd_flavour = bfd_flavour(11); -pub const bfd_flavour_bfd_target_os9k_flavour: bfd_flavour = bfd_flavour(12); -pub const bfd_flavour_bfd_target_versados_flavour: bfd_flavour = bfd_flavour(13); -pub const bfd_flavour_bfd_target_msdos_flavour: bfd_flavour = bfd_flavour(14); -pub const bfd_flavour_bfd_target_evax_flavour: bfd_flavour = bfd_flavour(15); -impl ::std::ops::BitOr for bfd_flavour { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - bfd_flavour(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for bfd_flavour { - #[inline] - fn bitor_assign(&mut self, rhs: bfd_flavour) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for bfd_flavour { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - bfd_flavour(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for bfd_flavour { - #[inline] - fn bitand_assign(&mut self, rhs: bfd_flavour) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct bfd_flavour(pub ::std::os::raw::c_uint); -pub const bfd_endian_BFD_ENDIAN_BIG: bfd_endian = bfd_endian(0); -pub const bfd_endian_BFD_ENDIAN_LITTLE: bfd_endian = bfd_endian(1); -pub const bfd_endian_BFD_ENDIAN_UNKNOWN: bfd_endian = bfd_endian(2); -impl ::std::ops::BitOr for bfd_endian { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - bfd_endian(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for bfd_endian { - #[inline] - fn bitor_assign(&mut self, rhs: bfd_endian) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for bfd_endian { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - bfd_endian(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for bfd_endian { - #[inline] - fn bitand_assign(&mut self, rhs: bfd_endian) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct bfd_endian(pub ::std::os::raw::c_uint); -pub const bfd_architecture_bfd_arch_unknown: bfd_architecture = bfd_architecture(0); -pub const bfd_architecture_bfd_arch_obscure: bfd_architecture = bfd_architecture(1); -pub const bfd_architecture_bfd_arch_m68k: bfd_architecture = bfd_architecture(2); -pub const bfd_architecture_bfd_arch_vax: bfd_architecture = bfd_architecture(3); -pub const bfd_architecture_bfd_arch_i960: bfd_architecture = bfd_architecture(4); -pub const bfd_architecture_bfd_arch_a29k: bfd_architecture = bfd_architecture(5); -pub const bfd_architecture_bfd_arch_sparc: bfd_architecture 
= bfd_architecture(6); -pub const bfd_architecture_bfd_arch_mips: bfd_architecture = bfd_architecture(7); -pub const bfd_architecture_bfd_arch_i386: bfd_architecture = bfd_architecture(8); -pub const bfd_architecture_bfd_arch_we32k: bfd_architecture = bfd_architecture(9); -pub const bfd_architecture_bfd_arch_tahoe: bfd_architecture = bfd_architecture(10); -pub const bfd_architecture_bfd_arch_i860: bfd_architecture = bfd_architecture(11); -pub const bfd_architecture_bfd_arch_romp: bfd_architecture = bfd_architecture(12); -pub const bfd_architecture_bfd_arch_alliant: bfd_architecture = bfd_architecture(13); -pub const bfd_architecture_bfd_arch_convex: bfd_architecture = bfd_architecture(14); -pub const bfd_architecture_bfd_arch_m88k: bfd_architecture = bfd_architecture(15); -pub const bfd_architecture_bfd_arch_pyramid: bfd_architecture = bfd_architecture(16); -pub const bfd_architecture_bfd_arch_h8300: bfd_architecture = bfd_architecture(17); -pub const bfd_architecture_bfd_arch_powerpc: bfd_architecture = bfd_architecture(18); -pub const bfd_architecture_bfd_arch_rs6000: bfd_architecture = bfd_architecture(19); -pub const bfd_architecture_bfd_arch_hppa: bfd_architecture = bfd_architecture(20); -pub const bfd_architecture_bfd_arch_d10v: bfd_architecture = bfd_architecture(21); -pub const bfd_architecture_bfd_arch_z8k: bfd_architecture = bfd_architecture(22); -pub const bfd_architecture_bfd_arch_h8500: bfd_architecture = bfd_architecture(23); -pub const bfd_architecture_bfd_arch_sh: bfd_architecture = bfd_architecture(24); -pub const bfd_architecture_bfd_arch_alpha: bfd_architecture = bfd_architecture(25); -pub const bfd_architecture_bfd_arch_arm: bfd_architecture = bfd_architecture(26); -pub const bfd_architecture_bfd_arch_ns32k: bfd_architecture = bfd_architecture(27); -pub const bfd_architecture_bfd_arch_w65: bfd_architecture = bfd_architecture(28); -pub const bfd_architecture_bfd_arch_tic30: bfd_architecture = bfd_architecture(29); -pub const bfd_architecture_bfd_arch_v850: bfd_architecture = bfd_architecture(30); -pub const bfd_architecture_bfd_arch_arc: bfd_architecture = bfd_architecture(31); -pub const bfd_architecture_bfd_arch_m32r: bfd_architecture = bfd_architecture(32); -pub const bfd_architecture_bfd_arch_mn10200: bfd_architecture = bfd_architecture(33); -pub const bfd_architecture_bfd_arch_mn10300: bfd_architecture = bfd_architecture(34); -pub const bfd_architecture_bfd_arch_avr: bfd_architecture = bfd_architecture(35); -pub const bfd_architecture_bfd_arch_cris: bfd_architecture = bfd_architecture(36); -pub const bfd_architecture_bfd_arch_microblaze: bfd_architecture = bfd_architecture(37); -pub const bfd_architecture_bfd_arch_moxie: bfd_architecture = bfd_architecture(38); -pub const bfd_architecture_bfd_arch_ia64: bfd_architecture = bfd_architecture(39); -pub const bfd_architecture_bfd_arch_nios2: bfd_architecture = bfd_architecture(40); -pub const bfd_architecture_bfd_arch_rx: bfd_architecture = bfd_architecture(41); -pub const bfd_architecture_bfd_arch_loongarch: bfd_architecture = bfd_architecture(42); -pub const bfd_architecture_bfd_arch_last: bfd_architecture = bfd_architecture(43); -impl ::std::ops::BitOr for bfd_architecture { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - bfd_architecture(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for bfd_architecture { - #[inline] - fn bitor_assign(&mut self, rhs: bfd_architecture) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for bfd_architecture { - type Output = Self; - #[inline] - fn 
bitand(self, other: Self) -> Self { - bfd_architecture(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for bfd_architecture { - #[inline] - fn bitand_assign(&mut self, rhs: bfd_architecture) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct bfd_architecture(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Copy, Clone)] -pub struct symbol_cache_entry { - pub name: *const ::std::os::raw::c_char, - pub udata: symbol_cache_entry__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union symbol_cache_entry__bindgen_ty_1 { - pub p: PTR, - pub i: bfd_vma, -} -#[test] -fn bindgen_test_layout_symbol_cache_entry__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(symbol_cache_entry__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!( - "Alignment of ", - stringify!(symbol_cache_entry__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).p) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(symbol_cache_entry__bindgen_ty_1), - "::", - stringify!(p) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).i) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(symbol_cache_entry__bindgen_ty_1), - "::", - stringify!(i) - ) - ); -} -impl Default for symbol_cache_entry__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for symbol_cache_entry__bindgen_ty_1 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "symbol_cache_entry__bindgen_ty_1 {{ union }}") - } -} -#[test] -fn bindgen_test_layout_symbol_cache_entry() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(symbol_cache_entry)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(symbol_cache_entry)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).name) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(symbol_cache_entry), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).udata) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(symbol_cache_entry), - "::", - stringify!(udata) - ) - ); -} -impl Default for symbol_cache_entry { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for symbol_cache_entry { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "symbol_cache_entry {{ name: {:?}, udata: {:?} }}", - self.name, self.udata - ) - } -} -pub type asymbol = symbol_cache_entry; -pub type fprintf_function = ::std::option::Option< - unsafe extern "C" fn( - f: *mut FILE, - fmt: *const ::std::os::raw::c_char, - ... 
- ) -> ::std::os::raw::c_int, ->; -pub const dis_insn_type_dis_noninsn: dis_insn_type = dis_insn_type(0); -pub const dis_insn_type_dis_nonbranch: dis_insn_type = dis_insn_type(1); -pub const dis_insn_type_dis_branch: dis_insn_type = dis_insn_type(2); -pub const dis_insn_type_dis_condbranch: dis_insn_type = dis_insn_type(3); -pub const dis_insn_type_dis_jsr: dis_insn_type = dis_insn_type(4); -pub const dis_insn_type_dis_condjsr: dis_insn_type = dis_insn_type(5); -pub const dis_insn_type_dis_dref: dis_insn_type = dis_insn_type(6); -pub const dis_insn_type_dis_dref2: dis_insn_type = dis_insn_type(7); -impl ::std::ops::BitOr for dis_insn_type { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - dis_insn_type(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for dis_insn_type { - #[inline] - fn bitor_assign(&mut self, rhs: dis_insn_type) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for dis_insn_type { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - dis_insn_type(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for dis_insn_type { - #[inline] - fn bitand_assign(&mut self, rhs: dis_insn_type) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct dis_insn_type(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct disassemble_info { - pub fprintf_func: fprintf_function, - pub stream: *mut FILE, - pub application_data: PTR, - pub flavour: bfd_flavour, - pub arch: bfd_architecture, - pub mach: ::std::os::raw::c_ulong, - pub endian: bfd_endian, - pub symbols: *mut *mut asymbol, - pub num_symbols: ::std::os::raw::c_int, - pub flags: ::std::os::raw::c_ulong, - pub private_data: PTR, - pub read_memory_func: ::std::option::Option< - unsafe extern "C" fn( - memaddr: bfd_vma, - myaddr: *mut bfd_byte, - length: ::std::os::raw::c_int, - info: *mut disassemble_info, - ) -> ::std::os::raw::c_int, - >, - pub memory_error_func: ::std::option::Option< - unsafe extern "C" fn( - status: ::std::os::raw::c_int, - memaddr: bfd_vma, - info: *mut disassemble_info, - ), - >, - pub print_address_func: - ::std::option::Option, - pub print_insn: ::std::option::Option< - unsafe extern "C" fn(addr: bfd_vma, info: *mut disassemble_info) -> ::std::os::raw::c_int, - >, - pub symbol_at_address_func: ::std::option::Option< - unsafe extern "C" fn(addr: bfd_vma, info: *mut disassemble_info) -> ::std::os::raw::c_int, - >, - pub buffer: *const bfd_byte, - pub buffer_vma: bfd_vma, - pub buffer_length: ::std::os::raw::c_int, - pub bytes_per_line: ::std::os::raw::c_int, - pub bytes_per_chunk: ::std::os::raw::c_int, - pub display_endian: bfd_endian, - pub insn_info_valid: ::std::os::raw::c_char, - pub branch_delay_insns: ::std::os::raw::c_char, - pub data_size: ::std::os::raw::c_char, - pub insn_type: dis_insn_type, - pub target: bfd_vma, - pub target2: bfd_vma, - pub disassembler_options: *mut ::std::os::raw::c_char, - pub show_opcodes: bool, - pub target_info: *mut ::std::os::raw::c_void, - pub cap_arch: ::std::os::raw::c_int, - pub cap_mode: ::std::os::raw::c_int, - pub cap_insn_unit: ::std::os::raw::c_int, - pub cap_insn_split: ::std::os::raw::c_int, -} -#[test] -fn bindgen_test_layout_disassemble_info() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 216usize, - concat!("Size of: ", stringify!(disassemble_info)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - 
concat!("Alignment of ", stringify!(disassemble_info)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fprintf_func) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(fprintf_func) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).stream) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(stream) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).application_data) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(application_data) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).flavour) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(flavour) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).arch) as usize - ptr as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(arch) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mach) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(mach) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).endian) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(endian) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).symbols) as usize - ptr as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(symbols) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).num_symbols) as usize - ptr as usize }, - 56usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(num_symbols) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).private_data) as usize - ptr as usize }, - 72usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(private_data) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).read_memory_func) as usize - ptr as usize }, - 80usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(read_memory_func) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).memory_error_func) as usize - ptr as usize }, - 88usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(memory_error_func) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).print_address_func) as usize - ptr as usize }, - 96usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(print_address_func) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).print_insn) as usize - ptr as usize }, - 104usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(print_insn) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).symbol_at_address_func) as usize - ptr as usize }, - 112usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(symbol_at_address_func) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).buffer) as usize - ptr as usize }, - 
120usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(buffer) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).buffer_vma) as usize - ptr as usize }, - 128usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(buffer_vma) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).buffer_length) as usize - ptr as usize }, - 136usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(buffer_length) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).bytes_per_line) as usize - ptr as usize }, - 140usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(bytes_per_line) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).bytes_per_chunk) as usize - ptr as usize }, - 144usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(bytes_per_chunk) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).display_endian) as usize - ptr as usize }, - 148usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(display_endian) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).insn_info_valid) as usize - ptr as usize }, - 152usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(insn_info_valid) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).branch_delay_insns) as usize - ptr as usize }, - 153usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(branch_delay_insns) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).data_size) as usize - ptr as usize }, - 154usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(data_size) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).insn_type) as usize - ptr as usize }, - 156usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(insn_type) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).target) as usize - ptr as usize }, - 160usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(target) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).target2) as usize - ptr as usize }, - 168usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(target2) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).disassembler_options) as usize - ptr as usize }, - 176usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(disassembler_options) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).show_opcodes) as usize - ptr as usize }, - 184usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(show_opcodes) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).target_info) as usize - ptr as usize }, - 192usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(target_info) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cap_arch) as usize - ptr as usize }, - 200usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(cap_arch) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cap_mode) as usize - ptr as usize }, - 204usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(cap_mode) 
- ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cap_insn_unit) as usize - ptr as usize }, - 208usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(cap_insn_unit) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cap_insn_split) as usize - ptr as usize }, - 212usize, - concat!( - "Offset of field: ", - stringify!(disassemble_info), - "::", - stringify!(cap_insn_split) - ) - ); -} -impl Default for disassemble_info { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -pub type hwaddr = u64; -#[doc = " vaddr:\n Type wide enough to contain any #target_ulong virtual address."] -pub type vaddr = u64; -#[repr(C)] -#[derive(Copy, Clone)] -pub union CPUTLBEntry { - pub __bindgen_anon_1: CPUTLBEntry__bindgen_ty_1, - pub addr_idx: [u64; 4usize], -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct CPUTLBEntry__bindgen_ty_1 { - pub addr_read: u64, - pub addr_write: u64, - pub addr_code: u64, - pub addend: usize, -} -#[test] -fn bindgen_test_layout_CPUTLBEntry__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(CPUTLBEntry__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUTLBEntry__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).addr_read) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntry__bindgen_ty_1), - "::", - stringify!(addr_read) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).addr_write) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntry__bindgen_ty_1), - "::", - stringify!(addr_write) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).addr_code) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntry__bindgen_ty_1), - "::", - stringify!(addr_code) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).addend) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntry__bindgen_ty_1), - "::", - stringify!(addend) - ) - ); -} -#[test] -fn bindgen_test_layout_CPUTLBEntry() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(CPUTLBEntry)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUTLBEntry)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).addr_idx) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntry), - "::", - stringify!(addr_idx) - ) - ); -} -impl Default for CPUTLBEntry { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUTLBEntry { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "CPUTLBEntry {{ union }}") - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct CPUTLBDescFast { - pub mask: usize, - pub table: *mut CPUTLBEntry, -} -#[test] -fn bindgen_test_layout_CPUTLBDescFast() { - const UNINIT: ::std::mem::MaybeUninit = 
::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(CPUTLBDescFast)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUTLBDescFast)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mask) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDescFast), - "::", - stringify!(mask) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).table) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDescFast), - "::", - stringify!(table) - ) - ); -} -impl Default for CPUTLBDescFast { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -pub const ShutdownCause_SHUTDOWN_CAUSE_NONE: ShutdownCause = ShutdownCause(0); -pub const ShutdownCause_SHUTDOWN_CAUSE_HOST_ERROR: ShutdownCause = ShutdownCause(1); -pub const ShutdownCause_SHUTDOWN_CAUSE_HOST_QMP_QUIT: ShutdownCause = ShutdownCause(2); -pub const ShutdownCause_SHUTDOWN_CAUSE_HOST_QMP_SYSTEM_RESET: ShutdownCause = ShutdownCause(3); -pub const ShutdownCause_SHUTDOWN_CAUSE_HOST_SIGNAL: ShutdownCause = ShutdownCause(4); -pub const ShutdownCause_SHUTDOWN_CAUSE_HOST_UI: ShutdownCause = ShutdownCause(5); -pub const ShutdownCause_SHUTDOWN_CAUSE_GUEST_SHUTDOWN: ShutdownCause = ShutdownCause(6); -pub const ShutdownCause_SHUTDOWN_CAUSE_GUEST_RESET: ShutdownCause = ShutdownCause(7); -pub const ShutdownCause_SHUTDOWN_CAUSE_GUEST_PANIC: ShutdownCause = ShutdownCause(8); -pub const ShutdownCause_SHUTDOWN_CAUSE_SUBSYSTEM_RESET: ShutdownCause = ShutdownCause(9); -pub const ShutdownCause_SHUTDOWN_CAUSE_SNAPSHOT_LOAD: ShutdownCause = ShutdownCause(10); -pub const ShutdownCause_SHUTDOWN_CAUSE__MAX: ShutdownCause = ShutdownCause(11); -impl ::std::ops::BitOr for ShutdownCause { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - ShutdownCause(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for ShutdownCause { - #[inline] - fn bitor_assign(&mut self, rhs: ShutdownCause) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for ShutdownCause { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - ShutdownCause(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for ShutdownCause { - #[inline] - fn bitand_assign(&mut self, rhs: ShutdownCause) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct ShutdownCause(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct SysemuCPUOps { - _unused: [u8; 0], -} -#[doc = " CPUClass:\n @class_by_name: Callback to map -cpu command line model name to an\n instantiatable CPU type.\n @parse_features: Callback to parse command line arguments.\n @reset_dump_flags: #CPUDumpFlags to use for reset logging.\n @has_work: Callback for checking if there is work to do.\n @mmu_index: Callback for choosing softmmu mmu index;\n may be used internally by memory_rw_debug without TCG.\n @memory_rw_debug: Callback for GDB memory access.\n @dump_state: Callback for dumping state.\n @query_cpu_fast:\n Fill in target specific information for the \"query-cpus-fast\"\n QAPI call.\n @get_arch_id: Callback for getting architecture-dependent CPU ID.\n @set_pc: Callback for setting the Program Counter register. 
This\n should have the semantics used by the target architecture when\n setting the PC from a source such as an ELF file entry point;\n for example on Arm it will also set the Thumb mode bit based\n on the least significant bit of the new PC value.\n If the target behaviour here is anything other than \"set\n the PC register to the value passed in\" then the target must\n also implement the synchronize_from_tb hook.\n @get_pc: Callback for getting the Program Counter register.\n As above, with the semantics of the target architecture.\n @gdb_read_register: Callback for letting GDB read a register.\n @gdb_write_register: Callback for letting GDB write a register.\n @gdb_adjust_breakpoint: Callback for adjusting the address of a\n breakpoint. Used by AVR to handle a gdb mis-feature with\n its Harvard architecture split code and data.\n @gdb_num_core_regs: Number of core registers accessible to GDB or 0 to infer\n from @gdb_core_xml_file.\n @gdb_core_xml_file: File name for core registers GDB XML description.\n @gdb_stop_before_watchpoint: Indicates whether GDB expects the CPU to stop\n before the insn which triggers a watchpoint rather than after it.\n @gdb_arch_name: Optional callback that returns the architecture name known\n to GDB. The caller must free the returned string with g_free.\n @disas_set_info: Setup architecture specific components of disassembly info\n @adjust_watchpoint_address: Perform a target-specific adjustment to an\n address before attempting to match it against watchpoints.\n @deprecation_note: If this CPUClass is deprecated, this field provides\n related information.\n\n Represents a CPU family or model."] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct CPUClass { - pub parent_class: DeviceClass, - pub class_by_name: ::std::option::Option< - unsafe extern "C" fn(cpu_model: *const ::std::os::raw::c_char) -> *mut ObjectClass, - >, - pub parse_features: ::std::option::Option< - unsafe extern "C" fn( - typename: *const ::std::os::raw::c_char, - str_: *mut ::std::os::raw::c_char, - errp: *mut *mut Error, - ), - >, - pub has_work: ::std::option::Option bool>, - pub mmu_index: ::std::option::Option< - unsafe extern "C" fn(cpu: *mut CPUState, ifetch: bool) -> ::std::os::raw::c_int, - >, - pub memory_rw_debug: ::std::option::Option< - unsafe extern "C" fn( - cpu: *mut CPUState, - addr: vaddr, - buf: *mut u8, - len: ::std::os::raw::c_int, - is_write: bool, - ) -> ::std::os::raw::c_int, - >, - pub dump_state: ::std::option::Option< - unsafe extern "C" fn(cpu: *mut CPUState, arg1: *mut FILE, flags: ::std::os::raw::c_int), - >, - pub query_cpu_fast: - ::std::option::Option, - pub get_arch_id: ::std::option::Option i64>, - pub set_pc: ::std::option::Option, - pub get_pc: ::std::option::Option vaddr>, - pub gdb_read_register: ::std::option::Option< - unsafe extern "C" fn( - cpu: *mut CPUState, - buf: *mut GByteArray, - reg: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int, - >, - pub gdb_write_register: ::std::option::Option< - unsafe extern "C" fn( - cpu: *mut CPUState, - buf: *mut u8, - reg: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int, - >, - pub gdb_adjust_breakpoint: - ::std::option::Option vaddr>, - pub gdb_core_xml_file: *const ::std::os::raw::c_char, - pub gdb_arch_name: - ::std::option::Option *const gchar>, - pub disas_set_info: ::std::option::Option< - unsafe extern "C" fn(cpu: *mut CPUState, info: *mut disassemble_info), - >, - pub deprecation_note: *const ::std::os::raw::c_char, - pub accel_cpu: *mut AccelCPUClass, - pub sysemu_ops: *const SysemuCPUOps, 
- pub tcg_ops: *const TCGCPUOps, - pub init_accel_cpu: ::std::option::Option< - unsafe extern "C" fn(accel_cpu: *mut AccelCPUClass, cc: *mut CPUClass), - >, - pub reset_dump_flags: ::std::os::raw::c_int, - pub gdb_num_core_regs: ::std::os::raw::c_int, - pub gdb_stop_before_watchpoint: bool, -} -#[test] -fn bindgen_test_layout_CPUClass() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 360usize, - concat!("Size of: ", stringify!(CPUClass)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUClass)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).parent_class) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(parent_class) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).class_by_name) as usize - ptr as usize }, - 176usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(class_by_name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).parse_features) as usize - ptr as usize }, - 184usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(parse_features) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).has_work) as usize - ptr as usize }, - 192usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(has_work) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mmu_index) as usize - ptr as usize }, - 200usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(mmu_index) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).memory_rw_debug) as usize - ptr as usize }, - 208usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(memory_rw_debug) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).dump_state) as usize - ptr as usize }, - 216usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(dump_state) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).query_cpu_fast) as usize - ptr as usize }, - 224usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(query_cpu_fast) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).get_arch_id) as usize - ptr as usize }, - 232usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(get_arch_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).set_pc) as usize - ptr as usize }, - 240usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(set_pc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).get_pc) as usize - ptr as usize }, - 248usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(get_pc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdb_read_register) as usize - ptr as usize }, - 256usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(gdb_read_register) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdb_write_register) as usize - ptr as usize }, - 264usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(gdb_write_register) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdb_adjust_breakpoint) as usize - ptr as usize }, - 272usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - 
stringify!(gdb_adjust_breakpoint) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdb_core_xml_file) as usize - ptr as usize }, - 280usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(gdb_core_xml_file) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdb_arch_name) as usize - ptr as usize }, - 288usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(gdb_arch_name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).disas_set_info) as usize - ptr as usize }, - 296usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(disas_set_info) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).deprecation_note) as usize - ptr as usize }, - 304usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(deprecation_note) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).accel_cpu) as usize - ptr as usize }, - 312usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(accel_cpu) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sysemu_ops) as usize - ptr as usize }, - 320usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(sysemu_ops) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tcg_ops) as usize - ptr as usize }, - 328usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(tcg_ops) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).init_accel_cpu) as usize - ptr as usize }, - 336usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(init_accel_cpu) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).reset_dump_flags) as usize - ptr as usize }, - 344usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(reset_dump_flags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdb_num_core_regs) as usize - ptr as usize }, - 348usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(gdb_num_core_regs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdb_stop_before_watchpoint) as usize - ptr as usize }, - 352usize, - concat!( - "Offset of field: ", - stringify!(CPUClass), - "::", - stringify!(gdb_stop_before_watchpoint) - ) - ); -} -impl Default for CPUClass { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CPUTLBEntryFull { - pub xlat_section: hwaddr, - pub phys_addr: hwaddr, - pub attrs: MemTxAttrs, - pub prot: u8, - pub lg_page_size: u8, - pub tlb_fill_flags: u8, - pub slow_flags: [u8; 3usize], - pub extra: CPUTLBEntryFull__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CPUTLBEntryFull__bindgen_ty_1 { - pub arm: CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1, -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1 { - pub pte_attrs: u8, - pub shareability: u8, - pub guarded: bool, -} -#[test] -fn bindgen_test_layout_CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 3usize, - concat!( - "Size of: ", - stringify!(CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1) - ) - ); - assert_eq!( - 
::std::mem::align_of::(), - 1usize, - concat!( - "Alignment of ", - stringify!(CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pte_attrs) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1), - "::", - stringify!(pte_attrs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).shareability) as usize - ptr as usize }, - 1usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1), - "::", - stringify!(shareability) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).guarded) as usize - ptr as usize }, - 2usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull__bindgen_ty_1__bindgen_ty_1), - "::", - stringify!(guarded) - ) - ); -} -#[test] -fn bindgen_test_layout_CPUTLBEntryFull__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 3usize, - concat!("Size of: ", stringify!(CPUTLBEntryFull__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!("Alignment of ", stringify!(CPUTLBEntryFull__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).arm) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull__bindgen_ty_1), - "::", - stringify!(arm) - ) - ); -} -impl Default for CPUTLBEntryFull__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUTLBEntryFull__bindgen_ty_1 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "CPUTLBEntryFull__bindgen_ty_1 {{ union }}") - } -} -#[test] -fn bindgen_test_layout_CPUTLBEntryFull() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(CPUTLBEntryFull)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUTLBEntryFull)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).xlat_section) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull), - "::", - stringify!(xlat_section) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).phys_addr) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull), - "::", - stringify!(phys_addr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).attrs) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull), - "::", - stringify!(attrs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).prot) as usize - ptr as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull), - "::", - stringify!(prot) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lg_page_size) as usize - ptr as usize }, - 21usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull), - "::", - stringify!(lg_page_size) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tlb_fill_flags) as usize - ptr as usize }, - 22usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull), - "::", - stringify!(tlb_fill_flags) - ) - ); - assert_eq!( - unsafe { 
::std::ptr::addr_of!((*ptr).slow_flags) as usize - ptr as usize }, - 23usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull), - "::", - stringify!(slow_flags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).extra) as usize - ptr as usize }, - 26usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBEntryFull), - "::", - stringify!(extra) - ) - ); -} -impl Default for CPUTLBEntryFull { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUTLBEntryFull { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "CPUTLBEntryFull {{ attrs: {:?}, slow_flags: {:?}, extra: {:?} }}", - self.attrs, self.slow_flags, self.extra - ) - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CPUTLBDesc { - pub large_page_addr: vaddr, - pub large_page_mask: vaddr, - pub window_begin_ns: i64, - pub window_max_entries: usize, - pub n_used_entries: usize, - pub vindex: usize, - pub vtable: [CPUTLBEntry; 8usize], - pub vfulltlb: [CPUTLBEntryFull; 8usize], - pub fulltlb: *mut CPUTLBEntryFull, -} -#[test] -fn bindgen_test_layout_CPUTLBDesc() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 568usize, - concat!("Size of: ", stringify!(CPUTLBDesc)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUTLBDesc)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).large_page_addr) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDesc), - "::", - stringify!(large_page_addr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).large_page_mask) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDesc), - "::", - stringify!(large_page_mask) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).window_begin_ns) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDesc), - "::", - stringify!(window_begin_ns) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).window_max_entries) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDesc), - "::", - stringify!(window_max_entries) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).n_used_entries) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDesc), - "::", - stringify!(n_used_entries) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vindex) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDesc), - "::", - stringify!(vindex) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vtable) as usize - ptr as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDesc), - "::", - stringify!(vtable) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vfulltlb) as usize - ptr as usize }, - 304usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDesc), - "::", - stringify!(vfulltlb) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fulltlb) as usize - ptr as usize }, - 560usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBDesc), - "::", - stringify!(fulltlb) - ) - ); -} -impl Default for CPUTLBDesc { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - 
unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUTLBDesc { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "CPUTLBDesc {{ vtable: {:?}, vfulltlb: {:?}, fulltlb: {:?} }}", - self.vtable, self.vfulltlb, self.fulltlb - ) - } -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct CPUTLBCommon { - pub lock: QemuSpin, - pub dirty: u16, - pub full_flush_count: usize, - pub part_flush_count: usize, - pub elide_flush_count: usize, -} -#[test] -fn bindgen_test_layout_CPUTLBCommon() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(CPUTLBCommon)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUTLBCommon)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lock) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBCommon), - "::", - stringify!(lock) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).dirty) as usize - ptr as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBCommon), - "::", - stringify!(dirty) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).full_flush_count) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBCommon), - "::", - stringify!(full_flush_count) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).part_flush_count) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBCommon), - "::", - stringify!(part_flush_count) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).elide_flush_count) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(CPUTLBCommon), - "::", - stringify!(elide_flush_count) - ) - ); -} -#[repr(C)] -#[repr(align(16))] -#[derive(Copy, Clone)] -pub struct CPUTLB { - pub c: CPUTLBCommon, - pub d: [CPUTLBDesc; 16usize], - pub f: [CPUTLBDescFast; 16usize], -} -#[test] -fn bindgen_test_layout_CPUTLB() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 9376usize, - concat!("Size of: ", stringify!(CPUTLB)) - ); - assert_eq!( - ::std::mem::align_of::(), - 16usize, - concat!("Alignment of ", stringify!(CPUTLB)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).c) as usize - ptr as usize }, - 0usize, - concat!("Offset of field: ", stringify!(CPUTLB), "::", stringify!(c)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).d) as usize - ptr as usize }, - 32usize, - concat!("Offset of field: ", stringify!(CPUTLB), "::", stringify!(d)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).f) as usize - ptr as usize }, - 9120usize, - concat!("Offset of field: ", stringify!(CPUTLB), "::", stringify!(f)) - ); -} -impl Default for CPUTLB { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUTLB { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "CPUTLB {{ c: {:?}, d: {:?}, f: {:?} }}", - self.c, self.d, self.f - ) - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union IcountDecr { - pub u32_: u32, - pub u16_: IcountDecr__bindgen_ty_1, -} -#[repr(C)] 
-#[derive(Debug, Default, Copy, Clone)] -pub struct IcountDecr__bindgen_ty_1 { - pub low: u16, - pub high: u16, -} -#[test] -fn bindgen_test_layout_IcountDecr__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 4usize, - concat!("Size of: ", stringify!(IcountDecr__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 2usize, - concat!("Alignment of ", stringify!(IcountDecr__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).low) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(IcountDecr__bindgen_ty_1), - "::", - stringify!(low) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).high) as usize - ptr as usize }, - 2usize, - concat!( - "Offset of field: ", - stringify!(IcountDecr__bindgen_ty_1), - "::", - stringify!(high) - ) - ); -} -#[test] -fn bindgen_test_layout_IcountDecr() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 4usize, - concat!("Size of: ", stringify!(IcountDecr)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(IcountDecr)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).u32_) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(IcountDecr), - "::", - stringify!(u32_) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).u16_) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(IcountDecr), - "::", - stringify!(u16_) - ) - ); -} -impl Default for IcountDecr { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for IcountDecr { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "IcountDecr {{ union }}") - } -} -#[repr(C)] -#[repr(align(16))] -#[derive(Copy, Clone)] -pub struct CPUNegativeOffsetState { - pub tlb: CPUTLB, - pub icount_decr: IcountDecr, - pub can_do_io: bool, -} -#[test] -fn bindgen_test_layout_CPUNegativeOffsetState() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 9392usize, - concat!("Size of: ", stringify!(CPUNegativeOffsetState)) - ); - assert_eq!( - ::std::mem::align_of::(), - 16usize, - concat!("Alignment of ", stringify!(CPUNegativeOffsetState)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tlb) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUNegativeOffsetState), - "::", - stringify!(tlb) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).icount_decr) as usize - ptr as usize }, - 9376usize, - concat!( - "Offset of field: ", - stringify!(CPUNegativeOffsetState), - "::", - stringify!(icount_decr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).can_do_io) as usize - ptr as usize }, - 9380usize, - concat!( - "Offset of field: ", - stringify!(CPUNegativeOffsetState), - "::", - stringify!(can_do_io) - ) - ); -} -impl Default for CPUNegativeOffsetState { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUNegativeOffsetState { - fn fmt(&self, f: &mut 
::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "CPUNegativeOffsetState {{ tlb: {:?}, icount_decr: {:?}, can_do_io: {:?} }}", - self.tlb, self.icount_decr, self.can_do_io - ) - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CPUBreakpoint { - pub pc: vaddr, - pub flags: ::std::os::raw::c_int, - pub entry: CPUBreakpoint__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CPUBreakpoint__bindgen_ty_1 { - pub tqe_next: *mut CPUBreakpoint, - pub tqe_circ: QTailQLink, -} -#[test] -fn bindgen_test_layout_CPUBreakpoint__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(CPUBreakpoint__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUBreakpoint__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqe_next) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUBreakpoint__bindgen_ty_1), - "::", - stringify!(tqe_next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqe_circ) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUBreakpoint__bindgen_ty_1), - "::", - stringify!(tqe_circ) - ) - ); -} -impl Default for CPUBreakpoint__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUBreakpoint__bindgen_ty_1 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "CPUBreakpoint__bindgen_ty_1 {{ union }}") - } -} -#[test] -fn bindgen_test_layout_CPUBreakpoint() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(CPUBreakpoint)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUBreakpoint)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pc) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUBreakpoint), - "::", - stringify!(pc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(CPUBreakpoint), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).entry) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(CPUBreakpoint), - "::", - stringify!(entry) - ) - ); -} -impl Default for CPUBreakpoint { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUBreakpoint { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "CPUBreakpoint {{ flags: {:?}, entry: {:?} }}", - self.flags, self.entry - ) - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct CPUWatchpoint { - pub vaddr: vaddr, - pub len: vaddr, - pub hitaddr: vaddr, - pub hitattrs: MemTxAttrs, - pub flags: ::std::os::raw::c_int, - pub entry: CPUWatchpoint__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CPUWatchpoint__bindgen_ty_1 { - pub tqe_next: *mut CPUWatchpoint, - pub tqe_circ: QTailQLink, -} -#[test] -fn 
bindgen_test_layout_CPUWatchpoint__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(CPUWatchpoint__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUWatchpoint__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqe_next) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUWatchpoint__bindgen_ty_1), - "::", - stringify!(tqe_next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqe_circ) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUWatchpoint__bindgen_ty_1), - "::", - stringify!(tqe_circ) - ) - ); -} -impl Default for CPUWatchpoint__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUWatchpoint__bindgen_ty_1 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "CPUWatchpoint__bindgen_ty_1 {{ union }}") - } -} -#[test] -fn bindgen_test_layout_CPUWatchpoint() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 48usize, - concat!("Size of: ", stringify!(CPUWatchpoint)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUWatchpoint)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vaddr) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUWatchpoint), - "::", - stringify!(vaddr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).len) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(CPUWatchpoint), - "::", - stringify!(len) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hitaddr) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(CPUWatchpoint), - "::", - stringify!(hitaddr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hitattrs) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(CPUWatchpoint), - "::", - stringify!(hitattrs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, - 28usize, - concat!( - "Offset of field: ", - stringify!(CPUWatchpoint), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).entry) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(CPUWatchpoint), - "::", - stringify!(entry) - ) - ); -} -impl Default for CPUWatchpoint { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUWatchpoint { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "CPUWatchpoint {{ hitattrs: {:?}, flags: {:?}, entry: {:?} }}", - self.hitattrs, self.flags, self.entry - ) - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct KVMState { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct kvm_run { - _unused: [u8; 0], -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct qemu_work_item { - _unused: [u8; 0], -} -#[doc = " 
CPUState:\n @cpu_index: CPU index (informative).\n @cluster_index: Identifies which cluster this CPU is in.\n For boards which don't define clusters or for \"loose\" CPUs not assigned\n to a cluster this will be UNASSIGNED_CLUSTER_INDEX; otherwise it will\n be the same as the cluster-id property of the CPU object's TYPE_CPU_CLUSTER\n QOM parent.\n Under TCG this value is propagated to @tcg_cflags.\n See TranslationBlock::TCG CF_CLUSTER_MASK.\n @tcg_cflags: Pre-computed cflags for this cpu.\n @nr_cores: Number of cores within this CPU package.\n @nr_threads: Number of threads within this CPU core.\n @running: #true if CPU is currently running (lockless).\n @has_waiter: #true if a CPU is currently waiting for the cpu_exec_end;\n valid under cpu_list_lock.\n @created: Indicates whether the CPU thread has been successfully created.\n @interrupt_request: Indicates a pending interrupt request.\n @halted: Nonzero if the CPU is in suspended state.\n @stop: Indicates a pending stop request.\n @stopped: Indicates the CPU has been artificially stopped.\n @unplug: Indicates a pending CPU unplug request.\n @crash_occurred: Indicates the OS reported a crash (panic) for this CPU\n @singlestep_enabled: Flags for single-stepping.\n @icount_extra: Instructions until next timer event.\n @neg.can_do_io: True if memory-mapped IO is allowed.\n @cpu_ases: Pointer to array of CPUAddressSpaces (which define the\n AddressSpaces this CPU has)\n @num_ases: number of CPUAddressSpaces in @cpu_ases\n @as: Pointer to the first AddressSpace, for the convenience of targets which\n only have a single AddressSpace\n @gdb_regs: Additional GDB registers.\n @gdb_num_regs: Number of total registers accessible to GDB.\n @gdb_num_g_regs: Number of registers in GDB 'g' packets.\n @node: QTAILQ of CPUs sharing TB cache.\n @opaque: User data.\n @mem_io_pc: Host Program Counter at which the memory was accessed.\n @accel: Pointer to accelerator specific state.\n @kvm_fd: vCPU file descriptor for KVM.\n @work_mutex: Lock to prevent multiple access to @work_list.\n @work_list: List of pending asynchronous work.\n @plugin_mem_cbs: active plugin memory callbacks\n @plugin_state: per-CPU plugin state\n @ignore_memory_transaction_failures: Cached copy of the MachineState\n flag of the same name: allows the board to suppress calling of the\n CPU do_transaction_failed hook function.\n @kvm_dirty_gfns: Points to the KVM dirty ring for this CPU when KVM dirty\n ring is enabled.\n @kvm_fetch_index: Keeps the index that we last fetched from the per-vCPU\n dirty ring structure.\n\n State of one CPU core or thread.\n\n Align, in order to match possible alignment required by CPUArchState,\n and eliminate a hole between CPUState and CPUArchState within ArchCPU."] -#[repr(C)] -#[repr(align(16))] -pub struct CPUState { - pub parent_obj: DeviceState, - pub cc: *mut CPUClass, - pub nr_cores: ::std::os::raw::c_int, - pub nr_threads: ::std::os::raw::c_int, - pub thread: *mut QemuThread, - pub thread_id: ::std::os::raw::c_int, - pub running: bool, - pub has_waiter: bool, - pub halt_cond: *mut QemuCond, - pub thread_kicked: bool, - pub created: bool, - pub stop: bool, - pub stopped: bool, - pub start_powered_off: bool, - pub unplug: bool, - pub crash_occurred: bool, - pub exit_request: bool, - pub exclusive_context_count: ::std::os::raw::c_int, - pub cflags_next_tb: u32, - pub interrupt_request: u32, - pub singlestep_enabled: ::std::os::raw::c_int, - pub icount_budget: i64, - pub icount_extra: i64, - pub random_seed: u64, - pub jmp_env: sigjmp_buf, - pub 
work_mutex: QemuMutex, - pub work_list: CPUState__bindgen_ty_1, - pub cpu_ases: *mut CPUAddressSpace, - pub num_ases: ::std::os::raw::c_int, - pub as_: *mut AddressSpace, - pub memory: *mut MemoryRegion, - pub tb_jmp_cache: *mut CPUJumpCache, - pub gdb_regs: *mut GArray, - pub gdb_num_regs: ::std::os::raw::c_int, - pub gdb_num_g_regs: ::std::os::raw::c_int, - pub node: CPUState__bindgen_ty_2, - pub breakpoints: CPUState__bindgen_ty_3, - pub watchpoints: CPUState__bindgen_ty_4, - pub watchpoint_hit: *mut CPUWatchpoint, - pub opaque: *mut ::std::os::raw::c_void, - pub mem_io_pc: usize, - pub kvm_fd: ::std::os::raw::c_int, - pub kvm_state: *mut KVMState, - pub kvm_run: *mut kvm_run, - pub kvm_dirty_gfns: *mut kvm_dirty_gfn, - pub kvm_fetch_index: u32, - pub dirty_pages: u64, - pub kvm_vcpu_stats_fd: ::std::os::raw::c_int, - pub in_ioctl_lock: QemuLockCnt, - pub plugin_mem_cbs: *mut GArray, - pub plugin_state: *mut CPUPluginState, - pub cpu_index: ::std::os::raw::c_int, - pub cluster_index: ::std::os::raw::c_int, - pub tcg_cflags: u32, - pub halted: u32, - pub exception_index: i32, - pub accel: *mut AccelCPUState, - pub vcpu_dirty: bool, - pub throttle_thread_scheduled: bool, - pub throttle_us_per_full: i64, - pub ignore_memory_transaction_failures: bool, - pub prctl_unalign_sigbus: bool, - pub iommu_notifiers: *mut GArray, - pub __bindgen_padding_0: [u8; 8usize], - pub neg_align: __IncompleteArrayField<::std::os::raw::c_char>, - pub neg: CPUNegativeOffsetState, -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct CPUState__bindgen_ty_1 { - pub sqh_first: *mut qemu_work_item, - pub sqh_last: *mut *mut qemu_work_item, -} -#[test] -fn bindgen_test_layout_CPUState__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(CPUState__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUState__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sqh_first) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUState__bindgen_ty_1), - "::", - stringify!(sqh_first) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sqh_last) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(CPUState__bindgen_ty_1), - "::", - stringify!(sqh_last) - ) - ); -} -impl Default for CPUState__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CPUState__bindgen_ty_2 { - pub tqe_next: *mut CPUState, - pub tqe_circ: QTailQLink, -} -#[test] -fn bindgen_test_layout_CPUState__bindgen_ty_2() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(CPUState__bindgen_ty_2)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUState__bindgen_ty_2)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqe_next) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUState__bindgen_ty_2), - "::", - stringify!(tqe_next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqe_circ) as usize - ptr as usize }, - 0usize, - concat!( - 
"Offset of field: ", - stringify!(CPUState__bindgen_ty_2), - "::", - stringify!(tqe_circ) - ) - ); -} -impl Default for CPUState__bindgen_ty_2 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUState__bindgen_ty_2 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "CPUState__bindgen_ty_2 {{ union }}") - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CPUState__bindgen_ty_3 { - pub tqh_first: *mut CPUBreakpoint, - pub tqh_circ: QTailQLink, -} -#[test] -fn bindgen_test_layout_CPUState__bindgen_ty_3() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(CPUState__bindgen_ty_3)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUState__bindgen_ty_3)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqh_first) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUState__bindgen_ty_3), - "::", - stringify!(tqh_first) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqh_circ) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUState__bindgen_ty_3), - "::", - stringify!(tqh_circ) - ) - ); -} -impl Default for CPUState__bindgen_ty_3 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUState__bindgen_ty_3 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "CPUState__bindgen_ty_3 {{ union }}") - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CPUState__bindgen_ty_4 { - pub tqh_first: *mut CPUWatchpoint, - pub tqh_circ: QTailQLink, -} -#[test] -fn bindgen_test_layout_CPUState__bindgen_ty_4() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(CPUState__bindgen_ty_4)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUState__bindgen_ty_4)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqh_first) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUState__bindgen_ty_4), - "::", - stringify!(tqh_first) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tqh_circ) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUState__bindgen_ty_4), - "::", - stringify!(tqh_circ) - ) - ); -} -impl Default for CPUState__bindgen_ty_4 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUState__bindgen_ty_4 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "CPUState__bindgen_ty_4 {{ union }}") - } -} -#[test] -fn bindgen_test_layout_CPUState() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 10176usize, - concat!("Size of: ", stringify!(CPUState)) - ); - assert_eq!( - ::std::mem::align_of::(), - 16usize, - 
concat!("Alignment of ", stringify!(CPUState)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).parent_obj) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(parent_obj) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cc) as usize - ptr as usize }, - 160usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(cc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).nr_cores) as usize - ptr as usize }, - 168usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(nr_cores) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).nr_threads) as usize - ptr as usize }, - 172usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(nr_threads) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).thread) as usize - ptr as usize }, - 176usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(thread) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).thread_id) as usize - ptr as usize }, - 184usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(thread_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).running) as usize - ptr as usize }, - 188usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(running) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).has_waiter) as usize - ptr as usize }, - 189usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(has_waiter) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).halt_cond) as usize - ptr as usize }, - 192usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(halt_cond) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).thread_kicked) as usize - ptr as usize }, - 200usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(thread_kicked) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).created) as usize - ptr as usize }, - 201usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(created) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).stop) as usize - ptr as usize }, - 202usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(stop) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).stopped) as usize - ptr as usize }, - 203usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(stopped) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).start_powered_off) as usize - ptr as usize }, - 204usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(start_powered_off) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).unplug) as usize - ptr as usize }, - 205usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(unplug) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).crash_occurred) as usize - ptr as usize }, - 206usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(crash_occurred) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).exit_request) as usize - ptr as usize }, - 207usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(exit_request) - ) - ); - assert_eq!( - unsafe { 
::std::ptr::addr_of!((*ptr).exclusive_context_count) as usize - ptr as usize }, - 208usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(exclusive_context_count) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cflags_next_tb) as usize - ptr as usize }, - 212usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(cflags_next_tb) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).interrupt_request) as usize - ptr as usize }, - 216usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(interrupt_request) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).singlestep_enabled) as usize - ptr as usize }, - 220usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(singlestep_enabled) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).icount_budget) as usize - ptr as usize }, - 224usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(icount_budget) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).icount_extra) as usize - ptr as usize }, - 232usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(icount_extra) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).random_seed) as usize - ptr as usize }, - 240usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(random_seed) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).jmp_env) as usize - ptr as usize }, - 248usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(jmp_env) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).work_mutex) as usize - ptr as usize }, - 448usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(work_mutex) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).work_list) as usize - ptr as usize }, - 496usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(work_list) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpu_ases) as usize - ptr as usize }, - 512usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(cpu_ases) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).num_ases) as usize - ptr as usize }, - 520usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(num_ases) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).as_) as usize - ptr as usize }, - 528usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(as_) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).memory) as usize - ptr as usize }, - 536usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(memory) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tb_jmp_cache) as usize - ptr as usize }, - 544usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(tb_jmp_cache) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdb_regs) as usize - ptr as usize }, - 552usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(gdb_regs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdb_num_regs) as usize - ptr as usize }, - 560usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(gdb_num_regs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdb_num_g_regs) 
as usize - ptr as usize }, - 564usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(gdb_num_g_regs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).node) as usize - ptr as usize }, - 568usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(node) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).breakpoints) as usize - ptr as usize }, - 584usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(breakpoints) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).watchpoints) as usize - ptr as usize }, - 600usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(watchpoints) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).watchpoint_hit) as usize - ptr as usize }, - 616usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(watchpoint_hit) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).opaque) as usize - ptr as usize }, - 624usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(opaque) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mem_io_pc) as usize - ptr as usize }, - 632usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(mem_io_pc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kvm_fd) as usize - ptr as usize }, - 640usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(kvm_fd) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kvm_state) as usize - ptr as usize }, - 648usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(kvm_state) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kvm_run) as usize - ptr as usize }, - 656usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(kvm_run) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kvm_dirty_gfns) as usize - ptr as usize }, - 664usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(kvm_dirty_gfns) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kvm_fetch_index) as usize - ptr as usize }, - 672usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(kvm_fetch_index) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).dirty_pages) as usize - ptr as usize }, - 680usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(dirty_pages) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kvm_vcpu_stats_fd) as usize - ptr as usize }, - 688usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(kvm_vcpu_stats_fd) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).in_ioctl_lock) as usize - ptr as usize }, - 692usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(in_ioctl_lock) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).plugin_mem_cbs) as usize - ptr as usize }, - 696usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(plugin_mem_cbs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).plugin_state) as usize - ptr as usize }, - 704usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(plugin_state) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpu_index) as usize - ptr as usize }, - 712usize, - concat!( - 
"Offset of field: ", - stringify!(CPUState), - "::", - stringify!(cpu_index) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cluster_index) as usize - ptr as usize }, - 716usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(cluster_index) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tcg_cflags) as usize - ptr as usize }, - 720usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(tcg_cflags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).halted) as usize - ptr as usize }, - 724usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(halted) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).exception_index) as usize - ptr as usize }, - 728usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(exception_index) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).accel) as usize - ptr as usize }, - 736usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(accel) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vcpu_dirty) as usize - ptr as usize }, - 744usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(vcpu_dirty) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).throttle_thread_scheduled) as usize - ptr as usize }, - 745usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(throttle_thread_scheduled) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).throttle_us_per_full) as usize - ptr as usize }, - 752usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(throttle_us_per_full) - ) - ); - assert_eq!( - unsafe { - ::std::ptr::addr_of!((*ptr).ignore_memory_transaction_failures) as usize - ptr as usize - }, - 760usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(ignore_memory_transaction_failures) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).prctl_unalign_sigbus) as usize - ptr as usize }, - 761usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(prctl_unalign_sigbus) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).iommu_notifiers) as usize - ptr as usize }, - 768usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(iommu_notifiers) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).neg_align) as usize - ptr as usize }, - 784usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(neg_align) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).neg) as usize - ptr as usize }, - 784usize, - concat!( - "Offset of field: ", - stringify!(CPUState), - "::", - stringify!(neg) - ) - ); -} -impl Default for CPUState { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUState { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write ! 
(f , "CPUState {{ parent_obj: {:?}, cc: {:?}, nr_cores: {:?}, nr_threads: {:?}, thread: {:?}, thread_id: {:?}, running: {:?}, has_waiter: {:?}, halt_cond: {:?}, thread_kicked: {:?}, created: {:?}, stop: {:?}, stopped: {:?}, start_powered_off: {:?}, unplug: {:?}, crash_occurred: {:?}, exit_request: {:?}, exclusive_context_count: {:?}, singlestep_enabled: {:?}, jmp_env: {:?}, work_mutex: {:?}, work_list: {:?}, cpu_ases: {:?}, num_ases: {:?}, as: {:?}, memory: {:?}, tb_jmp_cache: {:?}, gdb_regs: {:?}, gdb_num_regs: {:?}, gdb_num_g_regs: {:?}, node: {:?}, breakpoints: {:?}, watchpoints: {:?}, watchpoint_hit: {:?}, opaque: {:?}, kvm_fd: {:?}, kvm_state: {:?}, kvm_run: {:?}, kvm_dirty_gfns: {:?}, kvm_vcpu_stats_fd: {:?}, in_ioctl_lock: {:?}, plugin_mem_cbs: {:?}, plugin_state: {:?}, cpu_index: {:?}, cluster_index: {:?}, accel: {:?}, vcpu_dirty: {:?}, throttle_thread_scheduled: {:?}, ignore_memory_transaction_failures: {:?}, prctl_unalign_sigbus: {:?}, iommu_notifiers: {:?}, neg_align: {:?}, neg: {:?} }}" , self . parent_obj , self . cc , self . nr_cores , self . nr_threads , self . thread , self . thread_id , self . running , self . has_waiter , self . halt_cond , self . thread_kicked , self . created , self . stop , self . stopped , self . start_powered_off , self . unplug , self . crash_occurred , self . exit_request , self . exclusive_context_count , self . singlestep_enabled , self . jmp_env , self . work_mutex , self . work_list , self . cpu_ases , self . num_ases , self . as_ , self . memory , self . tb_jmp_cache , self . gdb_regs , self . gdb_num_regs , self . gdb_num_g_regs , self . node , self . breakpoints , self . watchpoints , self . watchpoint_hit , self . opaque , self . kvm_fd , self . kvm_state , self . kvm_run , self . kvm_dirty_gfns , self . kvm_vcpu_stats_fd , self . in_ioctl_lock , self . plugin_mem_cbs , self . plugin_state , self . cpu_index , self . cluster_index , self . accel , self . vcpu_dirty , self . throttle_thread_scheduled , self . ignore_memory_transaction_failures , self . prctl_unalign_sigbus , self . iommu_notifiers , self . neg_align , self . neg) - } -} -extern "C" { - #[doc = " cpu_reset:\n @cpu: The CPU whose state is to be reset."] - pub fn cpu_reset(cpu: *mut CPUState); -} -pub type target_long = i64; -pub type target_ulong = u64; -#[doc = " Property:\n @set_default: true if the default value should be set from @defval,\n in which case @info->set_default_value must not be NULL\n (if false then no default value is set by the property system\n and the field retains whatever value it was given by instance_init).\n @defval: default value for the property. 
This is used only if @set_default\n is true."] -#[repr(C)] -#[derive(Copy, Clone)] -pub struct Property { - pub name: *const ::std::os::raw::c_char, - pub info: *const PropertyInfo, - pub offset: isize, - pub bitnr: u8, - pub bitmask: u64, - pub set_default: bool, - pub defval: Property__bindgen_ty_1, - pub arrayoffset: ::std::os::raw::c_int, - pub arrayinfo: *const PropertyInfo, - pub arrayfieldsize: ::std::os::raw::c_int, - pub link_type: *const ::std::os::raw::c_char, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union Property__bindgen_ty_1 { - pub i: i64, - pub u: u64, -} -#[test] -fn bindgen_test_layout_Property__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(Property__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(Property__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).i) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(Property__bindgen_ty_1), - "::", - stringify!(i) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).u) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(Property__bindgen_ty_1), - "::", - stringify!(u) - ) - ); -} -impl Default for Property__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for Property__bindgen_ty_1 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "Property__bindgen_ty_1 {{ union }}") - } -} -#[test] -fn bindgen_test_layout_Property() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 88usize, - concat!("Size of: ", stringify!(Property)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(Property)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).name) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).info) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(info) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).offset) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(offset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).bitnr) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(bitnr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).bitmask) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(bitmask) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).set_default) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(set_default) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).defval) as usize - ptr as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(defval) - ) - ); - assert_eq!( - unsafe { 
::std::ptr::addr_of!((*ptr).arrayoffset) as usize - ptr as usize }, - 56usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(arrayoffset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).arrayinfo) as usize - ptr as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(arrayinfo) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).arrayfieldsize) as usize - ptr as usize }, - 72usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(arrayfieldsize) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).link_type) as usize - ptr as usize }, - 80usize, - concat!( - "Offset of field: ", - stringify!(Property), - "::", - stringify!(link_type) - ) - ); -} -impl Default for Property { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for Property { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write ! (f , "Property {{ name: {:?}, info: {:?}, set_default: {:?}, defval: {:?}, arrayoffset: {:?}, arrayinfo: {:?}, arrayfieldsize: {:?}, link_type: {:?} }}" , self . name , self . info , self . set_default , self . defval , self . arrayoffset , self . arrayinfo , self . arrayfieldsize , self . link_type) - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct PropertyInfo { - pub name: *const ::std::os::raw::c_char, - pub description: *const ::std::os::raw::c_char, - pub enum_table: *const QEnumLookup, - pub realized_set_allowed: bool, - pub print: ::std::option::Option< - unsafe extern "C" fn( - obj: *mut Object, - prop: *mut Property, - dest: *mut ::std::os::raw::c_char, - len: usize, - ) -> ::std::os::raw::c_int, - >, - pub set_default_value: - ::std::option::Option, - pub create: ::std::option::Option< - unsafe extern "C" fn( - oc: *mut ObjectClass, - name: *const ::std::os::raw::c_char, - prop: *mut Property, - ) -> *mut ObjectProperty, - >, - pub get: ObjectPropertyAccessor, - pub set: ObjectPropertyAccessor, - pub release: ObjectPropertyRelease, -} -#[test] -fn bindgen_test_layout_PropertyInfo() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 80usize, - concat!("Size of: ", stringify!(PropertyInfo)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(PropertyInfo)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).name) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(PropertyInfo), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).description) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(PropertyInfo), - "::", - stringify!(description) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).enum_table) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(PropertyInfo), - "::", - stringify!(enum_table) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).realized_set_allowed) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(PropertyInfo), - "::", - stringify!(realized_set_allowed) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).print) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - 
stringify!(PropertyInfo), - "::", - stringify!(print) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).set_default_value) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(PropertyInfo), - "::", - stringify!(set_default_value) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).create) as usize - ptr as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(PropertyInfo), - "::", - stringify!(create) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).get) as usize - ptr as usize }, - 56usize, - concat!( - "Offset of field: ", - stringify!(PropertyInfo), - "::", - stringify!(get) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).set) as usize - ptr as usize }, - 64usize, - concat!( - "Offset of field: ", - stringify!(PropertyInfo), - "::", - stringify!(set) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).release) as usize - ptr as usize }, - 72usize, - concat!( - "Offset of field: ", - stringify!(PropertyInfo), - "::", - stringify!(release) - ) - ); -} -impl Default for PropertyInfo { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[doc = " X86CPU:\n @env: #CPUX86State\n @migratable: If set, only migratable flags will be accepted when \"enforce\"\n mode is used, and only migratable flags will be included in the \"host\"\n CPU model.\n\n An x86 CPU."] -pub type X86CPU = ArchCPU; -pub const OnOffAuto_ON_OFF_AUTO_AUTO: OnOffAuto = OnOffAuto(0); -pub const OnOffAuto_ON_OFF_AUTO_ON: OnOffAuto = OnOffAuto(1); -pub const OnOffAuto_ON_OFF_AUTO_OFF: OnOffAuto = OnOffAuto(2); -pub const OnOffAuto_ON_OFF_AUTO__MAX: OnOffAuto = OnOffAuto(3); -impl ::std::ops::BitOr for OnOffAuto { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - OnOffAuto(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for OnOffAuto { - #[inline] - fn bitor_assign(&mut self, rhs: OnOffAuto) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for OnOffAuto { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - OnOffAuto(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for OnOffAuto { - #[inline] - fn bitand_assign(&mut self, rhs: OnOffAuto) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct OnOffAuto(pub ::std::os::raw::c_uint); -pub type float16 = u16; -pub type float32 = u32; -pub type float64 = u64; -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct floatx80 { - pub low: u64, - pub high: u16, -} -#[test] -fn bindgen_test_layout_floatx80() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(floatx80)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(floatx80)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).low) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(floatx80), - "::", - stringify!(low) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).high) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(floatx80), - "::", - stringify!(high) - ) - ); -} -pub const FloatRoundMode_float_round_nearest_even: FloatRoundMode = FloatRoundMode(0); -pub const FloatRoundMode_float_round_down: FloatRoundMode = 
FloatRoundMode(1); -pub const FloatRoundMode_float_round_up: FloatRoundMode = FloatRoundMode(2); -pub const FloatRoundMode_float_round_to_zero: FloatRoundMode = FloatRoundMode(3); -pub const FloatRoundMode_float_round_ties_away: FloatRoundMode = FloatRoundMode(4); -pub const FloatRoundMode_float_round_to_odd: FloatRoundMode = FloatRoundMode(5); -pub const FloatRoundMode_float_round_to_odd_inf: FloatRoundMode = FloatRoundMode(6); -impl ::std::ops::BitOr for FloatRoundMode { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - FloatRoundMode(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for FloatRoundMode { - #[inline] - fn bitor_assign(&mut self, rhs: FloatRoundMode) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for FloatRoundMode { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - FloatRoundMode(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for FloatRoundMode { - #[inline] - fn bitand_assign(&mut self, rhs: FloatRoundMode) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct FloatRoundMode(pub ::std::os::raw::c_uchar); -pub const FloatX80RoundPrec_floatx80_precision_x: FloatX80RoundPrec = FloatX80RoundPrec(0); -pub const FloatX80RoundPrec_floatx80_precision_d: FloatX80RoundPrec = FloatX80RoundPrec(1); -pub const FloatX80RoundPrec_floatx80_precision_s: FloatX80RoundPrec = FloatX80RoundPrec(2); -impl ::std::ops::BitOr for FloatX80RoundPrec { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - FloatX80RoundPrec(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for FloatX80RoundPrec { - #[inline] - fn bitor_assign(&mut self, rhs: FloatX80RoundPrec) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for FloatX80RoundPrec { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - FloatX80RoundPrec(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for FloatX80RoundPrec { - #[inline] - fn bitand_assign(&mut self, rhs: FloatX80RoundPrec) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct FloatX80RoundPrec(pub ::std::os::raw::c_uchar); -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct float_status { - pub float_exception_flags: u16, - pub float_rounding_mode: FloatRoundMode, - pub floatx80_rounding_precision: FloatX80RoundPrec, - pub tininess_before_rounding: bool, - pub flush_to_zero: bool, - pub flush_inputs_to_zero: bool, - pub default_nan_mode: bool, - pub snan_bit_is_one: bool, - pub use_first_nan: bool, - pub no_signaling_nans: bool, - pub rebias_overflow: bool, - pub rebias_underflow: bool, -} -#[test] -fn bindgen_test_layout_float_status() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 14usize, - concat!("Size of: ", stringify!(float_status)) - ); - assert_eq!( - ::std::mem::align_of::(), - 2usize, - concat!("Alignment of ", stringify!(float_status)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).float_exception_flags) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(float_exception_flags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).float_rounding_mode) as usize - ptr as usize }, - 2usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(float_rounding_mode) - ) - ); - assert_eq!( - unsafe { 
::std::ptr::addr_of!((*ptr).floatx80_rounding_precision) as usize - ptr as usize }, - 3usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(floatx80_rounding_precision) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tininess_before_rounding) as usize - ptr as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(tininess_before_rounding) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).flush_to_zero) as usize - ptr as usize }, - 5usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(flush_to_zero) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).flush_inputs_to_zero) as usize - ptr as usize }, - 6usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(flush_inputs_to_zero) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).default_nan_mode) as usize - ptr as usize }, - 7usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(default_nan_mode) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).snan_bit_is_one) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(snan_bit_is_one) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).use_first_nan) as usize - ptr as usize }, - 9usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(use_first_nan) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).no_signaling_nans) as usize - ptr as usize }, - 10usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(no_signaling_nans) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).rebias_overflow) as usize - ptr as usize }, - 11usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(rebias_overflow) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).rebias_underflow) as usize - ptr as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(float_status), - "::", - stringify!(rebias_underflow) - ) - ); -} -impl Default for float_status { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -pub type FeatureWordArray = [u64; 39usize]; -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct SegmentCache { - pub selector: u32, - pub base: target_ulong, - pub limit: u32, - pub flags: u32, -} -#[test] -fn bindgen_test_layout_SegmentCache() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(SegmentCache)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(SegmentCache)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).selector) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(SegmentCache), - "::", - stringify!(selector) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).base) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(SegmentCache), - "::", - stringify!(base) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).limit) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(SegmentCache), - "::", - 
stringify!(limit) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(SegmentCache), - "::", - stringify!(flags) - ) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union MMXReg { - pub _b_MMXReg: [u8; 8usize], - pub _w_MMXReg: [u16; 4usize], - pub _l_MMXReg: [u32; 2usize], - pub _q_MMXReg: [u64; 1usize], - pub _s_MMXReg: [float32; 2usize], - pub _d_MMXReg: [float64; 1usize], -} -#[test] -fn bindgen_test_layout_MMXReg() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(MMXReg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(MMXReg)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._b_MMXReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(MMXReg), - "::", - stringify!(_b_MMXReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._w_MMXReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(MMXReg), - "::", - stringify!(_w_MMXReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._l_MMXReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(MMXReg), - "::", - stringify!(_l_MMXReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._q_MMXReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(MMXReg), - "::", - stringify!(_q_MMXReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._s_MMXReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(MMXReg), - "::", - stringify!(_s_MMXReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._d_MMXReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(MMXReg), - "::", - stringify!(_d_MMXReg) - ) - ); -} -impl Default for MMXReg { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for MMXReg { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "MMXReg {{ union }}") - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union XMMReg { - pub _q_XMMReg: [u64; 2usize], -} -#[test] -fn bindgen_test_layout_XMMReg() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(XMMReg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(XMMReg)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._q_XMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(XMMReg), - "::", - stringify!(_q_XMMReg) - ) - ); -} -impl Default for XMMReg { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for XMMReg { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "XMMReg {{ union }}") - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union YMMReg { - pub _q_YMMReg: [u64; 4usize], - pub _x_YMMReg: [XMMReg; 2usize], -} -#[test] -fn bindgen_test_layout_YMMReg() { - const UNINIT: 
::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(YMMReg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(YMMReg)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._q_YMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(YMMReg), - "::", - stringify!(_q_YMMReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._x_YMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(YMMReg), - "::", - stringify!(_x_YMMReg) - ) - ); -} -impl Default for YMMReg { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for YMMReg { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "YMMReg {{ union }}") - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union ZMMReg { - pub _b_ZMMReg: [u8; 64usize], - pub _w_ZMMReg: [u16; 32usize], - pub _l_ZMMReg: [u32; 16usize], - pub _q_ZMMReg: [u64; 8usize], - pub _h_ZMMReg: [float16; 32usize], - pub _s_ZMMReg: [float32; 16usize], - pub _d_ZMMReg: [float64; 8usize], - pub _x_ZMMReg: [XMMReg; 4usize], - pub _y_ZMMReg: [YMMReg; 2usize], -} -#[test] -fn bindgen_test_layout_ZMMReg() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 64usize, - concat!("Size of: ", stringify!(ZMMReg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(ZMMReg)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._b_ZMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ZMMReg), - "::", - stringify!(_b_ZMMReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._w_ZMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ZMMReg), - "::", - stringify!(_w_ZMMReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._l_ZMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ZMMReg), - "::", - stringify!(_l_ZMMReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._q_ZMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ZMMReg), - "::", - stringify!(_q_ZMMReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._h_ZMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ZMMReg), - "::", - stringify!(_h_ZMMReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._s_ZMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ZMMReg), - "::", - stringify!(_s_ZMMReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._d_ZMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ZMMReg), - "::", - stringify!(_d_ZMMReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._x_ZMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ZMMReg), - "::", - stringify!(_x_ZMMReg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr)._y_ZMMReg) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ZMMReg), - "::", - stringify!(_y_ZMMReg) - ) - ); -} -impl 
Default for ZMMReg { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for ZMMReg { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "ZMMReg {{ union }}") - } -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct BNDReg { - pub lb: u64, - pub ub: u64, -} -#[test] -fn bindgen_test_layout_BNDReg() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(BNDReg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(BNDReg)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lb) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(BNDReg), - "::", - stringify!(lb) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).ub) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(BNDReg), - "::", - stringify!(ub) - ) - ); -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct BNDCSReg { - pub cfgu: u64, - pub sts: u64, -} -#[test] -fn bindgen_test_layout_BNDCSReg() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(BNDCSReg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(BNDCSReg)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cfgu) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(BNDCSReg), - "::", - stringify!(cfgu) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sts) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(BNDCSReg), - "::", - stringify!(sts) - ) - ); -} -#[repr(C)] -#[repr(align(16))] -#[derive(Copy, Clone)] -pub union FPReg { - pub d: floatx80, - pub mmx: MMXReg, -} -#[test] -fn bindgen_test_layout_FPReg() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(FPReg)) - ); - assert_eq!( - ::std::mem::align_of::(), - 16usize, - concat!("Alignment of ", stringify!(FPReg)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).d) as usize - ptr as usize }, - 0usize, - concat!("Offset of field: ", stringify!(FPReg), "::", stringify!(d)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mmx) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(FPReg), - "::", - stringify!(mmx) - ) - ); -} -impl Default for FPReg { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for FPReg { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "FPReg {{ union }}") - } -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct MTRRVar { - pub base: u64, - pub mask: u64, -} -#[test] -fn bindgen_test_layout_MTRRVar() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(MTRRVar)) - ); - 
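// Every generated `Default` impl in this file follows the same bindgen pattern
// seen above: zero the whole allocation, then `assume_init`. A hedged sketch of
// the idea as a standalone helper (equivalent in effect to `core::mem::zeroed`,
// and only sound for types whose all-zero bit pattern is a valid value):
unsafe fn zeroed_like_bindgen<T>() -> T {
    let mut s = core::mem::MaybeUninit::<T>::uninit();
    // Zero one `T`'s worth of bytes behind the pointer, then claim it initialized.
    core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
    s.assume_init()
}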
assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(MTRRVar)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).base) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(MTRRVar), - "::", - stringify!(base) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mask) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(MTRRVar), - "::", - stringify!(mask) - ) - ); -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct LBREntry { - pub from: u64, - pub to: u64, - pub info: u64, -} -#[test] -fn bindgen_test_layout_LBREntry() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(LBREntry)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(LBREntry)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).from) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(LBREntry), - "::", - stringify!(from) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).to) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(LBREntry), - "::", - stringify!(to) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).info) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(LBREntry), - "::", - stringify!(info) - ) - ); -} -pub const TPRAccess_TPR_ACCESS_READ: TPRAccess = TPRAccess(0); -pub const TPRAccess_TPR_ACCESS_WRITE: TPRAccess = TPRAccess(1); -impl ::std::ops::BitOr for TPRAccess { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - TPRAccess(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for TPRAccess { - #[inline] - fn bitor_assign(&mut self, rhs: TPRAccess) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for TPRAccess { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - TPRAccess(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for TPRAccess { - #[inline] - fn bitand_assign(&mut self, rhs: TPRAccess) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct TPRAccess(pub ::std::os::raw::c_uint); -pub const CacheType_DATA_CACHE: CacheType = CacheType(0); -pub const CacheType_INSTRUCTION_CACHE: CacheType = CacheType(1); -pub const CacheType_UNIFIED_CACHE: CacheType = CacheType(2); -impl ::std::ops::BitOr for CacheType { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - CacheType(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for CacheType { - #[inline] - fn bitor_assign(&mut self, rhs: CacheType) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for CacheType { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - CacheType(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for CacheType { - #[inline] - fn bitand_assign(&mut self, rhs: CacheType) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct CacheType(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct CPUCacheInfo { - pub type_: CacheType, - pub level: u8, - pub size: u32, - pub line_size: u16, - pub associativity: u8, - pub partitions: u8, - pub sets: u32, - pub lines_per_tag: u8, - pub self_init: bool, - pub no_invd_sharing: bool, - pub inclusive: 
bool, - pub complex_indexing: bool, -} -#[test] -fn bindgen_test_layout_CPUCacheInfo() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 28usize, - concat!("Size of: ", stringify!(CPUCacheInfo)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(CPUCacheInfo)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).type_) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(type_) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).level) as usize - ptr as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(level) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).size) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(size) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).line_size) as usize - ptr as usize }, - 12usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(line_size) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).associativity) as usize - ptr as usize }, - 14usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(associativity) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).partitions) as usize - ptr as usize }, - 15usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(partitions) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sets) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(sets) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lines_per_tag) as usize - ptr as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(lines_per_tag) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).self_init) as usize - ptr as usize }, - 21usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(self_init) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).no_invd_sharing) as usize - ptr as usize }, - 22usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(no_invd_sharing) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).inclusive) as usize - ptr as usize }, - 23usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(inclusive) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).complex_indexing) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(CPUCacheInfo), - "::", - stringify!(complex_indexing) - ) - ); -} -impl Default for CPUCacheInfo { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct CPUCaches { - pub l1d_cache: *mut CPUCacheInfo, - pub l1i_cache: *mut CPUCacheInfo, - pub l2_cache: *mut CPUCacheInfo, - pub l3_cache: *mut CPUCacheInfo, -} -#[test] -fn bindgen_test_layout_CPUCaches() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(CPUCaches)) - ); - 
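// `TPRAccess` and `CacheType` above are bindgen's "newtype enum" encoding of C
// enums: a #[repr(transparent)] wrapper around the raw integer plus free
// constants (and bitwise operators). Because they derive PartialEq/Eq, the
// constants can be used directly as match patterns. Illustrative sketch only:
fn cache_kind(t: CacheType) -> &'static str {
    match t {
        CacheType_DATA_CACHE => "data",
        CacheType_INSTRUCTION_CACHE => "instruction",
        CacheType_UNIFIED_CACHE => "unified",
        _ => "unknown",
    }
}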
assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUCaches)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).l1d_cache) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUCaches), - "::", - stringify!(l1d_cache) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).l1i_cache) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(CPUCaches), - "::", - stringify!(l1i_cache) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).l2_cache) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(CPUCaches), - "::", - stringify!(l2_cache) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).l3_cache) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(CPUCaches), - "::", - stringify!(l3_cache) - ) - ); -} -impl Default for CPUCaches { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[repr(align(16))] -#[derive(Copy, Clone)] -pub struct CPUArchState { - pub regs: [target_ulong; 16usize], - pub eip: target_ulong, - pub eflags: target_ulong, - pub cc_dst: target_ulong, - pub cc_src: target_ulong, - pub cc_src2: target_ulong, - pub cc_op: u32, - pub df: i32, - pub hflags: u32, - pub hflags2: u32, - pub segs: [SegmentCache; 6usize], - pub ldt: SegmentCache, - pub tr: SegmentCache, - pub gdt: SegmentCache, - pub idt: SegmentCache, - pub cr: [target_ulong; 5usize], - pub pdptrs_valid: bool, - pub pdptrs: [u64; 4usize], - pub a20_mask: i32, - pub bnd_regs: [BNDReg; 4usize], - pub bndcs_regs: BNDCSReg, - pub msr_bndcfgs: u64, - pub efer: u64, - pub start_init_save: CPUArchState__bindgen_ty_1, - pub fpstt: ::std::os::raw::c_uint, - pub fpus: u16, - pub fpuc: u16, - pub fptags: [u8; 8usize], - pub fpregs: [FPReg; 8usize], - pub fpop: u16, - pub fpcs: u16, - pub fpds: u16, - pub fpip: u64, - pub fpdp: u64, - pub fp_status: float_status, - pub ft0: floatx80, - pub mmx_status: float_status, - pub sse_status: float_status, - pub mxcsr: u32, - pub __bindgen_padding_0: u64, - pub xmm_regs: [ZMMReg; 32usize], - pub xmm_t0: ZMMReg, - pub mmx_t0: MMXReg, - pub opmask_regs: [u64; 8usize], - pub xtilecfg: [u8; 64usize], - pub xtiledata: [u8; 8192usize], - pub sysenter_cs: u32, - pub sysenter_esp: target_ulong, - pub sysenter_eip: target_ulong, - pub star: u64, - pub vm_hsave: u64, - pub lstar: target_ulong, - pub cstar: target_ulong, - pub fmask: target_ulong, - pub kernelgsbase: target_ulong, - pub tsc_adjust: u64, - pub tsc_deadline: u64, - pub tsc_aux: u64, - pub xcr0: u64, - pub mcg_status: u64, - pub msr_ia32_misc_enable: u64, - pub msr_ia32_feature_control: u64, - pub msr_ia32_sgxlepubkeyhash: [u64; 4usize], - pub msr_fixed_ctr_ctrl: u64, - pub msr_global_ctrl: u64, - pub msr_global_status: u64, - pub msr_global_ovf_ctrl: u64, - pub msr_fixed_counters: [u64; 3usize], - pub msr_gp_counters: [u64; 18usize], - pub msr_gp_evtsel: [u64; 18usize], - pub pat: u64, - pub smbase: u32, - pub msr_smi_count: u64, - pub pkru: u32, - pub pkrs: u32, - pub tsx_ctrl: u32, - pub spec_ctrl: u64, - pub amd_tsc_scale_msr: u64, - pub virt_ssbd: u64, - pub end_init_save: CPUArchState__bindgen_ty_2, - pub system_time_msr: u64, - pub wall_clock_msr: u64, - pub steal_time_msr: u64, - pub async_pf_en_msr: u64, - pub async_pf_int_msr: u64, - pub pv_eoi_en_msr: u64, - pub poll_control_msr: u64, - pub 
msr_hv_hypercall: u64, - pub msr_hv_guest_os_id: u64, - pub msr_hv_tsc: u64, - pub msr_hv_syndbg_control: u64, - pub msr_hv_syndbg_status: u64, - pub msr_hv_syndbg_send_page: u64, - pub msr_hv_syndbg_recv_page: u64, - pub msr_hv_syndbg_pending_page: u64, - pub msr_hv_syndbg_options: u64, - pub msr_hv_vapic: u64, - pub msr_hv_crash_params: [u64; 5usize], - pub msr_hv_runtime: u64, - pub msr_hv_synic_control: u64, - pub msr_hv_synic_evt_page: u64, - pub msr_hv_synic_msg_page: u64, - pub msr_hv_synic_sint: [u64; 16usize], - pub msr_hv_stimer_config: [u64; 4usize], - pub msr_hv_stimer_count: [u64; 4usize], - pub msr_hv_reenlightenment_control: u64, - pub msr_hv_tsc_emulation_control: u64, - pub msr_hv_tsc_emulation_status: u64, - pub msr_rtit_ctrl: u64, - pub msr_rtit_status: u64, - pub msr_rtit_output_base: u64, - pub msr_rtit_output_mask: u64, - pub msr_rtit_cr3_match: u64, - pub msr_rtit_addrs: [u64; 8usize], - pub msr_xfd: u64, - pub msr_xfd_err: u64, - pub msr_lbr_ctl: u64, - pub msr_lbr_depth: u64, - pub lbr_records: [LBREntry; 32usize], - pub error_code: ::std::os::raw::c_int, - pub exception_is_int: ::std::os::raw::c_int, - pub exception_next_eip: target_ulong, - pub dr: [target_ulong; 8usize], - pub __bindgen_anon_1: CPUArchState__bindgen_ty_3, - pub old_exception: ::std::os::raw::c_int, - pub vm_vmcb: u64, - pub tsc_offset: u64, - pub intercept: u64, - pub intercept_cr_read: u16, - pub intercept_cr_write: u16, - pub intercept_dr_read: u16, - pub intercept_dr_write: u16, - pub intercept_exceptions: u32, - pub nested_cr3: u64, - pub nested_pg_mode: u32, - pub v_tpr: u8, - pub int_ctl: u32, - pub nmi_injected: u8, - pub nmi_pending: u8, - pub retaddr: usize, - pub end_reset_fields: CPUArchState__bindgen_ty_4, - pub cpuid_level_func7: u32, - pub cpuid_min_level_func7: u32, - pub cpuid_min_level: u32, - pub cpuid_min_xlevel: u32, - pub cpuid_min_xlevel2: u32, - pub cpuid_max_level: u32, - pub cpuid_max_xlevel: u32, - pub cpuid_max_xlevel2: u32, - pub cpuid_level: u32, - pub cpuid_xlevel: u32, - pub cpuid_xlevel2: u32, - pub cpuid_vendor1: u32, - pub cpuid_vendor2: u32, - pub cpuid_vendor3: u32, - pub cpuid_version: u32, - pub features: FeatureWordArray, - pub user_features: FeatureWordArray, - pub cpuid_model: [u32; 12usize], - pub cache_info_cpuid2: CPUCaches, - pub cache_info_cpuid4: CPUCaches, - pub cache_info_amd: CPUCaches, - pub mtrr_fixed: [u64; 11usize], - pub mtrr_deftype: u64, - pub mtrr_var: [MTRRVar; 8usize], - pub mp_state: u32, - pub exception_nr: i32, - pub interrupt_injected: i32, - pub soft_interrupt: u8, - pub exception_pending: u8, - pub exception_injected: u8, - pub has_error_code: u8, - pub exception_has_payload: u8, - pub exception_payload: u64, - pub triple_fault_pending: u8, - pub ins_len: u32, - pub sipi_vector: u32, - pub tsc_valid: bool, - pub tsc_khz: i64, - pub user_tsc_khz: i64, - pub apic_bus_freq: u64, - pub tsc: u64, - pub mcg_cap: u64, - pub mcg_ctl: u64, - pub mcg_ext_ctl: u64, - pub mce_banks: [u64; 40usize], - pub xstate_bv: u64, - pub fpus_vmstate: u16, - pub fptag_vmstate: u16, - pub fpregs_format_vmstate: u16, - pub xss: u64, - pub umwait: u32, - pub tpr_access_type: TPRAccess, - pub nr_dies: ::std::os::raw::c_uint, -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct CPUArchState__bindgen_ty_1 {} -#[test] -fn bindgen_test_layout_CPUArchState__bindgen_ty_1() { - assert_eq!( - ::std::mem::size_of::(), - 0usize, - concat!("Size of: ", stringify!(CPUArchState__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - 
concat!("Alignment of ", stringify!(CPUArchState__bindgen_ty_1)) - ); -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct CPUArchState__bindgen_ty_2 {} -#[test] -fn bindgen_test_layout_CPUArchState__bindgen_ty_2() { - assert_eq!( - ::std::mem::size_of::(), - 0usize, - concat!("Size of: ", stringify!(CPUArchState__bindgen_ty_2)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!("Alignment of ", stringify!(CPUArchState__bindgen_ty_2)) - ); -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union CPUArchState__bindgen_ty_3 { - pub cpu_breakpoint: [*mut CPUBreakpoint; 4usize], - pub cpu_watchpoint: [*mut CPUWatchpoint; 4usize], -} -#[test] -fn bindgen_test_layout_CPUArchState__bindgen_ty_3() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(CPUArchState__bindgen_ty_3)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUArchState__bindgen_ty_3)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpu_breakpoint) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState__bindgen_ty_3), - "::", - stringify!(cpu_breakpoint) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpu_watchpoint) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState__bindgen_ty_3), - "::", - stringify!(cpu_watchpoint) - ) - ); -} -impl Default for CPUArchState__bindgen_ty_3 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUArchState__bindgen_ty_3 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "CPUArchState__bindgen_ty_3 {{ union }}") - } -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct CPUArchState__bindgen_ty_4 {} -#[test] -fn bindgen_test_layout_CPUArchState__bindgen_ty_4() { - assert_eq!( - ::std::mem::size_of::(), - 0usize, - concat!("Size of: ", stringify!(CPUArchState__bindgen_ty_4)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!("Alignment of ", stringify!(CPUArchState__bindgen_ty_4)) - ); -} -#[test] -fn bindgen_test_layout_CPUArchState() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 14896usize, - concat!("Size of: ", stringify!(CPUArchState)) - ); - assert_eq!( - ::std::mem::align_of::(), - 16usize, - concat!("Alignment of ", stringify!(CPUArchState)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).regs) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(regs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).eip) as usize - ptr as usize }, - 128usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(eip) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).eflags) as usize - ptr as usize }, - 136usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(eflags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cc_dst) as usize - ptr as usize }, - 144usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cc_dst) - ) - ); - assert_eq!( - unsafe { 
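// `CPUArchState` is QEMU's per-vCPU x86 state; the fields most relevant when
// driving or inspecting a guest are the GPR file (`regs`), `eip`, and `eflags`.
// A hedged sketch of direct field access through the generated struct (the
// register index is an assumption based on QEMU's i386 numbering, where R_EAX is 0):
unsafe fn snapshot_pc_and_rax(env: *const CPUArchState) -> (target_ulong, target_ulong) {
    // On x86_64 targets `target_ulong` is 64-bit and `eip` holds the full RIP.
    ((*env).eip, (*env).regs[0])
}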
::std::ptr::addr_of!((*ptr).cc_src) as usize - ptr as usize }, - 152usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cc_src) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cc_src2) as usize - ptr as usize }, - 160usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cc_src2) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cc_op) as usize - ptr as usize }, - 168usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cc_op) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).df) as usize - ptr as usize }, - 172usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(df) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hflags) as usize - ptr as usize }, - 176usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(hflags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hflags2) as usize - ptr as usize }, - 180usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(hflags2) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).segs) as usize - ptr as usize }, - 184usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(segs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).ldt) as usize - ptr as usize }, - 328usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(ldt) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tr) as usize - ptr as usize }, - 352usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(tr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).gdt) as usize - ptr as usize }, - 376usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(gdt) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).idt) as usize - ptr as usize }, - 400usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(idt) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cr) as usize - ptr as usize }, - 424usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pdptrs_valid) as usize - ptr as usize }, - 464usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(pdptrs_valid) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pdptrs) as usize - ptr as usize }, - 472usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(pdptrs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).a20_mask) as usize - ptr as usize }, - 504usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(a20_mask) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).bnd_regs) as usize - ptr as usize }, - 512usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(bnd_regs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).bndcs_regs) as usize - ptr as usize }, - 576usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(bndcs_regs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_bndcfgs) as usize - ptr as usize }, - 592usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - 
stringify!(msr_bndcfgs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).efer) as usize - ptr as usize }, - 600usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(efer) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).start_init_save) as usize - ptr as usize }, - 608usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(start_init_save) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpstt) as usize - ptr as usize }, - 608usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpstt) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpus) as usize - ptr as usize }, - 612usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpus) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpuc) as usize - ptr as usize }, - 614usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpuc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fptags) as usize - ptr as usize }, - 616usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fptags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpregs) as usize - ptr as usize }, - 624usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpregs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpop) as usize - ptr as usize }, - 752usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpop) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpcs) as usize - ptr as usize }, - 754usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpcs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpds) as usize - ptr as usize }, - 756usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpds) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpip) as usize - ptr as usize }, - 760usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpip) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpdp) as usize - ptr as usize }, - 768usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpdp) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fp_status) as usize - ptr as usize }, - 776usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fp_status) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).ft0) as usize - ptr as usize }, - 792usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(ft0) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mmx_status) as usize - ptr as usize }, - 808usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mmx_status) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sse_status) as usize - ptr as usize }, - 822usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(sse_status) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mxcsr) as usize - ptr as usize }, - 836usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mxcsr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).xmm_regs) as usize - ptr as usize }, - 848usize, - concat!( - "Offset 
of field: ", - stringify!(CPUArchState), - "::", - stringify!(xmm_regs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).xmm_t0) as usize - ptr as usize }, - 2896usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(xmm_t0) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mmx_t0) as usize - ptr as usize }, - 2960usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mmx_t0) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).opmask_regs) as usize - ptr as usize }, - 2968usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(opmask_regs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).xtilecfg) as usize - ptr as usize }, - 3032usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(xtilecfg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).xtiledata) as usize - ptr as usize }, - 3096usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(xtiledata) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sysenter_cs) as usize - ptr as usize }, - 11288usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(sysenter_cs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sysenter_esp) as usize - ptr as usize }, - 11296usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(sysenter_esp) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sysenter_eip) as usize - ptr as usize }, - 11304usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(sysenter_eip) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).star) as usize - ptr as usize }, - 11312usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(star) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vm_hsave) as usize - ptr as usize }, - 11320usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(vm_hsave) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lstar) as usize - ptr as usize }, - 11328usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(lstar) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cstar) as usize - ptr as usize }, - 11336usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cstar) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fmask) as usize - ptr as usize }, - 11344usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fmask) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kernelgsbase) as usize - ptr as usize }, - 11352usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(kernelgsbase) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tsc_adjust) as usize - ptr as usize }, - 11360usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(tsc_adjust) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tsc_deadline) as usize - ptr as usize }, - 11368usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(tsc_deadline) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tsc_aux) as usize - ptr as usize }, - 11376usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - 
"::", - stringify!(tsc_aux) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).xcr0) as usize - ptr as usize }, - 11384usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(xcr0) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mcg_status) as usize - ptr as usize }, - 11392usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mcg_status) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_ia32_misc_enable) as usize - ptr as usize }, - 11400usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_ia32_misc_enable) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_ia32_feature_control) as usize - ptr as usize }, - 11408usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_ia32_feature_control) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_ia32_sgxlepubkeyhash) as usize - ptr as usize }, - 11416usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_ia32_sgxlepubkeyhash) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_fixed_ctr_ctrl) as usize - ptr as usize }, - 11448usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_fixed_ctr_ctrl) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_global_ctrl) as usize - ptr as usize }, - 11456usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_global_ctrl) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_global_status) as usize - ptr as usize }, - 11464usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_global_status) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_global_ovf_ctrl) as usize - ptr as usize }, - 11472usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_global_ovf_ctrl) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_fixed_counters) as usize - ptr as usize }, - 11480usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_fixed_counters) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_gp_counters) as usize - ptr as usize }, - 11504usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_gp_counters) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_gp_evtsel) as usize - ptr as usize }, - 11648usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_gp_evtsel) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pat) as usize - ptr as usize }, - 11792usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(pat) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).smbase) as usize - ptr as usize }, - 11800usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(smbase) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_smi_count) as usize - ptr as usize }, - 11808usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_smi_count) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pkru) as usize - ptr as usize }, - 11816usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(pkru) - ) - ); - assert_eq!( - unsafe { 
::std::ptr::addr_of!((*ptr).pkrs) as usize - ptr as usize }, - 11820usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(pkrs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tsx_ctrl) as usize - ptr as usize }, - 11824usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(tsx_ctrl) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).spec_ctrl) as usize - ptr as usize }, - 11832usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(spec_ctrl) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).amd_tsc_scale_msr) as usize - ptr as usize }, - 11840usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(amd_tsc_scale_msr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).virt_ssbd) as usize - ptr as usize }, - 11848usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(virt_ssbd) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).end_init_save) as usize - ptr as usize }, - 11856usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(end_init_save) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).system_time_msr) as usize - ptr as usize }, - 11856usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(system_time_msr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).wall_clock_msr) as usize - ptr as usize }, - 11864usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(wall_clock_msr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).steal_time_msr) as usize - ptr as usize }, - 11872usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(steal_time_msr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).async_pf_en_msr) as usize - ptr as usize }, - 11880usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(async_pf_en_msr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).async_pf_int_msr) as usize - ptr as usize }, - 11888usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(async_pf_int_msr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pv_eoi_en_msr) as usize - ptr as usize }, - 11896usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(pv_eoi_en_msr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).poll_control_msr) as usize - ptr as usize }, - 11904usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(poll_control_msr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_hypercall) as usize - ptr as usize }, - 11912usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_hypercall) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_guest_os_id) as usize - ptr as usize }, - 11920usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_guest_os_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_tsc) as usize - ptr as usize }, - 11928usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_tsc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_syndbg_control) as usize - ptr as usize }, - 11936usize, - 
concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_syndbg_control) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_syndbg_status) as usize - ptr as usize }, - 11944usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_syndbg_status) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_syndbg_send_page) as usize - ptr as usize }, - 11952usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_syndbg_send_page) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_syndbg_recv_page) as usize - ptr as usize }, - 11960usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_syndbg_recv_page) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_syndbg_pending_page) as usize - ptr as usize }, - 11968usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_syndbg_pending_page) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_syndbg_options) as usize - ptr as usize }, - 11976usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_syndbg_options) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_vapic) as usize - ptr as usize }, - 11984usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_vapic) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_crash_params) as usize - ptr as usize }, - 11992usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_crash_params) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_runtime) as usize - ptr as usize }, - 12032usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_runtime) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_synic_control) as usize - ptr as usize }, - 12040usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_synic_control) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_synic_evt_page) as usize - ptr as usize }, - 12048usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_synic_evt_page) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_synic_msg_page) as usize - ptr as usize }, - 12056usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_synic_msg_page) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_synic_sint) as usize - ptr as usize }, - 12064usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_synic_sint) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_stimer_config) as usize - ptr as usize }, - 12192usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_stimer_config) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_stimer_count) as usize - ptr as usize }, - 12224usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_stimer_count) - ) - ); - assert_eq!( - unsafe { - ::std::ptr::addr_of!((*ptr).msr_hv_reenlightenment_control) as usize - ptr as usize - }, - 12256usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - 
stringify!(msr_hv_reenlightenment_control) - ) - ); - assert_eq!( - unsafe { - ::std::ptr::addr_of!((*ptr).msr_hv_tsc_emulation_control) as usize - ptr as usize - }, - 12264usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_tsc_emulation_control) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_hv_tsc_emulation_status) as usize - ptr as usize }, - 12272usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_hv_tsc_emulation_status) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_rtit_ctrl) as usize - ptr as usize }, - 12280usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_rtit_ctrl) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_rtit_status) as usize - ptr as usize }, - 12288usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_rtit_status) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_rtit_output_base) as usize - ptr as usize }, - 12296usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_rtit_output_base) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_rtit_output_mask) as usize - ptr as usize }, - 12304usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_rtit_output_mask) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_rtit_cr3_match) as usize - ptr as usize }, - 12312usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_rtit_cr3_match) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_rtit_addrs) as usize - ptr as usize }, - 12320usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_rtit_addrs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_xfd) as usize - ptr as usize }, - 12384usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_xfd) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_xfd_err) as usize - ptr as usize }, - 12392usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_xfd_err) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_lbr_ctl) as usize - ptr as usize }, - 12400usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_lbr_ctl) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).msr_lbr_depth) as usize - ptr as usize }, - 12408usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(msr_lbr_depth) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lbr_records) as usize - ptr as usize }, - 12416usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(lbr_records) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).error_code) as usize - ptr as usize }, - 13184usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(error_code) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).exception_is_int) as usize - ptr as usize }, - 13188usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(exception_is_int) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).exception_next_eip) as usize - ptr as usize }, - 13192usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - 
"::", - stringify!(exception_next_eip) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).dr) as usize - ptr as usize }, - 13200usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(dr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).old_exception) as usize - ptr as usize }, - 13296usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(old_exception) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vm_vmcb) as usize - ptr as usize }, - 13304usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(vm_vmcb) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tsc_offset) as usize - ptr as usize }, - 13312usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(tsc_offset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).intercept) as usize - ptr as usize }, - 13320usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(intercept) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).intercept_cr_read) as usize - ptr as usize }, - 13328usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(intercept_cr_read) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).intercept_cr_write) as usize - ptr as usize }, - 13330usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(intercept_cr_write) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).intercept_dr_read) as usize - ptr as usize }, - 13332usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(intercept_dr_read) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).intercept_dr_write) as usize - ptr as usize }, - 13334usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(intercept_dr_write) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).intercept_exceptions) as usize - ptr as usize }, - 13336usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(intercept_exceptions) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).nested_cr3) as usize - ptr as usize }, - 13344usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(nested_cr3) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).nested_pg_mode) as usize - ptr as usize }, - 13352usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(nested_pg_mode) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).v_tpr) as usize - ptr as usize }, - 13356usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(v_tpr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).int_ctl) as usize - ptr as usize }, - 13360usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(int_ctl) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).nmi_injected) as usize - ptr as usize }, - 13364usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(nmi_injected) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).nmi_pending) as usize - ptr as usize }, - 13365usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(nmi_pending) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).retaddr) as usize - ptr as usize 
}, - 13368usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(retaddr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).end_reset_fields) as usize - ptr as usize }, - 13376usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(end_reset_fields) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_level_func7) as usize - ptr as usize }, - 13376usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_level_func7) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_min_level_func7) as usize - ptr as usize }, - 13380usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_min_level_func7) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_min_level) as usize - ptr as usize }, - 13384usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_min_level) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_min_xlevel) as usize - ptr as usize }, - 13388usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_min_xlevel) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_min_xlevel2) as usize - ptr as usize }, - 13392usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_min_xlevel2) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_max_level) as usize - ptr as usize }, - 13396usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_max_level) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_max_xlevel) as usize - ptr as usize }, - 13400usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_max_xlevel) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_max_xlevel2) as usize - ptr as usize }, - 13404usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_max_xlevel2) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_level) as usize - ptr as usize }, - 13408usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_level) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_xlevel) as usize - ptr as usize }, - 13412usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_xlevel) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_xlevel2) as usize - ptr as usize }, - 13416usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_xlevel2) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_vendor1) as usize - ptr as usize }, - 13420usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_vendor1) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_vendor2) as usize - ptr as usize }, - 13424usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_vendor2) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_vendor3) as usize - ptr as usize }, - 13428usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_vendor3) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_version) as usize - ptr as usize }, - 13432usize, - concat!( - "Offset of 
field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_version) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).features) as usize - ptr as usize }, - 13440usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(features) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).user_features) as usize - ptr as usize }, - 13752usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(user_features) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpuid_model) as usize - ptr as usize }, - 14064usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cpuid_model) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cache_info_cpuid2) as usize - ptr as usize }, - 14112usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cache_info_cpuid2) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cache_info_cpuid4) as usize - ptr as usize }, - 14144usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cache_info_cpuid4) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cache_info_amd) as usize - ptr as usize }, - 14176usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(cache_info_amd) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mtrr_fixed) as usize - ptr as usize }, - 14208usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mtrr_fixed) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mtrr_deftype) as usize - ptr as usize }, - 14296usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mtrr_deftype) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mtrr_var) as usize - ptr as usize }, - 14304usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mtrr_var) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mp_state) as usize - ptr as usize }, - 14432usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mp_state) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).exception_nr) as usize - ptr as usize }, - 14436usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(exception_nr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).interrupt_injected) as usize - ptr as usize }, - 14440usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(interrupt_injected) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).soft_interrupt) as usize - ptr as usize }, - 14444usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(soft_interrupt) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).exception_pending) as usize - ptr as usize }, - 14445usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(exception_pending) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).exception_injected) as usize - ptr as usize }, - 14446usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(exception_injected) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).has_error_code) as usize - ptr as usize }, - 14447usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(has_error_code) - ) - ); - 
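// The zero-sized marker fields (`start_init_save`, `end_init_save`,
// `end_reset_fields`) carry no data and exist only for their offsets; QEMU
// clears everything up to `end_reset_fields` on CPU reset, and the other pair
// appears to bracket the state preserved across an INIT. Sketch of recovering
// that boundary from the bindings (offset_of! is stable since Rust 1.77; the
// 13376 value matches the layout assertion above):
fn x86_reset_region_len() -> usize {
    core::mem::offset_of!(CPUArchState, end_reset_fields)
}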
assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).exception_has_payload) as usize - ptr as usize }, - 14448usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(exception_has_payload) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).exception_payload) as usize - ptr as usize }, - 14456usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(exception_payload) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).triple_fault_pending) as usize - ptr as usize }, - 14464usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(triple_fault_pending) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).ins_len) as usize - ptr as usize }, - 14468usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(ins_len) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sipi_vector) as usize - ptr as usize }, - 14472usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(sipi_vector) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tsc_valid) as usize - ptr as usize }, - 14476usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(tsc_valid) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tsc_khz) as usize - ptr as usize }, - 14480usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(tsc_khz) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).user_tsc_khz) as usize - ptr as usize }, - 14488usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(user_tsc_khz) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).apic_bus_freq) as usize - ptr as usize }, - 14496usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(apic_bus_freq) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tsc) as usize - ptr as usize }, - 14504usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(tsc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mcg_cap) as usize - ptr as usize }, - 14512usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mcg_cap) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mcg_ctl) as usize - ptr as usize }, - 14520usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mcg_ctl) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mcg_ext_ctl) as usize - ptr as usize }, - 14528usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mcg_ext_ctl) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mce_banks) as usize - ptr as usize }, - 14536usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(mce_banks) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).xstate_bv) as usize - ptr as usize }, - 14856usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(xstate_bv) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpus_vmstate) as usize - ptr as usize }, - 14864usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpus_vmstate) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fptag_vmstate) as usize - ptr as usize }, - 14866usize, - concat!( - "Offset of field: ", - 
stringify!(CPUArchState), - "::", - stringify!(fptag_vmstate) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fpregs_format_vmstate) as usize - ptr as usize }, - 14868usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(fpregs_format_vmstate) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).xss) as usize - ptr as usize }, - 14872usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(xss) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).umwait) as usize - ptr as usize }, - 14880usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(umwait) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tpr_access_type) as usize - ptr as usize }, - 14884usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(tpr_access_type) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).nr_dies) as usize - ptr as usize }, - 14888usize, - concat!( - "Offset of field: ", - stringify!(CPUArchState), - "::", - stringify!(nr_dies) - ) - ); -} -impl Default for CPUArchState { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for CPUArchState { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write ! (f , "CPUArchState {{ regs: {:?}, segs: {:?}, ldt: {:?}, tr: {:?}, gdt: {:?}, idt: {:?}, cr: {:?}, pdptrs_valid: {:?}, pdptrs: {:?}, bnd_regs: {:?}, bndcs_regs: {:?}, start_init_save: {:?}, fpstt: {:?}, fptags: {:?}, fpregs: {:?}, fp_status: {:?}, ft0: {:?}, mmx_status: {:?}, sse_status: {:?}, xmm_regs: {:?}, xmm_t0: {:?}, mmx_t0: {:?}, opmask_regs: {:?}, xtilecfg: {:?}, xtiledata: {:?}, msr_ia32_sgxlepubkeyhash: {:?}, msr_fixed_counters: {:?}, msr_gp_counters: {:?}, msr_gp_evtsel: {:?}, end_init_save: {:?}, msr_hv_crash_params: {:?}, msr_hv_synic_sint: {:?}, msr_hv_stimer_config: {:?}, msr_hv_stimer_count: {:?}, msr_rtit_addrs: {:?}, lbr_records: {:?}, error_code: {:?}, exception_is_int: {:?}, dr: {:?}, __bindgen_anon_1: {:?}, old_exception: {:?}, end_reset_fields: {:?}, features: {:?}, user_features: {:?}, cpuid_model: {:?}, cache_info_cpuid2: {:?}, cache_info_cpuid4: {:?}, cache_info_amd: {:?}, mtrr_fixed: {:?}, mtrr_var: {:?}, tsc_valid: {:?}, mce_banks: {:?}, tpr_access_type: {:?}, nr_dies: {:?} }}" , self . regs , self . segs , self . ldt , self . tr , self . gdt , self . idt , self . cr , self . pdptrs_valid , self . pdptrs , self . bnd_regs , self . bndcs_regs , self . start_init_save , self . fpstt , self . fptags , self . fpregs , self . fp_status , self . ft0 , self . mmx_status , self . sse_status , self . xmm_regs , self . xmm_t0 , self . mmx_t0 , self . opmask_regs , self . xtilecfg , self . xtiledata , self . msr_ia32_sgxlepubkeyhash , self . msr_fixed_counters , self . msr_gp_counters , self . msr_gp_evtsel , self . end_init_save , self . msr_hv_crash_params , self . msr_hv_synic_sint , self . msr_hv_stimer_config , self . msr_hv_stimer_count , self . msr_rtit_addrs , self . lbr_records , self . error_code , self . exception_is_int , self . dr , self . __bindgen_anon_1 , self . old_exception , self . end_reset_fields , self . features , self . user_features , self . cpuid_model , self . cache_info_cpuid2 , self . cache_info_cpuid4 , self . cache_info_amd , self . mtrr_fixed , self . mtrr_var , self . tsc_valid , self . mce_banks , self . 
tpr_access_type , self . nr_dies) - } -} -pub type CPUX86State = CPUArchState; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct kvm_msrs { - _unused: [u8; 0], -} -#[doc = " X86CPU:\n @env: #CPUX86State\n @migratable: If set, only migratable flags will be accepted when \"enforce\"\n mode is used, and only migratable flags will be included in the \"host\"\n CPU model.\n\n An x86 CPU."] -#[repr(C)] -#[repr(align(16))] -pub struct ArchCPU { - pub parent_obj: CPUState, - pub env: CPUX86State, - pub vmsentry: *mut VMChangeStateEntry, - pub ucode_rev: u64, - pub hyperv_spinlock_attempts: u32, - pub hyperv_vendor: *mut ::std::os::raw::c_char, - pub hyperv_synic_kvm_only: bool, - pub hyperv_features: u64, - pub hyperv_passthrough: bool, - pub hyperv_no_nonarch_cs: OnOffAuto, - pub hyperv_vendor_id: [u32; 3usize], - pub hyperv_interface_id: [u32; 4usize], - pub hyperv_limits: [u32; 3usize], - pub hyperv_enforce_cpuid: bool, - pub hyperv_ver_id_build: u32, - pub hyperv_ver_id_major: u16, - pub hyperv_ver_id_minor: u16, - pub hyperv_ver_id_sp: u32, - pub hyperv_ver_id_sb: u8, - pub hyperv_ver_id_sn: u32, - pub check_cpuid: bool, - pub enforce_cpuid: bool, - pub force_features: bool, - pub expose_kvm: bool, - pub expose_tcg: bool, - pub migratable: bool, - pub migrate_smi_count: bool, - pub max_features: bool, - pub apic_id: u32, - pub vmware_cpuid_freq: bool, - pub cache_info_passthrough: bool, - pub mwait: ArchCPU__bindgen_ty_1, - pub filtered_features: FeatureWordArray, - pub enable_pmu: bool, - pub lbr_fmt: u64, - pub enable_lmce: bool, - pub enable_l3_cache: bool, - pub legacy_cache: bool, - pub enable_cpuid_0xb: bool, - pub full_cpuid_auto_level: bool, - pub vendor_cpuid_only: bool, - pub intel_pt_auto_level: bool, - pub fill_mtrr_mask: bool, - pub host_phys_bits: bool, - pub host_phys_bits_limit: u8, - pub kvm_no_smi_migration: bool, - pub kvm_pv_enforce_cpuid: bool, - pub phys_bits: u32, - pub apic_state: *mut DeviceState, - pub cpu_as_root: *mut MemoryRegion, - pub cpu_as_mem: *mut MemoryRegion, - pub smram: *mut MemoryRegion, - pub machine_done: Notifier, - pub kvm_msr_buf: *mut kvm_msrs, - pub node_id: i32, - pub socket_id: i32, - pub die_id: i32, - pub core_id: i32, - pub thread_id: i32, - pub hv_max_vps: i32, - pub xen_vapic: bool, -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct ArchCPU__bindgen_ty_1 { - pub eax: u32, - pub ebx: u32, - pub ecx: u32, - pub edx: u32, -} -#[test] -fn bindgen_test_layout_ArchCPU__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(ArchCPU__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(ArchCPU__bindgen_ty_1)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).eax) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU__bindgen_ty_1), - "::", - stringify!(eax) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).ebx) as usize - ptr as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU__bindgen_ty_1), - "::", - stringify!(ebx) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).ecx) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU__bindgen_ty_1), - "::", - stringify!(ecx) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).edx) as usize - ptr as usize }, - 12usize, - concat!( - 
"Offset of field: ", - stringify!(ArchCPU__bindgen_ty_1), - "::", - stringify!(edx) - ) - ); -} -#[test] -fn bindgen_test_layout_ArchCPU() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 25664usize, - concat!("Size of: ", stringify!(ArchCPU)) - ); - assert_eq!( - ::std::mem::align_of::(), - 16usize, - concat!("Alignment of ", stringify!(ArchCPU)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).parent_obj) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(parent_obj) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).env) as usize - ptr as usize }, - 10176usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(env) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vmsentry) as usize - ptr as usize }, - 25072usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(vmsentry) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).ucode_rev) as usize - ptr as usize }, - 25080usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(ucode_rev) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_spinlock_attempts) as usize - ptr as usize }, - 25088usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_spinlock_attempts) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_vendor) as usize - ptr as usize }, - 25096usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_vendor) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_synic_kvm_only) as usize - ptr as usize }, - 25104usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_synic_kvm_only) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_features) as usize - ptr as usize }, - 25112usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_features) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_passthrough) as usize - ptr as usize }, - 25120usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_passthrough) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_no_nonarch_cs) as usize - ptr as usize }, - 25124usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_no_nonarch_cs) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_vendor_id) as usize - ptr as usize }, - 25128usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_vendor_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_interface_id) as usize - ptr as usize }, - 25140usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_interface_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_limits) as usize - ptr as usize }, - 25156usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_limits) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_enforce_cpuid) as usize - ptr as usize }, - 25168usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_enforce_cpuid) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_ver_id_build) as usize - ptr as 
usize }, - 25172usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_ver_id_build) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_ver_id_major) as usize - ptr as usize }, - 25176usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_ver_id_major) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_ver_id_minor) as usize - ptr as usize }, - 25178usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_ver_id_minor) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_ver_id_sp) as usize - ptr as usize }, - 25180usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_ver_id_sp) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_ver_id_sb) as usize - ptr as usize }, - 25184usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_ver_id_sb) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hyperv_ver_id_sn) as usize - ptr as usize }, - 25188usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hyperv_ver_id_sn) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).check_cpuid) as usize - ptr as usize }, - 25192usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(check_cpuid) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).enforce_cpuid) as usize - ptr as usize }, - 25193usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(enforce_cpuid) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).force_features) as usize - ptr as usize }, - 25194usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(force_features) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).expose_kvm) as usize - ptr as usize }, - 25195usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(expose_kvm) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).expose_tcg) as usize - ptr as usize }, - 25196usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(expose_tcg) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).migratable) as usize - ptr as usize }, - 25197usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(migratable) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).migrate_smi_count) as usize - ptr as usize }, - 25198usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(migrate_smi_count) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).max_features) as usize - ptr as usize }, - 25199usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(max_features) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).apic_id) as usize - ptr as usize }, - 25200usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(apic_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vmware_cpuid_freq) as usize - ptr as usize }, - 25204usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(vmware_cpuid_freq) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cache_info_passthrough) as usize - ptr as usize }, - 25205usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(cache_info_passthrough) - ) - ); - 
assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mwait) as usize - ptr as usize }, - 25208usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(mwait) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).filtered_features) as usize - ptr as usize }, - 25224usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(filtered_features) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).enable_pmu) as usize - ptr as usize }, - 25536usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(enable_pmu) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).lbr_fmt) as usize - ptr as usize }, - 25544usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(lbr_fmt) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).enable_lmce) as usize - ptr as usize }, - 25552usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(enable_lmce) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).enable_l3_cache) as usize - ptr as usize }, - 25553usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(enable_l3_cache) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).legacy_cache) as usize - ptr as usize }, - 25554usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(legacy_cache) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).enable_cpuid_0xb) as usize - ptr as usize }, - 25555usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(enable_cpuid_0xb) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).full_cpuid_auto_level) as usize - ptr as usize }, - 25556usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(full_cpuid_auto_level) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).vendor_cpuid_only) as usize - ptr as usize }, - 25557usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(vendor_cpuid_only) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).intel_pt_auto_level) as usize - ptr as usize }, - 25558usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(intel_pt_auto_level) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).fill_mtrr_mask) as usize - ptr as usize }, - 25559usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(fill_mtrr_mask) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).host_phys_bits) as usize - ptr as usize }, - 25560usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(host_phys_bits) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).host_phys_bits_limit) as usize - ptr as usize }, - 25561usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(host_phys_bits_limit) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kvm_no_smi_migration) as usize - ptr as usize }, - 25562usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(kvm_no_smi_migration) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kvm_pv_enforce_cpuid) as usize - ptr as usize }, - 25563usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(kvm_pv_enforce_cpuid) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).phys_bits) as usize - ptr as usize }, - 25564usize, - concat!( - 
"Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(phys_bits) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).apic_state) as usize - ptr as usize }, - 25568usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(apic_state) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpu_as_root) as usize - ptr as usize }, - 25576usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(cpu_as_root) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpu_as_mem) as usize - ptr as usize }, - 25584usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(cpu_as_mem) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).smram) as usize - ptr as usize }, - 25592usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(smram) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).machine_done) as usize - ptr as usize }, - 25600usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(machine_done) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kvm_msr_buf) as usize - ptr as usize }, - 25624usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(kvm_msr_buf) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).node_id) as usize - ptr as usize }, - 25632usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(node_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).socket_id) as usize - ptr as usize }, - 25636usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(socket_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).die_id) as usize - ptr as usize }, - 25640usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(die_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).core_id) as usize - ptr as usize }, - 25644usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(core_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).thread_id) as usize - ptr as usize }, - 25648usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(thread_id) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).hv_max_vps) as usize - ptr as usize }, - 25652usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(hv_max_vps) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).xen_vapic) as usize - ptr as usize }, - 25656usize, - concat!( - "Offset of field: ", - stringify!(ArchCPU), - "::", - stringify!(xen_vapic) - ) - ); -} -impl Default for ArchCPU { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for ArchCPU { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write ! 
(f , "ArchCPU {{ parent_obj: {:?}, env: {:?}, vmsentry: {:?}, hyperv_vendor: {:?}, hyperv_synic_kvm_only: {:?}, hyperv_passthrough: {:?}, hyperv_no_nonarch_cs: {:?}, hyperv_vendor_id: {:?}, hyperv_interface_id: {:?}, hyperv_limits: {:?}, hyperv_enforce_cpuid: {:?}, check_cpuid: {:?}, enforce_cpuid: {:?}, force_features: {:?}, expose_kvm: {:?}, expose_tcg: {:?}, migratable: {:?}, migrate_smi_count: {:?}, max_features: {:?}, vmware_cpuid_freq: {:?}, cache_info_passthrough: {:?}, mwait: {:?}, filtered_features: {:?}, enable_pmu: {:?}, enable_lmce: {:?}, enable_l3_cache: {:?}, legacy_cache: {:?}, enable_cpuid_0xb: {:?}, full_cpuid_auto_level: {:?}, vendor_cpuid_only: {:?}, intel_pt_auto_level: {:?}, fill_mtrr_mask: {:?}, host_phys_bits: {:?}, kvm_no_smi_migration: {:?}, kvm_pv_enforce_cpuid: {:?}, apic_state: {:?}, cpu_as_root: {:?}, cpu_as_mem: {:?}, smram: {:?}, machine_done: {:?}, kvm_msr_buf: {:?}, xen_vapic: {:?} }}" , self . parent_obj , self . env , self . vmsentry , self . hyperv_vendor , self . hyperv_synic_kvm_only , self . hyperv_passthrough , self . hyperv_no_nonarch_cs , self . hyperv_vendor_id , self . hyperv_interface_id , self . hyperv_limits , self . hyperv_enforce_cpuid , self . check_cpuid , self . enforce_cpuid , self . force_features , self . expose_kvm , self . expose_tcg , self . migratable , self . migrate_smi_count , self . max_features , self . vmware_cpuid_freq , self . cache_info_passthrough , self . mwait , self . filtered_features , self . enable_pmu , self . enable_lmce , self . enable_l3_cache , self . legacy_cache , self . enable_cpuid_0xb , self . full_cpuid_auto_level , self . vendor_cpuid_only , self . intel_pt_auto_level , self . fill_mtrr_mask , self . host_phys_bits , self . kvm_no_smi_migration , self . kvm_pv_enforce_cpuid , self . apic_state , self . cpu_as_root , self . cpu_as_mem , self . smram , self . machine_done , self . kvm_msr_buf , self . 
xen_vapic) - } -} -extern "C" { - pub fn cpu_memory_rw_debug( - cpu: *mut CPUState, - addr: vaddr, - ptr: *mut ::std::os::raw::c_void, - len: usize, - is_write: bool, - ) -> ::std::os::raw::c_int; -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct RBNode { - pub rb_parent_color: usize, - pub rb_right: *mut RBNode, - pub rb_left: *mut RBNode, -} -#[test] -fn bindgen_test_layout_RBNode() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(RBNode)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(RBNode)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).rb_parent_color) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(RBNode), - "::", - stringify!(rb_parent_color) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).rb_right) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(RBNode), - "::", - stringify!(rb_right) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).rb_left) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(RBNode), - "::", - stringify!(rb_left) - ) - ); -} -impl Default for RBNode { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct RBRoot { - pub rb_node: *mut RBNode, -} -#[test] -fn bindgen_test_layout_RBRoot() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(RBRoot)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(RBRoot)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).rb_node) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(RBRoot), - "::", - stringify!(rb_node) - ) - ); -} -impl Default for RBRoot { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct RBRootLeftCached { - pub rb_root: RBRoot, - pub rb_leftmost: *mut RBNode, -} -#[test] -fn bindgen_test_layout_RBRootLeftCached() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(RBRootLeftCached)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(RBRootLeftCached)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).rb_root) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(RBRootLeftCached), - "::", - stringify!(rb_root) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).rb_leftmost) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(RBRootLeftCached), - "::", - stringify!(rb_leftmost) - ) - ); -} -impl Default for RBRootLeftCached { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct IntervalTreeNode { - 
pub rb: RBNode, - pub start: u64, - pub last: u64, - pub subtree_last: u64, -} -#[test] -fn bindgen_test_layout_IntervalTreeNode() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 48usize, - concat!("Size of: ", stringify!(IntervalTreeNode)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(IntervalTreeNode)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).rb) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(IntervalTreeNode), - "::", - stringify!(rb) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).start) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(IntervalTreeNode), - "::", - stringify!(start) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).last) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(IntervalTreeNode), - "::", - stringify!(last) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).subtree_last) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(IntervalTreeNode), - "::", - stringify!(subtree_last) - ) - ); -} -impl Default for IntervalTreeNode { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -pub type IntervalTreeRoot = RBRootLeftCached; -pub type abi_ulong = target_ulong; -pub type abi_long = target_long; -extern "C" { - #[doc = " --- Begin LibAFL code ---"] - pub fn pageflags_get_root() -> *mut IntervalTreeRoot; -} -extern "C" { - #[doc = " page_check_range\n @start: first byte of range\n @len: length of range\n @flags: flags required for each page\n\n Return true if every page in [@start, @start+@len) has @flags set.\n Return false if any page is unmapped. 
Thus testing flags == 0 is\n equivalent to testing for flags == PAGE_VALID."] - pub fn page_check_range( - start: target_ulong, - last: target_ulong, - flags: ::std::os::raw::c_int, - ) -> bool; -} -pub const MemOp_MO_8: MemOp = MemOp(0); -pub const MemOp_MO_16: MemOp = MemOp(1); -pub const MemOp_MO_32: MemOp = MemOp(2); -pub const MemOp_MO_64: MemOp = MemOp(3); -pub const MemOp_MO_128: MemOp = MemOp(4); -pub const MemOp_MO_256: MemOp = MemOp(5); -pub const MemOp_MO_512: MemOp = MemOp(6); -pub const MemOp_MO_1024: MemOp = MemOp(7); -pub const MemOp_MO_SIZE: MemOp = MemOp(7); -pub const MemOp_MO_SIGN: MemOp = MemOp(8); -pub const MemOp_MO_BSWAP: MemOp = MemOp(16); -pub const MemOp_MO_LE: MemOp = MemOp(0); -pub const MemOp_MO_BE: MemOp = MemOp(16); -pub const MemOp_MO_TE: MemOp = MemOp(0); -pub const MemOp_MO_ASHIFT: MemOp = MemOp(5); -pub const MemOp_MO_AMASK: MemOp = MemOp(224); -pub const MemOp_MO_UNALN: MemOp = MemOp(0); -pub const MemOp_MO_ALIGN_2: MemOp = MemOp(32); -pub const MemOp_MO_ALIGN_4: MemOp = MemOp(64); -pub const MemOp_MO_ALIGN_8: MemOp = MemOp(96); -pub const MemOp_MO_ALIGN_16: MemOp = MemOp(128); -pub const MemOp_MO_ALIGN_32: MemOp = MemOp(160); -pub const MemOp_MO_ALIGN_64: MemOp = MemOp(192); -pub const MemOp_MO_ALIGN: MemOp = MemOp(224); -pub const MemOp_MO_ATOM_SHIFT: MemOp = MemOp(8); -pub const MemOp_MO_ATOM_IFALIGN: MemOp = MemOp(0); -pub const MemOp_MO_ATOM_IFALIGN_PAIR: MemOp = MemOp(256); -pub const MemOp_MO_ATOM_WITHIN16: MemOp = MemOp(512); -pub const MemOp_MO_ATOM_WITHIN16_PAIR: MemOp = MemOp(768); -pub const MemOp_MO_ATOM_SUBALIGN: MemOp = MemOp(1024); -pub const MemOp_MO_ATOM_NONE: MemOp = MemOp(1280); -pub const MemOp_MO_ATOM_MASK: MemOp = MemOp(1792); -pub const MemOp_MO_UB: MemOp = MemOp(0); -pub const MemOp_MO_UW: MemOp = MemOp(1); -pub const MemOp_MO_UL: MemOp = MemOp(2); -pub const MemOp_MO_UQ: MemOp = MemOp(3); -pub const MemOp_MO_UO: MemOp = MemOp(4); -pub const MemOp_MO_SB: MemOp = MemOp(8); -pub const MemOp_MO_SW: MemOp = MemOp(9); -pub const MemOp_MO_SL: MemOp = MemOp(10); -pub const MemOp_MO_SQ: MemOp = MemOp(11); -pub const MemOp_MO_SO: MemOp = MemOp(12); -pub const MemOp_MO_LEUW: MemOp = MemOp(1); -pub const MemOp_MO_LEUL: MemOp = MemOp(2); -pub const MemOp_MO_LEUQ: MemOp = MemOp(3); -pub const MemOp_MO_LESW: MemOp = MemOp(9); -pub const MemOp_MO_LESL: MemOp = MemOp(10); -pub const MemOp_MO_LESQ: MemOp = MemOp(11); -pub const MemOp_MO_BEUW: MemOp = MemOp(17); -pub const MemOp_MO_BEUL: MemOp = MemOp(18); -pub const MemOp_MO_BEUQ: MemOp = MemOp(19); -pub const MemOp_MO_BESW: MemOp = MemOp(25); -pub const MemOp_MO_BESL: MemOp = MemOp(26); -pub const MemOp_MO_BESQ: MemOp = MemOp(27); -pub const MemOp_MO_TEUW: MemOp = MemOp(1); -pub const MemOp_MO_TEUL: MemOp = MemOp(2); -pub const MemOp_MO_TEUQ: MemOp = MemOp(3); -pub const MemOp_MO_TEUO: MemOp = MemOp(4); -pub const MemOp_MO_TESW: MemOp = MemOp(9); -pub const MemOp_MO_TESL: MemOp = MemOp(10); -pub const MemOp_MO_TESQ: MemOp = MemOp(11); -pub const MemOp_MO_SSIZE: MemOp = MemOp(15); -impl ::std::ops::BitOr for MemOp { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - MemOp(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for MemOp { - #[inline] - fn bitor_assign(&mut self, rhs: MemOp) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for MemOp { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - MemOp(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for MemOp { - #[inline] - fn bitand_assign(&mut self, rhs: MemOp) { - self.0 &= 
rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct MemOp(pub ::std::os::raw::c_uint); -pub type MemOpIdx = u32; -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct tb_tc { - pub ptr: *const ::std::os::raw::c_void, - pub size: usize, -} -#[test] -fn bindgen_test_layout_tb_tc() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(tb_tc)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(tb_tc)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).ptr) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(tb_tc), - "::", - stringify!(ptr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).size) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(tb_tc), - "::", - stringify!(size) - ) - ); -} -impl Default for tb_tc { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct TranslationBlock { - pub pc: vaddr, - pub cs_base: u64, - pub flags: u32, - pub cflags: u32, - pub size: u16, - pub icount: u16, - pub tc: tb_tc, - pub itree: IntervalTreeNode, - pub jmp_lock: QemuSpin, - pub jmp_reset_offset: [u16; 2usize], - pub jmp_insn_offset: [u16; 2usize], - pub jmp_target_addr: [usize; 2usize], - pub jmp_list_head: usize, - pub jmp_list_next: [usize; 2usize], - pub jmp_dest: [usize; 2usize], -} -#[test] -fn bindgen_test_layout_TranslationBlock() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 168usize, - concat!("Size of: ", stringify!(TranslationBlock)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(TranslationBlock)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).pc) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(pc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cs_base) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(cs_base) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cflags) as usize - ptr as usize }, - 20usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(cflags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).size) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(size) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).icount) as usize - ptr as usize }, - 26usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(icount) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).tc) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(tc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).itree) as usize - ptr as usize }, - 
48usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(itree) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).jmp_lock) as usize - ptr as usize }, - 96usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(jmp_lock) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).jmp_reset_offset) as usize - ptr as usize }, - 100usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(jmp_reset_offset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).jmp_insn_offset) as usize - ptr as usize }, - 104usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(jmp_insn_offset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).jmp_target_addr) as usize - ptr as usize }, - 112usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(jmp_target_addr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).jmp_list_head) as usize - ptr as usize }, - 128usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(jmp_list_head) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).jmp_list_next) as usize - ptr as usize }, - 136usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(jmp_list_next) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).jmp_dest) as usize - ptr as usize }, - 152usize, - concat!( - "Offset of field: ", - stringify!(TranslationBlock), - "::", - stringify!(jmp_dest) - ) - ); -} -impl Default for TranslationBlock { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -extern "C" { - pub fn target_mprotect( - start: abi_ulong, - len: abi_ulong, - prot: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn target_mmap( - start: abi_ulong, - len: abi_ulong, - prot: ::std::os::raw::c_int, - flags: ::std::os::raw::c_int, - fd: ::std::os::raw::c_int, - offset: off_t, - ) -> abi_long; -} -extern "C" { - pub fn target_munmap(start: abi_ulong, len: abi_ulong) -> ::std::os::raw::c_int; -} -extern "C" { - #[doc = " read_self_maps:\n\n Read /proc/self/maps and return a tree of MapInfo structures."] - pub fn read_self_maps() -> *mut IntervalTreeRoot; -} -extern "C" { - #[doc = " free_self_maps:\n @info: an interval tree\n\n Free a tree of MapInfo structures."] - pub fn free_self_maps(root: *mut IntervalTreeRoot); -} -extern "C" { - pub fn libafl_breakpoint_invalidate(cpu: *mut CPUState, pc: target_ulong); -} -extern "C" { - pub fn libafl_qemu_set_breakpoint(pc: target_ulong) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_qemu_remove_breakpoint(pc: target_ulong) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_qemu_trigger_breakpoint(cpu: *mut CPUState); -} -pub const libafl_exit_reason_kind_INTERNAL: libafl_exit_reason_kind = libafl_exit_reason_kind(0); -pub const libafl_exit_reason_kind_BREAKPOINT: libafl_exit_reason_kind = libafl_exit_reason_kind(1); -pub const libafl_exit_reason_kind_SYNC_EXIT: libafl_exit_reason_kind = libafl_exit_reason_kind(2); -impl ::std::ops::BitOr for libafl_exit_reason_kind { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - libafl_exit_reason_kind(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for libafl_exit_reason_kind { - #[inline] - fn bitor_assign(&mut 
self, rhs: libafl_exit_reason_kind) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for libafl_exit_reason_kind { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - libafl_exit_reason_kind(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for libafl_exit_reason_kind { - #[inline] - fn bitand_assign(&mut self, rhs: libafl_exit_reason_kind) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct libafl_exit_reason_kind(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct libafl_exit_reason_breakpoint { - pub addr: target_ulong, -} -#[test] -fn bindgen_test_layout_libafl_exit_reason_breakpoint() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(libafl_exit_reason_breakpoint)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(libafl_exit_reason_breakpoint)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).addr) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(libafl_exit_reason_breakpoint), - "::", - stringify!(addr) - ) - ); -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct libafl_exit_reason_sync_exit {} -#[test] -fn bindgen_test_layout_libafl_exit_reason_sync_exit() { - assert_eq!( - ::std::mem::size_of::(), - 0usize, - concat!("Size of: ", stringify!(libafl_exit_reason_sync_exit)) - ); - assert_eq!( - ::std::mem::align_of::(), - 1usize, - concat!("Alignment of ", stringify!(libafl_exit_reason_sync_exit)) - ); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct libafl_exit_reason_internal { - pub cause: ShutdownCause, - pub signal: ::std::os::raw::c_int, -} -#[test] -fn bindgen_test_layout_libafl_exit_reason_internal() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(libafl_exit_reason_internal)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(libafl_exit_reason_internal)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cause) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(libafl_exit_reason_internal), - "::", - stringify!(cause) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).signal) as usize - ptr as usize }, - 4usize, - concat!( - "Offset of field: ", - stringify!(libafl_exit_reason_internal), - "::", - stringify!(signal) - ) - ); -} -impl Default for libafl_exit_reason_internal { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -#[repr(C)] -#[derive(Copy, Clone)] -pub struct libafl_exit_reason { - pub kind: libafl_exit_reason_kind, - pub cpu: *mut CPUState, - pub next_pc: vaddr, - pub data: libafl_exit_reason__bindgen_ty_1, -} -#[repr(C)] -#[derive(Copy, Clone)] -pub union libafl_exit_reason__bindgen_ty_1 { - pub internal: libafl_exit_reason_internal, - pub breakpoint: libafl_exit_reason_breakpoint, - pub sync_exit: libafl_exit_reason_sync_exit, -} -#[test] -fn bindgen_test_layout_libafl_exit_reason__bindgen_ty_1() { - const UNINIT: ::std::mem::MaybeUninit = - ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - 
::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(libafl_exit_reason__bindgen_ty_1)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!( - "Alignment of ", - stringify!(libafl_exit_reason__bindgen_ty_1) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).internal) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(libafl_exit_reason__bindgen_ty_1), - "::", - stringify!(internal) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).breakpoint) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(libafl_exit_reason__bindgen_ty_1), - "::", - stringify!(breakpoint) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).sync_exit) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(libafl_exit_reason__bindgen_ty_1), - "::", - stringify!(sync_exit) - ) - ); -} -impl Default for libafl_exit_reason__bindgen_ty_1 { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for libafl_exit_reason__bindgen_ty_1 { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!(f, "libafl_exit_reason__bindgen_ty_1 {{ union }}") - } -} -#[test] -fn bindgen_test_layout_libafl_exit_reason() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 32usize, - concat!("Size of: ", stringify!(libafl_exit_reason)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(libafl_exit_reason)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).kind) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(libafl_exit_reason), - "::", - stringify!(kind) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpu) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(libafl_exit_reason), - "::", - stringify!(cpu) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).next_pc) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(libafl_exit_reason), - "::", - stringify!(next_pc) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(libafl_exit_reason), - "::", - stringify!(data) - ) - ); -} -impl Default for libafl_exit_reason { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl ::std::fmt::Debug for libafl_exit_reason { - fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { - write!( - f, - "libafl_exit_reason {{ kind: {:?}, cpu: {:?}, data: {:?} }}", - self.kind, self.cpu, self.data - ) - } -} -extern "C" { - pub fn libafl_last_exit_cpu() -> *mut CPUState; -} -extern "C" { - pub fn libafl_exit_signal_vm_start(); -} -extern "C" { - pub fn libafl_exit_asap() -> bool; -} -extern "C" { - pub fn libafl_sync_exit_cpu(); -} -extern "C" { - pub fn libafl_exit_request_internal( - cpu: *mut CPUState, - pc: u64, - cause: ShutdownCause, - signal: ::std::os::raw::c_int, - ); -} -extern "C" { - pub fn libafl_exit_request_sync_backdoor(cpu: *mut CPUState, pc: target_ulong); -} -extern "C" { - pub fn libafl_exit_request_breakpoint(cpu: *mut CPUState, pc: 
target_ulong); -} -extern "C" { - pub fn libafl_get_exit_reason() -> *mut libafl_exit_reason; -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct libafl_mapinfo { - pub start: target_ulong, - pub end: target_ulong, - pub offset: target_ulong, - pub path: *const ::std::os::raw::c_char, - pub flags: ::std::os::raw::c_int, - pub is_priv: ::std::os::raw::c_int, - pub is_valid: bool, -} -#[test] -fn bindgen_test_layout_libafl_mapinfo() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 48usize, - concat!("Size of: ", stringify!(libafl_mapinfo)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(libafl_mapinfo)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).start) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(libafl_mapinfo), - "::", - stringify!(start) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).end) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(libafl_mapinfo), - "::", - stringify!(end) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).offset) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(libafl_mapinfo), - "::", - stringify!(offset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).path) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(libafl_mapinfo), - "::", - stringify!(path) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).flags) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(libafl_mapinfo), - "::", - stringify!(flags) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).is_priv) as usize - ptr as usize }, - 36usize, - concat!( - "Offset of field: ", - stringify!(libafl_mapinfo), - "::", - stringify!(is_priv) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).is_valid) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(libafl_mapinfo), - "::", - stringify!(is_valid) - ) - ); -} -impl Default for libafl_mapinfo { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -extern "C" { - pub fn libafl_maps_first(map_info: *mut IntervalTreeRoot) -> *mut IntervalTreeNode; -} -extern "C" { - pub fn libafl_maps_next( - pageflags_maps_node: *mut IntervalTreeNode, - proc_maps_node: *mut IntervalTreeRoot, - ret: *mut libafl_mapinfo, - ) -> *mut IntervalTreeNode; -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct AccelCPUClass { - pub parent_class: ObjectClass, - pub cpu_class_init: ::std::option::Option, - pub cpu_instance_init: ::std::option::Option, - pub cpu_target_realize: ::std::option::Option< - unsafe extern "C" fn(cpu: *mut CPUState, errp: *mut *mut Error) -> bool, - >, -} -#[test] -fn bindgen_test_layout_AccelCPUClass() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 120usize, - concat!("Size of: ", stringify!(AccelCPUClass)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(AccelCPUClass)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).parent_class) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(AccelCPUClass), - "::", - 
stringify!(parent_class) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpu_class_init) as usize - ptr as usize }, - 96usize, - concat!( - "Offset of field: ", - stringify!(AccelCPUClass), - "::", - stringify!(cpu_class_init) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpu_instance_init) as usize - ptr as usize }, - 104usize, - concat!( - "Offset of field: ", - stringify!(AccelCPUClass), - "::", - stringify!(cpu_instance_init) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).cpu_target_realize) as usize - ptr as usize }, - 112usize, - concat!( - "Offset of field: ", - stringify!(AccelCPUClass), - "::", - stringify!(cpu_target_realize) - ) - ); -} -impl Default for AccelCPUClass { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -pub const qemu_plugin_mem_rw_QEMU_PLUGIN_MEM_R: qemu_plugin_mem_rw = qemu_plugin_mem_rw(1); -pub const qemu_plugin_mem_rw_QEMU_PLUGIN_MEM_W: qemu_plugin_mem_rw = qemu_plugin_mem_rw(2); -pub const qemu_plugin_mem_rw_QEMU_PLUGIN_MEM_RW: qemu_plugin_mem_rw = qemu_plugin_mem_rw(3); -impl ::std::ops::BitOr for qemu_plugin_mem_rw { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - qemu_plugin_mem_rw(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for qemu_plugin_mem_rw { - #[inline] - fn bitor_assign(&mut self, rhs: qemu_plugin_mem_rw) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for qemu_plugin_mem_rw { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - qemu_plugin_mem_rw(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for qemu_plugin_mem_rw { - #[inline] - fn bitand_assign(&mut self, rhs: qemu_plugin_mem_rw) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct qemu_plugin_mem_rw(pub ::std::os::raw::c_uint); -#[doc = " typedef qemu_plugin_meminfo_t - opaque memory transaction handle\n\n This can be further queried using the qemu_plugin_mem_* query\n functions."] -pub type qemu_plugin_meminfo_t = u32; -extern "C" { - #[doc = " qemu_plugin_get_hwaddr() - return handle for memory operation\n @info: opaque memory info structure\n @vaddr: the virtual address of the memory operation\n\n For system emulation returns a qemu_plugin_hwaddr handle to query\n details about the actual physical address backing the virtual\n address. For linux-user guests it just returns NULL.\n\n This handle is *only* valid for the duration of the callback. Any\n information about the handle should be recovered before the\n callback returns."] - pub fn qemu_plugin_get_hwaddr( - info: qemu_plugin_meminfo_t, - vaddr: u64, - ) -> *mut qemu_plugin_hwaddr; -} -extern "C" { - #[doc = " qemu_plugin_hwaddr_phys_addr() - query physical address for memory operation\n @haddr: address handle from qemu_plugin_get_hwaddr()\n\n Returns the physical address associated with the memory operation\n\n Note that the returned physical address may not be unique if you are dealing\n with multiple address spaces."] - pub fn qemu_plugin_hwaddr_phys_addr(haddr: *const qemu_plugin_hwaddr) -> u64; -} -#[doc = " struct CPUPluginState - per-CPU state for plugins\n @event_mask: plugin event bitmap. 
Modified only via async work."] -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct CPUPluginState { - pub event_mask: [::std::os::raw::c_ulong; 1usize], -} -#[test] -fn bindgen_test_layout_CPUPluginState() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 8usize, - concat!("Size of: ", stringify!(CPUPluginState)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(CPUPluginState)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).event_mask) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(CPUPluginState), - "::", - stringify!(event_mask) - ) - ); -} -pub const TCGReg_TCG_REG_EAX: TCGReg = TCGReg(0); -pub const TCGReg_TCG_REG_ECX: TCGReg = TCGReg(1); -pub const TCGReg_TCG_REG_EDX: TCGReg = TCGReg(2); -pub const TCGReg_TCG_REG_EBX: TCGReg = TCGReg(3); -pub const TCGReg_TCG_REG_ESP: TCGReg = TCGReg(4); -pub const TCGReg_TCG_REG_EBP: TCGReg = TCGReg(5); -pub const TCGReg_TCG_REG_ESI: TCGReg = TCGReg(6); -pub const TCGReg_TCG_REG_EDI: TCGReg = TCGReg(7); -pub const TCGReg_TCG_REG_R8: TCGReg = TCGReg(8); -pub const TCGReg_TCG_REG_R9: TCGReg = TCGReg(9); -pub const TCGReg_TCG_REG_R10: TCGReg = TCGReg(10); -pub const TCGReg_TCG_REG_R11: TCGReg = TCGReg(11); -pub const TCGReg_TCG_REG_R12: TCGReg = TCGReg(12); -pub const TCGReg_TCG_REG_R13: TCGReg = TCGReg(13); -pub const TCGReg_TCG_REG_R14: TCGReg = TCGReg(14); -pub const TCGReg_TCG_REG_R15: TCGReg = TCGReg(15); -pub const TCGReg_TCG_REG_XMM0: TCGReg = TCGReg(16); -pub const TCGReg_TCG_REG_XMM1: TCGReg = TCGReg(17); -pub const TCGReg_TCG_REG_XMM2: TCGReg = TCGReg(18); -pub const TCGReg_TCG_REG_XMM3: TCGReg = TCGReg(19); -pub const TCGReg_TCG_REG_XMM4: TCGReg = TCGReg(20); -pub const TCGReg_TCG_REG_XMM5: TCGReg = TCGReg(21); -pub const TCGReg_TCG_REG_XMM6: TCGReg = TCGReg(22); -pub const TCGReg_TCG_REG_XMM7: TCGReg = TCGReg(23); -pub const TCGReg_TCG_REG_XMM8: TCGReg = TCGReg(24); -pub const TCGReg_TCG_REG_XMM9: TCGReg = TCGReg(25); -pub const TCGReg_TCG_REG_XMM10: TCGReg = TCGReg(26); -pub const TCGReg_TCG_REG_XMM11: TCGReg = TCGReg(27); -pub const TCGReg_TCG_REG_XMM12: TCGReg = TCGReg(28); -pub const TCGReg_TCG_REG_XMM13: TCGReg = TCGReg(29); -pub const TCGReg_TCG_REG_XMM14: TCGReg = TCGReg(30); -pub const TCGReg_TCG_REG_XMM15: TCGReg = TCGReg(31); -pub const TCGReg_TCG_REG_RAX: TCGReg = TCGReg(0); -pub const TCGReg_TCG_REG_RCX: TCGReg = TCGReg(1); -pub const TCGReg_TCG_REG_RDX: TCGReg = TCGReg(2); -pub const TCGReg_TCG_REG_RBX: TCGReg = TCGReg(3); -pub const TCGReg_TCG_REG_RSP: TCGReg = TCGReg(4); -pub const TCGReg_TCG_REG_RBP: TCGReg = TCGReg(5); -pub const TCGReg_TCG_REG_RSI: TCGReg = TCGReg(6); -pub const TCGReg_TCG_REG_RDI: TCGReg = TCGReg(7); -pub const TCGReg_TCG_AREG0: TCGReg = TCGReg(5); -pub const TCGReg_TCG_REG_CALL_STACK: TCGReg = TCGReg(4); -impl ::std::ops::BitOr for TCGReg { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - TCGReg(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for TCGReg { - #[inline] - fn bitor_assign(&mut self, rhs: TCGReg) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for TCGReg { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - TCGReg(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for TCGReg { - #[inline] - fn bitand_assign(&mut self, rhs: TCGReg) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, 
PartialEq, Eq)] -pub struct TCGReg(pub ::std::os::raw::c_uint); -pub const TCGType_TCG_TYPE_I32: TCGType = TCGType(0); -pub const TCGType_TCG_TYPE_I64: TCGType = TCGType(1); -pub const TCGType_TCG_TYPE_I128: TCGType = TCGType(2); -pub const TCGType_TCG_TYPE_V64: TCGType = TCGType(3); -pub const TCGType_TCG_TYPE_V128: TCGType = TCGType(4); -pub const TCGType_TCG_TYPE_V256: TCGType = TCGType(5); -pub const TCGType_TCG_TYPE_REG: TCGType = TCGType(1); -pub const TCGType_TCG_TYPE_PTR: TCGType = TCGType(1); -impl ::std::ops::BitOr for TCGType { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - TCGType(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for TCGType { - #[inline] - fn bitor_assign(&mut self, rhs: TCGType) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for TCGType { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - TCGType(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for TCGType { - #[inline] - fn bitand_assign(&mut self, rhs: TCGType) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct TCGType(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct TCGv_i64_d { - _unused: [u8; 0], -} -pub type TCGv_i64 = *mut TCGv_i64_d; -pub const TCGTempVal_TEMP_VAL_DEAD: TCGTempVal = TCGTempVal(0); -pub const TCGTempVal_TEMP_VAL_REG: TCGTempVal = TCGTempVal(1); -pub const TCGTempVal_TEMP_VAL_MEM: TCGTempVal = TCGTempVal(2); -pub const TCGTempVal_TEMP_VAL_CONST: TCGTempVal = TCGTempVal(3); -impl ::std::ops::BitOr for TCGTempVal { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - TCGTempVal(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for TCGTempVal { - #[inline] - fn bitor_assign(&mut self, rhs: TCGTempVal) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for TCGTempVal { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - TCGTempVal(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for TCGTempVal { - #[inline] - fn bitand_assign(&mut self, rhs: TCGTempVal) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct TCGTempVal(pub ::std::os::raw::c_uint); -pub const TCGTempKind_TEMP_EBB: TCGTempKind = TCGTempKind(0); -pub const TCGTempKind_TEMP_TB: TCGTempKind = TCGTempKind(1); -pub const TCGTempKind_TEMP_GLOBAL: TCGTempKind = TCGTempKind(2); -pub const TCGTempKind_TEMP_FIXED: TCGTempKind = TCGTempKind(3); -pub const TCGTempKind_TEMP_CONST: TCGTempKind = TCGTempKind(4); -impl ::std::ops::BitOr for TCGTempKind { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - TCGTempKind(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for TCGTempKind { - #[inline] - fn bitor_assign(&mut self, rhs: TCGTempKind) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for TCGTempKind { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - TCGTempKind(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for TCGTempKind { - #[inline] - fn bitand_assign(&mut self, rhs: TCGTempKind) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct TCGTempKind(pub ::std::os::raw::c_uint); -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct TCGTemp { - pub _bitfield_align_1: [u8; 0], - pub _bitfield_1: __BindgenBitfieldUnit<[u8; 6usize]>, - pub val: i64, - pub mem_base: *mut TCGTemp, - pub mem_offset: isize, - pub name: *const 
::std::os::raw::c_char, - pub state: usize, - pub state_ptr: *mut ::std::os::raw::c_void, -} -#[test] -fn bindgen_test_layout_TCGTemp() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 56usize, - concat!("Size of: ", stringify!(TCGTemp)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(TCGTemp)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).val) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(TCGTemp), - "::", - stringify!(val) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mem_base) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(TCGTemp), - "::", - stringify!(mem_base) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mem_offset) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(TCGTemp), - "::", - stringify!(mem_offset) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).name) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(TCGTemp), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).state) as usize - ptr as usize }, - 40usize, - concat!( - "Offset of field: ", - stringify!(TCGTemp), - "::", - stringify!(state) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).state_ptr) as usize - ptr as usize }, - 48usize, - concat!( - "Offset of field: ", - stringify!(TCGTemp), - "::", - stringify!(state_ptr) - ) - ); -} -impl Default for TCGTemp { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl TCGTemp { - #[inline] - pub fn reg(&self) -> TCGReg { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 8u8) as u32) } - } - #[inline] - pub fn set_reg(&mut self, val: TCGReg) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 8u8, val as u64) - } - } - #[inline] - pub fn val_type(&self) -> TCGTempVal { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 8u8) as u32) } - } - #[inline] - pub fn set_val_type(&mut self, val: TCGTempVal) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 8u8, val as u64) - } - } - #[inline] - pub fn base_type(&self) -> TCGType { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u32) } - } - #[inline] - pub fn set_base_type(&mut self, val: TCGType) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 8u8, val as u64) - } - } - #[inline] - pub fn type_(&self) -> TCGType { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 8u8) as u32) } - } - #[inline] - pub fn set_type(&mut self, val: TCGType) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 8u8, val as u64) - } - } - #[inline] - pub fn kind(&self) -> TCGTempKind { - unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 3u8) as u32) } - } - #[inline] - pub fn set_kind(&mut self, val: TCGTempKind) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(32usize, 3u8, val as u64) - } - } - #[inline] - pub fn indirect_reg(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(35usize, 1u8) as u32) } - } - #[inline] - pub fn set_indirect_reg(&mut self, val: 
::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(35usize, 1u8, val as u64) - } - } - #[inline] - pub fn indirect_base(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(36usize, 1u8) as u32) } - } - #[inline] - pub fn set_indirect_base(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(36usize, 1u8, val as u64) - } - } - #[inline] - pub fn mem_coherent(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(37usize, 1u8) as u32) } - } - #[inline] - pub fn set_mem_coherent(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(37usize, 1u8, val as u64) - } - } - #[inline] - pub fn mem_allocated(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(38usize, 1u8) as u32) } - } - #[inline] - pub fn set_mem_allocated(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(38usize, 1u8, val as u64) - } - } - #[inline] - pub fn temp_allocated(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(39usize, 1u8) as u32) } - } - #[inline] - pub fn set_temp_allocated(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(39usize, 1u8, val as u64) - } - } - #[inline] - pub fn temp_subindex(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(40usize, 2u8) as u32) } - } - #[inline] - pub fn set_temp_subindex(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(40usize, 2u8, val as u64) - } - } - #[inline] - pub fn new_bitfield_1( - reg: TCGReg, - val_type: TCGTempVal, - base_type: TCGType, - type_: TCGType, - kind: TCGTempKind, - indirect_reg: ::std::os::raw::c_uint, - indirect_base: ::std::os::raw::c_uint, - mem_coherent: ::std::os::raw::c_uint, - mem_allocated: ::std::os::raw::c_uint, - temp_allocated: ::std::os::raw::c_uint, - temp_subindex: ::std::os::raw::c_uint, - ) -> __BindgenBitfieldUnit<[u8; 6usize]> { - let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 6usize]> = Default::default(); - __bindgen_bitfield_unit.set(0usize, 8u8, { - let reg: u32 = unsafe { ::std::mem::transmute(reg) }; - reg as u64 - }); - __bindgen_bitfield_unit.set(8usize, 8u8, { - let val_type: u32 = unsafe { ::std::mem::transmute(val_type) }; - val_type as u64 - }); - __bindgen_bitfield_unit.set(16usize, 8u8, { - let base_type: u32 = unsafe { ::std::mem::transmute(base_type) }; - base_type as u64 - }); - __bindgen_bitfield_unit.set(24usize, 8u8, { - let type_: u32 = unsafe { ::std::mem::transmute(type_) }; - type_ as u64 - }); - __bindgen_bitfield_unit.set(32usize, 3u8, { - let kind: u32 = unsafe { ::std::mem::transmute(kind) }; - kind as u64 - }); - __bindgen_bitfield_unit.set(35usize, 1u8, { - let indirect_reg: u32 = unsafe { ::std::mem::transmute(indirect_reg) }; - indirect_reg as u64 - }); - __bindgen_bitfield_unit.set(36usize, 1u8, { - let indirect_base: u32 = unsafe { ::std::mem::transmute(indirect_base) }; - indirect_base as u64 - }); - __bindgen_bitfield_unit.set(37usize, 1u8, { - let mem_coherent: u32 = unsafe { ::std::mem::transmute(mem_coherent) }; - mem_coherent as u64 - }); - __bindgen_bitfield_unit.set(38usize, 1u8, { - let mem_allocated: u32 = 
unsafe { ::std::mem::transmute(mem_allocated) }; - mem_allocated as u64 - }); - __bindgen_bitfield_unit.set(39usize, 1u8, { - let temp_allocated: u32 = unsafe { ::std::mem::transmute(temp_allocated) }; - temp_allocated as u64 - }); - __bindgen_bitfield_unit.set(40usize, 2u8, { - let temp_subindex: u32 = unsafe { ::std::mem::transmute(temp_subindex) }; - temp_subindex as u64 - }); - __bindgen_bitfield_unit - } -} -pub const TCGCallReturnKind_TCG_CALL_RET_NORMAL: TCGCallReturnKind = TCGCallReturnKind(0); -pub const TCGCallReturnKind_TCG_CALL_RET_BY_REF: TCGCallReturnKind = TCGCallReturnKind(1); -pub const TCGCallReturnKind_TCG_CALL_RET_BY_VEC: TCGCallReturnKind = TCGCallReturnKind(2); -impl ::std::ops::BitOr for TCGCallReturnKind { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - TCGCallReturnKind(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for TCGCallReturnKind { - #[inline] - fn bitor_assign(&mut self, rhs: TCGCallReturnKind) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for TCGCallReturnKind { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - TCGCallReturnKind(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for TCGCallReturnKind { - #[inline] - fn bitand_assign(&mut self, rhs: TCGCallReturnKind) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct TCGCallReturnKind(pub ::std::os::raw::c_uint); -pub const TCGCallArgumentKind_TCG_CALL_ARG_NORMAL: TCGCallArgumentKind = TCGCallArgumentKind(0); -pub const TCGCallArgumentKind_TCG_CALL_ARG_EVEN: TCGCallArgumentKind = TCGCallArgumentKind(1); -pub const TCGCallArgumentKind_TCG_CALL_ARG_EXTEND: TCGCallArgumentKind = TCGCallArgumentKind(2); -pub const TCGCallArgumentKind_TCG_CALL_ARG_EXTEND_U: TCGCallArgumentKind = TCGCallArgumentKind(3); -pub const TCGCallArgumentKind_TCG_CALL_ARG_EXTEND_S: TCGCallArgumentKind = TCGCallArgumentKind(4); -pub const TCGCallArgumentKind_TCG_CALL_ARG_BY_REF: TCGCallArgumentKind = TCGCallArgumentKind(5); -pub const TCGCallArgumentKind_TCG_CALL_ARG_BY_REF_N: TCGCallArgumentKind = TCGCallArgumentKind(6); -impl ::std::ops::BitOr for TCGCallArgumentKind { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - TCGCallArgumentKind(self.0 | other.0) - } -} -impl ::std::ops::BitOrAssign for TCGCallArgumentKind { - #[inline] - fn bitor_assign(&mut self, rhs: TCGCallArgumentKind) { - self.0 |= rhs.0; - } -} -impl ::std::ops::BitAnd for TCGCallArgumentKind { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - TCGCallArgumentKind(self.0 & other.0) - } -} -impl ::std::ops::BitAndAssign for TCGCallArgumentKind { - #[inline] - fn bitand_assign(&mut self, rhs: TCGCallArgumentKind) { - self.0 &= rhs.0; - } -} -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct TCGCallArgumentKind(pub ::std::os::raw::c_uint); -#[repr(C)] -#[repr(align(4))] -#[derive(Debug, Copy, Clone)] -pub struct TCGCallArgumentLoc { - pub _bitfield_align_1: [u8; 0], - pub _bitfield_1: __BindgenBitfieldUnit<[u8; 4usize]>, -} -#[test] -fn bindgen_test_layout_TCGCallArgumentLoc() { - assert_eq!( - ::std::mem::size_of::(), - 4usize, - concat!("Size of: ", stringify!(TCGCallArgumentLoc)) - ); - assert_eq!( - ::std::mem::align_of::(), - 4usize, - concat!("Alignment of ", stringify!(TCGCallArgumentLoc)) - ); -} -impl Default for TCGCallArgumentLoc { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - 
::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl TCGCallArgumentLoc { - #[inline] - pub fn kind(&self) -> TCGCallArgumentKind { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 8u8) as u32) } - } - #[inline] - pub fn set_kind(&mut self, val: TCGCallArgumentKind) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 8u8, val as u64) - } - } - #[inline] - pub fn arg_slot(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 8u8) as u32) } - } - #[inline] - pub fn set_arg_slot(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(8usize, 8u8, val as u64) - } - } - #[inline] - pub fn ref_slot(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(16usize, 8u8) as u32) } - } - #[inline] - pub fn set_ref_slot(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(16usize, 8u8, val as u64) - } - } - #[inline] - pub fn arg_idx(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 4u8) as u32) } - } - #[inline] - pub fn set_arg_idx(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(24usize, 4u8, val as u64) - } - } - #[inline] - pub fn tmp_subindex(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(28usize, 2u8) as u32) } - } - #[inline] - pub fn set_tmp_subindex(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(28usize, 2u8, val as u64) - } - } - #[inline] - pub fn new_bitfield_1( - kind: TCGCallArgumentKind, - arg_slot: ::std::os::raw::c_uint, - ref_slot: ::std::os::raw::c_uint, - arg_idx: ::std::os::raw::c_uint, - tmp_subindex: ::std::os::raw::c_uint, - ) -> __BindgenBitfieldUnit<[u8; 4usize]> { - let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4usize]> = Default::default(); - __bindgen_bitfield_unit.set(0usize, 8u8, { - let kind: u32 = unsafe { ::std::mem::transmute(kind) }; - kind as u64 - }); - __bindgen_bitfield_unit.set(8usize, 8u8, { - let arg_slot: u32 = unsafe { ::std::mem::transmute(arg_slot) }; - arg_slot as u64 - }); - __bindgen_bitfield_unit.set(16usize, 8u8, { - let ref_slot: u32 = unsafe { ::std::mem::transmute(ref_slot) }; - ref_slot as u64 - }); - __bindgen_bitfield_unit.set(24usize, 4u8, { - let arg_idx: u32 = unsafe { ::std::mem::transmute(arg_idx) }; - arg_idx as u64 - }); - __bindgen_bitfield_unit.set(28usize, 2u8, { - let tmp_subindex: u32 = unsafe { ::std::mem::transmute(tmp_subindex) }; - tmp_subindex as u64 - }); - __bindgen_bitfield_unit - } -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct TCGHelperInfo { - pub func: *mut ::std::os::raw::c_void, - pub name: *const ::std::os::raw::c_char, - pub init: usize, - pub _bitfield_align_1: [u32; 0], - pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize]>, - pub in_: [TCGCallArgumentLoc; 14usize], -} -#[test] -fn bindgen_test_layout_TCGHelperInfo() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 88usize, - concat!("Size of: ", stringify!(TCGHelperInfo)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(TCGHelperInfo)) - ); - assert_eq!( - unsafe { 
::std::ptr::addr_of!((*ptr).func) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(TCGHelperInfo), - "::", - stringify!(func) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).name) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(TCGHelperInfo), - "::", - stringify!(name) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).init) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(TCGHelperInfo), - "::", - stringify!(init) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).in_) as usize - ptr as usize }, - 32usize, - concat!( - "Offset of field: ", - stringify!(TCGHelperInfo), - "::", - stringify!(in_) - ) - ); -} -impl Default for TCGHelperInfo { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -impl TCGHelperInfo { - #[inline] - pub fn typemask(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 32u8) as u32) } - } - #[inline] - pub fn set_typemask(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(0usize, 32u8, val as u64) - } - } - #[inline] - pub fn flags(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 8u8) as u32) } - } - #[inline] - pub fn set_flags(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(32usize, 8u8, val as u64) - } - } - #[inline] - pub fn nr_in(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(40usize, 8u8) as u32) } - } - #[inline] - pub fn set_nr_in(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(40usize, 8u8, val as u64) - } - } - #[inline] - pub fn nr_out(&self) -> ::std::os::raw::c_uint { - unsafe { ::std::mem::transmute(self._bitfield_1.get(48usize, 8u8) as u32) } - } - #[inline] - pub fn set_nr_out(&mut self, val: ::std::os::raw::c_uint) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(48usize, 8u8, val as u64) - } - } - #[inline] - pub fn out_kind(&self) -> TCGCallReturnKind { - unsafe { ::std::mem::transmute(self._bitfield_1.get(56usize, 8u8) as u32) } - } - #[inline] - pub fn set_out_kind(&mut self, val: TCGCallReturnKind) { - unsafe { - let val: u32 = ::std::mem::transmute(val); - self._bitfield_1.set(56usize, 8u8, val as u64) - } - } - #[inline] - pub fn new_bitfield_1( - typemask: ::std::os::raw::c_uint, - flags: ::std::os::raw::c_uint, - nr_in: ::std::os::raw::c_uint, - nr_out: ::std::os::raw::c_uint, - out_kind: TCGCallReturnKind, - ) -> __BindgenBitfieldUnit<[u8; 8usize]> { - let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize]> = Default::default(); - __bindgen_bitfield_unit.set(0usize, 32u8, { - let typemask: u32 = unsafe { ::std::mem::transmute(typemask) }; - typemask as u64 - }); - __bindgen_bitfield_unit.set(32usize, 8u8, { - let flags: u32 = unsafe { ::std::mem::transmute(flags) }; - flags as u64 - }); - __bindgen_bitfield_unit.set(40usize, 8u8, { - let nr_in: u32 = unsafe { ::std::mem::transmute(nr_in) }; - nr_in as u64 - }); - __bindgen_bitfield_unit.set(48usize, 8u8, { - let nr_out: u32 = unsafe { ::std::mem::transmute(nr_out) }; - nr_out as u64 - }); - __bindgen_bitfield_unit.set(56usize, 8u8, { - let 
out_kind: u32 = unsafe { ::std::mem::transmute(out_kind) }; - out_kind as u64 - }); - __bindgen_bitfield_unit - } -} -pub type TCGv = TCGv_i64; -#[doc = " struct qemu_plugin_hwaddr - opaque hw address handle"] -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct qemu_plugin_hwaddr { - pub is_io: bool, - pub is_store: bool, - pub phys_addr: hwaddr, - pub mr: *mut MemoryRegion, -} -#[test] -fn bindgen_test_layout_qemu_plugin_hwaddr() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 24usize, - concat!("Size of: ", stringify!(qemu_plugin_hwaddr)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(qemu_plugin_hwaddr)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).is_io) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(qemu_plugin_hwaddr), - "::", - stringify!(is_io) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).is_store) as usize - ptr as usize }, - 1usize, - concat!( - "Offset of field: ", - stringify!(qemu_plugin_hwaddr), - "::", - stringify!(is_store) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).phys_addr) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(qemu_plugin_hwaddr), - "::", - stringify!(phys_addr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).mr) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(qemu_plugin_hwaddr), - "::", - stringify!(mr) - ) - ); -} -impl Default for qemu_plugin_hwaddr { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -extern "C" { - #[doc = " tlb_plugin_lookup: query last TLB lookup\n @cpu: cpu environment\n\n This function can be used directly after a memory operation to\n query information about the access. 
It is used by the plugin\n infrastructure to expose more information about the address.\n\n It would only fail if not called from an instrumented memory access\n which would be an abuse of the API."] - pub fn tlb_plugin_lookup( - cpu: *mut CPUState, - addr: vaddr, - mmu_idx: ::std::os::raw::c_int, - is_store: bool, - data: *mut qemu_plugin_hwaddr, - ) -> bool; -} -extern "C" { - pub fn libafl_gen_edge( - cpu: *mut CPUState, - src_block: target_ulong, - dst_block: target_ulong, - exit_n: ::std::os::raw::c_int, - cs_base: target_ulong, - flags: u32, - cflags: ::std::os::raw::c_int, - ) -> *mut TranslationBlock; -} -extern "C" { - pub fn libafl_gen_cmp(pc: target_ulong, op0: TCGv, op1: TCGv, ot: MemOp); -} -extern "C" { - pub fn libafl_gen_backdoor(pc: target_ulong); -} -#[repr(C)] -#[derive(Debug, Copy, Clone)] -pub struct libafl_hook { - pub addr: target_ulong, - pub data: u64, - pub num: usize, - pub helper_info: TCGHelperInfo, - pub next: *mut libafl_hook, -} -#[test] -fn bindgen_test_layout_libafl_hook() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 120usize, - concat!("Size of: ", stringify!(libafl_hook)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(libafl_hook)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).addr) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(libafl_hook), - "::", - stringify!(addr) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).data) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(libafl_hook), - "::", - stringify!(data) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).num) as usize - ptr as usize }, - 16usize, - concat!( - "Offset of field: ", - stringify!(libafl_hook), - "::", - stringify!(num) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).helper_info) as usize - ptr as usize }, - 24usize, - concat!( - "Offset of field: ", - stringify!(libafl_hook), - "::", - stringify!(helper_info) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).next) as usize - ptr as usize }, - 112usize, - concat!( - "Offset of field: ", - stringify!(libafl_hook), - "::", - stringify!(next) - ) - ); -} -impl Default for libafl_hook { - fn default() -> Self { - let mut s = ::std::mem::MaybeUninit::::uninit(); - unsafe { - ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); - s.assume_init() - } - } -} -extern "C" { - pub fn libafl_qemu_set_hook( - pc: target_ulong, - callback: ::std::option::Option, - data: u64, - invalidate: ::std::os::raw::c_int, - ) -> usize; -} -extern "C" { - pub fn libafl_qemu_remove_hooks_at( - addr: target_ulong, - invalidate: ::std::os::raw::c_int, - ) -> usize; -} -extern "C" { - pub fn libafl_qemu_remove_hook( - num: usize, - invalidate: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_search_hook(addr: target_ulong) -> *mut libafl_hook; -} -extern "C" { - pub fn libafl_add_backdoor_hook( - exec: ::std::option::Option< - unsafe extern "C" fn(data: u64, cpu: *mut CPUArchState, pc: target_ulong), - >, - data: u64, - ) -> usize; -} -extern "C" { - pub fn libafl_qemu_remove_backdoor_hook( - num: usize, - invalidate: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_add_edge_hook( - gen: ::std::option::Option< - unsafe extern "C" fn(data: u64, src: target_ulong, dst: target_ulong) -> u64, - >, - exec: 
::std::option::Option, - data: u64, - ) -> usize; -} -extern "C" { - pub fn libafl_qemu_remove_edge_hook( - num: usize, - invalidate: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_qemu_edge_hook_set_jit( - num: usize, - jit: ::std::option::Option usize>, - ) -> bool; -} -extern "C" { - pub fn libafl_add_block_hook( - gen: ::std::option::Option u64>, - post_gen: ::std::option::Option< - unsafe extern "C" fn(data: u64, pc: target_ulong, block_length: target_ulong), - >, - exec: ::std::option::Option, - data: u64, - ) -> usize; -} -extern "C" { - pub fn libafl_qemu_remove_block_hook( - num: usize, - invalidate: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_qemu_block_hook_set_jit( - num: usize, - jit: ::std::option::Option usize>, - ) -> bool; -} -extern "C" { - pub fn libafl_add_read_hook( - gen: ::std::option::Option< - unsafe extern "C" fn( - data: u64, - pc: target_ulong, - addr: *mut TCGTemp, - oi: MemOpIdx, - ) -> u64, - >, - exec1: ::std::option::Option, - exec2: ::std::option::Option, - exec4: ::std::option::Option, - exec8: ::std::option::Option, - execN: ::std::option::Option< - unsafe extern "C" fn(data: u64, id: u64, addr: target_ulong, size: usize), - >, - data: u64, - ) -> usize; -} -extern "C" { - pub fn libafl_add_write_hook( - gen: ::std::option::Option< - unsafe extern "C" fn( - data: u64, - pc: target_ulong, - addr: *mut TCGTemp, - oi: MemOpIdx, - ) -> u64, - >, - exec1: ::std::option::Option, - exec2: ::std::option::Option, - exec4: ::std::option::Option, - exec8: ::std::option::Option, - execN: ::std::option::Option< - unsafe extern "C" fn(data: u64, id: u64, addr: target_ulong, size: usize), - >, - data: u64, - ) -> usize; -} -extern "C" { - pub fn libafl_qemu_remove_read_hook( - num: usize, - invalidate: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_qemu_remove_write_hook( - num: usize, - invalidate: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_gen_read(addr: *mut TCGTemp, oi: MemOpIdx); -} -extern "C" { - pub fn libafl_gen_write(addr: *mut TCGTemp, oi: MemOpIdx); -} -extern "C" { - pub fn libafl_add_cmp_hook( - gen: ::std::option::Option< - unsafe extern "C" fn(data: u64, pc: target_ulong, size: usize) -> u64, - >, - exec1: ::std::option::Option, - exec2: ::std::option::Option, - exec4: ::std::option::Option, - exec8: ::std::option::Option, - data: u64, - ) -> usize; -} -extern "C" { - pub fn libafl_qemu_remove_cmp_hook( - num: usize, - invalidate: ::std::os::raw::c_int, - ) -> ::std::os::raw::c_int; -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct syshook_ret { - pub retval: target_ulong, - pub skip_syscall: bool, -} -#[test] -fn bindgen_test_layout_syshook_ret() { - const UNINIT: ::std::mem::MaybeUninit = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::(), - 16usize, - concat!("Size of: ", stringify!(syshook_ret)) - ); - assert_eq!( - ::std::mem::align_of::(), - 8usize, - concat!("Alignment of ", stringify!(syshook_ret)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).retval) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(syshook_ret), - "::", - stringify!(retval) - ) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).skip_syscall) as usize - ptr as usize }, - 8usize, - concat!( - "Offset of field: ", - stringify!(syshook_ret), - "::", - stringify!(skip_syscall) - ) - ); -} -extern "C" 
{ - pub fn libafl_add_pre_syscall_hook( - callback: ::std::option::Option< - unsafe extern "C" fn( - data: u64, - sys_num: ::std::os::raw::c_int, - arg0: target_ulong, - arg1: target_ulong, - arg2: target_ulong, - arg3: target_ulong, - arg4: target_ulong, - arg5: target_ulong, - arg6: target_ulong, - arg7: target_ulong, - ) -> syshook_ret, - >, - data: u64, - ) -> usize; -} -extern "C" { - pub fn libafl_add_post_syscall_hook( - callback: ::std::option::Option< - unsafe extern "C" fn( - data: u64, - ret: target_ulong, - sys_num: ::std::os::raw::c_int, - arg0: target_ulong, - arg1: target_ulong, - arg2: target_ulong, - arg3: target_ulong, - arg4: target_ulong, - arg5: target_ulong, - arg6: target_ulong, - arg7: target_ulong, - ) -> target_ulong, - >, - data: u64, - ) -> usize; -} -extern "C" { - pub fn libafl_qemu_remove_pre_syscall_hook(num: usize) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_qemu_remove_post_syscall_hook(num: usize) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_add_new_thread_hook( - callback: ::std::option::Option bool>, - data: u64, - ) -> usize; -} -extern "C" { - pub fn libafl_qemu_remove_new_thread_hook(num: usize) -> ::std::os::raw::c_int; -} -extern "C" { - pub fn libafl_tcg_gen_asan(addr: *mut TCGTemp, size: usize); -} -extern "C" { - pub fn libafl_jit_trace_edge_hitcount(data: u64, id: u64) -> usize; -} -extern "C" { - pub fn libafl_jit_trace_edge_single(data: u64, id: u64) -> usize; -} -extern "C" { - pub fn libafl_jit_trace_block_hitcount(data: u64, id: u64) -> usize; -} -extern "C" { - pub fn libafl_jit_trace_block_single(data: u64, id: u64) -> usize; -} -extern "C" { - pub fn libafl_qemu_host_page_size() -> usize; -} -#[repr(C)] -#[derive(Debug, Default, Copy, Clone)] -pub struct kvm_dirty_gfn { - pub _address: u8, -} diff --git a/libafl_qemu/libqasan/libqasan.c b/libafl_qemu/libqasan/libqasan.c index 5d6397a4a7..b91dd0f212 100644 --- a/libafl_qemu/libqasan/libqasan.c +++ b/libafl_qemu/libqasan/libqasan.c @@ -361,6 +361,7 @@ void qasan_dealloc(const char *start) { int qasan_swap(int state) { QASAN_DEBUG("SWAP: %d\n", state); /* Do Nothing */ + return 0; } #endif diff --git a/libafl_qemu/libqasan/printf/README.md b/libafl_qemu/libqasan/printf/README.md index 76f8962d13..17f2cec37a 100644 --- a/libafl_qemu/libqasan/printf/README.md +++ b/libafl_qemu/libqasan/printf/README.md @@ -196,7 +196,6 @@ Running with the `--wait-for-keypress exit` option waits for the enter key after ## Projects Using printf - [turnkeyboard](https://github.com/mpaland/turnkeyboard) uses printf as log and generic tty (formatting) output. - printf is part of [embeddedartistry/libc](https://github.com/embeddedartistry/libc), a libc targeted for embedded systems usage. -- The [Hatchling Platform]( https://github.com/adrian3git/HatchlingPlatform) uses printf. 
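An aside on the hook API whose old bindings are removed in the hunk above: `libafl_add_pre_syscall_hook` registers a callback that returns a `syshook_ret`, which presumably lets the callback either pass the syscall through or replace its result. Below is a hypothetical C sketch of such a callback, written against the signatures visible in the removed bindings; the syscall number, fake return value, and function name are made up for illustration.

```c
#include <stdbool.h>
#include <stdint.h>

/* Assumption: 64-bit guest, so target_ulong is 64 bits wide
 * (matching the 16-byte syshook_ret layout asserted above). */
typedef uint64_t target_ulong;

/* Mirrors the syshook_ret struct from the removed bindings; retval is
 * presumably used as the syscall result when skip_syscall is true. */
struct syshook_ret {
  target_ulong retval;
  bool         skip_syscall;
};

/* Hypothetical pre-syscall hook: short-circuit getpid() and let every
 * other syscall run normally. */
static struct syshook_ret pre_syscall_hook(uint64_t data, int sys_num,
                                           target_ulong a0, target_ulong a1,
                                           target_ulong a2, target_ulong a3,
                                           target_ulong a4, target_ulong a5,
                                           target_ulong a6, target_ulong a7) {
  struct syshook_ret ret = {.retval = 0, .skip_syscall = false};
  (void)data; (void)a0; (void)a1; (void)a2; (void)a3;
  (void)a4; (void)a5; (void)a6; (void)a7;
  if (sys_num == 39 /* getpid on x86_64 Linux */) {
    ret.retval       = 4242;  /* report a fixed pid */
    ret.skip_syscall = true;  /* do not execute the real syscall */
  }
  return ret;
}
```

Such a callback would then be passed, together with a user `data` word, to `libafl_add_pre_syscall_hook`, whose declaration is part of the bindings above.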
(Just send me a mail/issue/PR to get *your* project listed here) diff --git a/libafl_qemu/libqasan/string.c b/libafl_qemu/libqasan/string.c index 334f3851e1..80d9e2f01c 100644 --- a/libafl_qemu/libqasan/string.c +++ b/libafl_qemu/libqasan/string.c @@ -128,9 +128,15 @@ int __libqasan_strncmp(const char *str1, const char *str2, size_t len) { return 0; } +unsigned char __libqasan_tolower(unsigned char c) { + if (c >= 'A' && c <= 'Z') return c | 0x20; + return c; +} + int __libqasan_strcasecmp(const char *str1, const char *str2) { while (1) { - const unsigned char c1 = tolower(*str1), c2 = tolower(*str2); + const unsigned char c1 = __libqasan_tolower(*str1), + c2 = __libqasan_tolower(*str2); if (c1 != c2) { return c1 - c2; } if (!c1) { return 0; } @@ -143,7 +149,8 @@ int __libqasan_strcasecmp(const char *str1, const char *str2) { int __libqasan_strncasecmp(const char *str1, const char *str2, size_t len) { while (len--) { - const unsigned char c1 = tolower(*str1), c2 = tolower(*str2); + const unsigned char c1 = __libqasan_tolower(*str1), + c2 = __libqasan_tolower(*str2); if (c1 != c2) { return c1 - c2; } if (!c1) { return 0; } @@ -204,7 +211,7 @@ char *__libqasan_strcasestr(const char *haystack, const char *needle) { const char *n = needle; const char *h = haystack; - while (*n && *h && tolower(*n) == tolower(*h)) { + while (*n && *h && __libqasan_tolower(*n) == __libqasan_tolower(*h)) { n++; h++; } diff --git a/libafl_qemu/runtime/libafl_qemu.h b/libafl_qemu/runtime/libafl_qemu.h index 4ea4fd4233..ace7259f91 100644 --- a/libafl_qemu/runtime/libafl_qemu.h +++ b/libafl_qemu/runtime/libafl_qemu.h @@ -1,6 +1,11 @@ #ifndef LIBAFL_QEMU_H #define LIBAFL_QEMU_H +#include "libafl_qemu_defs.h" +#include "libafl_qemu_arch.h" + +#define LIBAFL_QEMU_PRINTF_MAX_SIZE 4096 + /** * LibAFL QEMU header file. * @@ -11,253 +16,40 @@ * the commands. */ -/* === The private part starts here === */ - -/* This part should not be useful for most people. Callable commands are - * available at the end of this file. */ - -#define STRINGIFY(s) #s -#define XSTRINGIFY(s) STRINGIFY(s) - -// Target Specific imports / definitions -#ifdef _WIN32 - #include - #include - -typedef UINT64 libafl_word; - #define LIBAFL_CALLING_CONVENTION __fastcall - -#else - #include - - #if defined(__x86_64__) || defined(__aarch64__) - typedef uint64_t libafl_word; - #define LIBAFL_CALLING_CONVENTION __attribute__(()) - #endif - - #ifdef __arm__ - typedef uint32_t libafl_word; - #define LIBAFL_CALLING_CONVENTION __attribute__(()) - #endif -#endif - -#define LIBAFL_SYNC_EXIT_OPCODE 0x66f23a0f -#define LIBAFL_BACKDOOR_OPCODE 0x44f23a0f - -#define LIBAFL_QEMU_HDR_VERSION_NUMBER 0111 // TODO: find a nice way to set it. 
- -typedef enum LibaflQemuCommand { - LIBAFL_QEMU_COMMAND_START_VIRT = 0, - LIBAFL_QEMU_COMMAND_START_PHYS = 1, - LIBAFL_QEMU_COMMAND_INPUT_VIRT = 2, - LIBAFL_QEMU_COMMAND_INPUT_PHYS = 3, - LIBAFL_QEMU_COMMAND_END = 4, - LIBAFL_QEMU_COMMAND_SAVE = 5, - LIBAFL_QEMU_COMMAND_LOAD = 6, - LIBAFL_QEMU_COMMAND_VERSION = 7, - LIBAFL_QEMU_COMMAND_VADDR_FILTER_ALLOW = 8, -} LibaflExit; - -typedef enum LibaflQemuEndStatus { +enum LibaflQemuEndStatus { LIBAFL_QEMU_END_UNKNOWN = 0, LIBAFL_QEMU_END_OK = 1, LIBAFL_QEMU_END_CRASH = 2, -} LibaflExitEndParams; +}; -#ifdef _WIN32 - #define LIBAFL_DEFINE_FUNCTIONS(name, _opcode) \ - #ifdef __cplusplus \ - extern "C" { \ - #endif \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call0(libafl_word action); \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call1(libafl_word action, \ - ##name## libafl_word arg1); \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call2(libafl_word action, \ - libafl_word arg1, \ - libafl_word arg2); \ - #ifdef __cplusplus \ - } \ - #endif -#else +libafl_word libafl_qemu_start_virt(void *buf_vaddr, libafl_word max_len); - #if defined(__x86_64__) - #define LIBAFL_DEFINE_FUNCTIONS(name, opcode) \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call0( \ - libafl_word action) { \ - libafl_word ret; \ - __asm__ volatile ( \ - "mov %1, %%rax\n" \ - ".4byte " XSTRINGIFY(opcode) "\n" \ - "mov %%rax, %0\n" \ - : "=g"(ret) \ - : "g"(action) \ - : "%rax" \ - ); \ - return ret; \ - } \ - \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call1( \ - libafl_word action, libafl_word arg1) { \ - libafl_word ret; \ - __asm__ volatile ( \ - "mov %1, %%rax\n" \ - "mov %2, %%rdi\n" \ - ".4byte " XSTRINGIFY(opcode) "\n" \ - "mov %%rax, %0\n" \ - : "=g"(ret) \ - : "g"(action), "g"(arg1) \ - : "%rax", "%rdi" \ - ); \ - return ret; \ - } \ - \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call2( \ - libafl_word action, libafl_word arg1, libafl_word arg2) { \ - libafl_word ret; \ - __asm__ volatile ( \ - "mov %1, %%rax\n" \ - "mov %2, %%rdi\n" \ - "mov %3, %%rsi\n" \ - ".4byte " XSTRINGIFY(opcode) "\n" \ - "mov %%rax, %0\n" \ - : "=g"(ret) \ - : "g"(action), "g"(arg1), "g"(arg2) \ - : "%rax", "%rdi", "%rsi" \ - ); \ - return ret; \ - } +libafl_word libafl_qemu_start_phys(void *buf_paddr, libafl_word max_len); - #elif defined(__arm__) - #define LIBAFL_DEFINE_FUNCTIONS(name, opcode) \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call0( \ - libafl_word action) { \ - libafl_word ret; \ - __asm__ volatile ( \ - "mov r0, %1\n" \ - ".word " XSTRINGIFY(opcode) "\n" \ - "mov %0, r0\n" \ - : "=r"(ret) \ - : "r"(action) \ - : "r0" \ - ); \ - return ret; \ - } \ - \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call1( \ - libafl_word action, libafl_word arg1) { \ - libafl_word ret; \ - __asm__ volatile ( \ - "mov r0, %1\n" \ - "mov r1, %2\n" \ - ".word " XSTRINGIFY(opcode) "\n" \ - "mov %0, r0\n" \ - : "=r"(ret) \ - : "r"(action), "r"(arg1) \ - : "r0", "r1" \ - ); \ - return ret; \ - } \ - \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call2( \ - libafl_word action, libafl_word arg1, libafl_word arg2) { \ - libafl_word ret; \ - __asm__ volatile ( \ - "mov r0, %1\n" \ - "mov r1, %2\n" \ - "mov r2, %3\n" \ - ".word " XSTRINGIFY(opcode) "\n" \ - "mov %0, r0\n" \ - : "=r"(ret) \ - : "r"(action), "r"(arg1), "r"(arg2) \ - : "r0", "r1", "r2" \ - ); \ - return ret; \ - } +libafl_word libafl_qemu_input_virt(void *buf_vaddr, libafl_word max_len); - #elif defined(__aarch64__) - #define 
LIBAFL_DEFINE_FUNCTIONS(name, opcode) \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call0( \ - libafl_word action) { \ - libafl_word ret; \ - __asm__ volatile ( \ - "mov x0, %1\n" \ - ".word " XSTRINGIFY(opcode) "\n" \ - "mov %0, x0\n" \ - : "=r"(ret) \ - : "r"(action) \ - : "x0" \ - ); \ - return ret; \ - } \ - \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call1( \ - libafl_word action, libafl_word arg1) { \ - libafl_word ret; \ - __asm__ volatile ( \ - "mov x0, %1\n" \ - "mov x1, %2\n" \ - ".word " XSTRINGIFY(opcode) "\n" \ - "mov %0, x0\n" \ - : "=r"(ret) \ - : "r"(action), "r"(arg1) \ - : "x0", "x1" \ - ); \ - return ret; \ - } \ - \ - libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call2( \ - libafl_word action, libafl_word arg1, libafl_word arg2) { \ - libafl_word ret; \ - __asm__ volatile ( \ - "mov x0, %1\n" \ - "mov x1, %2\n" \ - "mov x2, %3\n" \ - ".word " XSTRINGIFY(opcode) "\n" \ - "mov %0, x0\n" \ - : "=r"(ret) \ - : "r"(action), "r"(arg1), "r"(arg2) \ - : "x0", "x1", "x2" \ - ); \ - return ret; \ - } - #else - #warning "LibAFL QEMU Runtime does not support your architecture yet, please leave an issue." - #endif - -#endif - -// Generates sync exit functions -LIBAFL_DEFINE_FUNCTIONS(sync_exit, LIBAFL_SYNC_EXIT_OPCODE) - -// Generates backdoor functions -LIBAFL_DEFINE_FUNCTIONS(backdoor, LIBAFL_BACKDOOR_OPCODE) - -/* === The private part ends here === */ - -/* === The public part starts here === */ - -/* LibAFL QEMU Commands */ - -#define LIBAFL_QEMU_START_VIRT(buf_vaddr, max_len) \ - _libafl_sync_exit_call2(LIBAFL_QEMU_COMMAND_START_VIRT, buf_vaddr, max_len) - -#define LIBAFL_QEMU_START_PHYS(buf_paddr, max_len) \ - _libafl_sync_exit_call2(LIBAFL_QEMU_COMMAND_START_PHYS, buf_paddr, max_len) - -#define LIBAFL_QEMU_INPUT_VIRT(buf_vaddr, max_len) \ - _libafl_sync_exit_call2(LIBAFL_QEMU_COMMAND_INPUT_VIRT, buf_vaddr, max_len) - -#define LIBAFL_QEMU_INPUT_PHYS(buf_paddr, max_len) \ - _libafl_exit_call2(LIBAFL_QEMU_COMMAND_INPUT_PHYS, buf_paddr, max_len) - -#define LIBAFL_QEMU_END(status) _libafl_sync_exit_call1(LIBAFL_QEMU_COMMAND_END, status) - -#define LIBAFL_QEMU_SAVE() _libafl_sync_exit_call0(LIBAFL_QEMU_COMMAND_SAVE) - -#define LIBAFL_QEMU_LOAD() _libafl_sync_exit_call0(LIBAFL_QEMU_COMMAND_LOAD) - -#define LIBAFL_QEMU_VERSION() _libafl_sync_exit_call0(LIBAFL_QEMU_COMMAND_VERSION) - -/* === The public part ends here === */ +libafl_word libafl_qemu_input_phys(void *buf_paddr, libafl_word max_len); + +void libafl_qemu_end(enum LibaflQemuEndStatus status); + +void libafl_qemu_save(void); + +void libafl_qemu_load(void); + +libafl_word libafl_qemu_version(void); + +void libafl_qemu_page_current_allow(void); + +void libafl_qemu_internal_error(void); + +void __attribute__((format(printf, 1, 2))) lqprintf(const char *fmt, ...); + +void libafl_qemu_test(void); + +void libafl_qemu_trace_vaddr_range(libafl_word start, libafl_word end); + +void libafl_qemu_trace_vaddr_size(libafl_word start, libafl_word size); + +#include "libafl_qemu_impl.h" #endif diff --git a/libafl_qemu/runtime/libafl_qemu_arch.h b/libafl_qemu/runtime/libafl_qemu_arch.h new file mode 100644 index 0000000000..739d2c95da --- /dev/null +++ b/libafl_qemu/runtime/libafl_qemu_arch.h @@ -0,0 +1,295 @@ +#ifndef LIBAFL_QEMU_ARCH +#define LIBAFL_QEMU_ARCH + +// TODO: slit this in subfiles? 
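To make the intent of the new `libafl_qemu.h` declarations concrete, here is a minimal target-side harness sketch built only on those functions. It assumes the usual snapshot flow, where `libafl_qemu_start_virt` hands the input buffer to the emulator and returns the number of bytes of the current testcase, and `libafl_qemu_end` reports the run status; it also assumes a target where `STDIO_SUPPORT` is available so `lqprintf` can be used. The buffer size and `harness_run` are placeholders, not part of the patch.

```c
#include <stdint.h>
#include "libafl_qemu.h"

#define INPUT_MAX 4096  /* placeholder buffer size */

static uint8_t input_buf[INPUT_MAX];

/* Placeholder for the code under test. */
static int harness_run(const uint8_t *data, libafl_word len) {
  return (len > 0 && data[0] == '!') ? -1 : 0;
}

int main(void) {
  /* Register the input buffer; under the assumed snapshot flow, execution
   * resumes here for every testcase with the buffer refilled and the
   * return value holding the current input length. */
  libafl_word len = libafl_qemu_start_virt(input_buf, INPUT_MAX);

  lqprintf("running testcase of %u bytes\n", (unsigned)len);

  if (harness_run(input_buf, len) != 0) {
    libafl_qemu_end(LIBAFL_QEMU_END_CRASH);  /* report a failing run */
  } else {
    libafl_qemu_end(LIBAFL_QEMU_END_OK);     /* report a clean run */
  }
  return 0;
}
```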
+ +#include "libafl_qemu_defs.h" + +/* Arch-specific definitions + * + * Each architecture should define: + * - [type] libafl_word: native word on the target architecture (often the size of a register) + * - [macro] define STDIO_SUPPORT: if defined, more commands will be supported. + * - [macro] LIBAFL_CALLING_CONVENTION: the calling convention to follow for the architecture. it should be the same as the one use in libafl qemu. + * - [function] snprintf: the standard POSIX snprintf definition. + * - [function] va_{start,arg,end}: standard functions to handle variadic functions + */ + +// Target Specific imports / definitions +#if defined(_WIN32) + // Windows + #include + #include + + typedef UINT64 libafl_word; + #define LIBAFL_CALLING_CONVENTION __fastcall + #define STDIO_SUPPORT +#elif defined(__linux__) + // Linux + #ifdef __KERNEL__ + // Linux kernel + #include + + #if defined(__x86_64__) || defined(__aarch64__) + typedef __u64 libafl_word; + #define LIBAFL_CALLING_CONVENTION __attribute__(()) + #endif + + #ifdef __arm__ + typedef __u32 libafl_word; + #define LIBAFL_CALLING_CONVENTION __attribute__(()) + #endif + #else + // Linux userland + #include + #include + #include + + #define noinline __attribute__((noinline)) + + #if defined(__x86_64__) || defined(__aarch64__) + typedef uint64_t libafl_word; + #define LIBAFL_CALLING_CONVENTION __attribute__(()) + #endif + + #ifdef __arm__ + typedef uint32_t libafl_word; + #define LIBAFL_CALLING_CONVENTION __attribute__(()) + #endif + #endif + + #define STDIO_SUPPORT +#else + // Other + #include + #include + + #define noinline __attribute__((noinline)) + + #if defined(__x86_64__) || defined(__aarch64__) + typedef uint64_t libafl_word; + #define LIBAFL_CALLING_CONVENTION __attribute__(()) + #endif + + #ifdef __arm__ + typedef uint32_t libafl_word; + #define LIBAFL_CALLING_CONVENTION __attribute__(()) + #endif +#endif +#endif + +#ifdef _WIN32 + #define LIBAFL_DEFINE_FUNCTIONS(name, _opcode) \ + #ifdef __cplusplus \ + extern "C" { \ + #endif \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call0(libafl_word action); \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call1(libafl_word action, \ + ##name## libafl_word arg1); \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call2(libafl_word action, \ + libafl_word arg1, \ + libafl_word arg2); \ + #ifdef __cplusplus \ + } \ + #endif +#else + #if defined(__x86_64__) + #define LIBAFL_DEFINE_FUNCTIONS(name, opcode) \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call0( \ + libafl_word action) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mov %1, %%rax\n" \ + ".4byte " XSTRINGIFY(opcode) "\n" \ + "mov %%rax, %0\n" \ + : "=g"(ret) \ + : "g"(action) \ + : "%rax" \ + ); \ + return ret; \ + } \ + \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call1( \ + libafl_word action, libafl_word arg1) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mov %1, %%rax\n" \ + "mov %2, %%rdi\n" \ + ".4byte " XSTRINGIFY(opcode) "\n" \ + "mov %%rax, %0\n" \ + : "=g"(ret) \ + : "g"(action), "g"(arg1) \ + : "%rax", "%rdi" \ + ); \ + return ret; \ + } \ + \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call2( \ + libafl_word action, libafl_word arg1, libafl_word arg2) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mov %1, %%rax\n" \ + "mov %2, %%rdi\n" \ + "mov %3, %%rsi\n" \ + ".4byte " XSTRINGIFY(opcode) "\n" \ + "mov %%rax, %0\n" \ + : "=g"(ret) \ + : "g"(action), "g"(arg1), "g"(arg2) \ + : "%rax", "%rdi", "%rsi" \ + ); \ + return ret; \ + } + + #elif 
defined(__arm__) + #define LIBAFL_DEFINE_FUNCTIONS(name, opcode) \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call0( \ + libafl_word action) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mov r0, %1\n" \ + ".word " XSTRINGIFY(opcode) "\n" \ + "mov %0, r0\n" \ + : "=r"(ret) \ + : "r"(action) \ + : "r0" \ + ); \ + return ret; \ + } \ + \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call1( \ + libafl_word action, libafl_word arg1) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mov r0, %1\n" \ + "mov r1, %2\n" \ + ".word " XSTRINGIFY(opcode) "\n" \ + "mov %0, r0\n" \ + : "=r"(ret) \ + : "r"(action), "r"(arg1) \ + : "r0", "r1" \ + ); \ + return ret; \ + } \ + \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call2( \ + libafl_word action, libafl_word arg1, libafl_word arg2) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mov r0, %1\n" \ + "mov r1, %2\n" \ + "mov r2, %3\n" \ + ".word " XSTRINGIFY(opcode) "\n" \ + "mov %0, r0\n" \ + : "=r"(ret) \ + : "r"(action), "r"(arg1), "r"(arg2) \ + : "r0", "r1", "r2" \ + ); \ + return ret; \ + } + + #elif defined(__aarch64__) + #define LIBAFL_DEFINE_FUNCTIONS(name, opcode) \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call0( \ + libafl_word action) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mov x0, %1\n" \ + ".word " XSTRINGIFY(opcode) "\n" \ + "mov %0, x0\n" \ + : "=r"(ret) \ + : "r"(action) \ + : "x0" \ + ); \ + return ret; \ + } \ + \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call1( \ + libafl_word action, libafl_word arg1) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mov x0, %1\n" \ + "mov x1, %2\n" \ + ".word " XSTRINGIFY(opcode) "\n" \ + "mov %0, x0\n" \ + : "=r"(ret) \ + : "r"(action), "r"(arg1) \ + : "x0", "x1" \ + ); \ + return ret; \ + } \ + \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call2( \ + libafl_word action, libafl_word arg1, libafl_word arg2) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mov x0, %1\n" \ + "mov x1, %2\n" \ + "mov x2, %3\n" \ + ".word " XSTRINGIFY(opcode) "\n" \ + "mov %0, x0\n" \ + : "=r"(ret) \ + : "r"(action), "r"(arg1), "r"(arg2) \ + : "x0", "x1", "x2" \ + ); \ + return ret; \ + } + #elif defined(__riscv) + #define LIBAFL_DEFINE_FUNCTIONS(name, opcode) \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call0( \ + libafl_word action) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mv a0, %1\n" \ + ".word " XSTRINGIFY(opcode) "\n" \ + "mv a0, a0\n" \ + : "=r"(ret) \ + : "r"(action) \ + : "a0" \ + ); \ + return ret; \ + } \ + \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call1( \ + libafl_word action, libafl_word arg1) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mv a0, %1\n" \ + "mv a1, %2\n" \ + ".word " XSTRINGIFY(opcode) "\n" \ + "mv %0, a0\n" \ + : "=r"(ret) \ + : "r"(action), "r"(arg1) \ + : "a0", "a1" \ + ); \ + return ret; \ + } \ + \ + libafl_word LIBAFL_CALLING_CONVENTION _libafl_##name##_call2( \ + libafl_word action, libafl_word arg1, libafl_word arg2) { \ + libafl_word ret; \ + __asm__ volatile ( \ + "mv a0, %1\n" \ + "mv a1, %2\n" \ + "mv a2, %3\n" \ + ".word " XSTRINGIFY(opcode) "\n" \ + "mv %0, a0\n" \ + : "=r"(ret) \ + : "r"(action), "r"(arg1), "r"(arg2) \ + : "a0", "a1", "a2" \ + ); \ + return ret; \ + } + + #else + #warning "LibAFL QEMU Runtime does not support your architecture yet, please leave an issue." 
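All of the per-architecture `LIBAFL_DEFINE_FUNCTIONS` variants above follow the same pattern: load the command id (and up to two arguments) into the architecture's first argument registers, emit the raw sync-exit or backdoor opcode for the patched translator to intercept, and read the result back from the return register. As a concrete illustration, this is roughly what the x86_64 expansion of `_libafl_sync_exit_call1` looks like once `LIBAFL_DEFINE_FUNCTIONS(sync_exit, LIBAFL_SYNC_EXIT_OPCODE)` is expanded (written out by hand here, so treat it as a sketch rather than exact preprocessor output).

```c
#include <stdint.h>

typedef uint64_t libafl_word;  /* libafl_word on x86_64 */

libafl_word _libafl_sync_exit_call1(libafl_word action, libafl_word arg1) {
  libafl_word ret;
  __asm__ volatile(
      "mov %1, %%rax\n"      /* command id goes into rax */
      "mov %2, %%rdi\n"      /* first argument goes into rdi */
      ".4byte 0x66f23a0f\n"  /* LIBAFL_SYNC_EXIT_OPCODE, recognized by the
                                libafl-qemu translator */
      "mov %%rax, %0\n"      /* result comes back in rax */
      : "=g"(ret)
      : "g"(action), "g"(arg1)
      : "%rax", "%rdi");
  return ret;
}
```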
+ #endif + +// Generates sync exit functions +LIBAFL_DEFINE_FUNCTIONS(sync_exit, LIBAFL_SYNC_EXIT_OPCODE) + +// Generates backdoor functions +LIBAFL_DEFINE_FUNCTIONS(backdoor, LIBAFL_BACKDOOR_OPCODE) + +STATIC_CHECKS + +#endif diff --git a/libafl_qemu/runtime/libafl_qemu_defs.h b/libafl_qemu/runtime/libafl_qemu_defs.h new file mode 100644 index 0000000000..2866cadaac --- /dev/null +++ b/libafl_qemu/runtime/libafl_qemu_defs.h @@ -0,0 +1,37 @@ +#ifndef LIBAFL_QEMU_DEFS +#define LIBAFL_QEMU_DEFS + +#define LIBAFL_STRINGIFY(s) #s +#define XSTRINGIFY(s) LIBAFL_STRINGIFY(s) + +#if __STDC_VERSION__ >= 201112L + #define STATIC_CHECKS \ + _Static_assert(sizeof(void *) <= sizeof(libafl_word), \ + "pointer type should not be larger and libafl_word"); +#else + #define STATIC_CHECKS +#endif + +#define LIBAFL_SYNC_EXIT_OPCODE 0x66f23a0f +#define LIBAFL_BACKDOOR_OPCODE 0x44f23a0f + +#define LIBAFL_QEMU_TEST_VALUE 0xcafebabe + +#define LIBAFL_QEMU_HDR_VERSION_NUMBER 0111 // TODO: find a nice way to set it. + +typedef enum LibaflQemuCommand { + LIBAFL_QEMU_COMMAND_START_VIRT = 0, + LIBAFL_QEMU_COMMAND_START_PHYS = 1, + LIBAFL_QEMU_COMMAND_INPUT_VIRT = 2, + LIBAFL_QEMU_COMMAND_INPUT_PHYS = 3, + LIBAFL_QEMU_COMMAND_END = 4, + LIBAFL_QEMU_COMMAND_SAVE = 5, + LIBAFL_QEMU_COMMAND_LOAD = 6, + LIBAFL_QEMU_COMMAND_VERSION = 7, + LIBAFL_QEMU_COMMAND_VADDR_FILTER_ALLOW = 8, + LIBAFL_QEMU_COMMAND_INTERNAL_ERROR = 9, + LIBAFL_QEMU_COMMAND_LQPRINTF = 10, + LIBAFL_QEMU_COMMAND_TEST = 11, +} LibaflExit; + +#endif diff --git a/libafl_qemu/runtime/libafl_qemu_impl.h b/libafl_qemu/runtime/libafl_qemu_impl.h new file mode 100644 index 0000000000..21773b40c2 --- /dev/null +++ b/libafl_qemu/runtime/libafl_qemu_impl.h @@ -0,0 +1,84 @@ +#ifndef LIBAFL_QEMU_IMPL +#define LIBAFL_QEMU_IMPL + +#include "libafl_qemu.h" + +static char _lqprintf_buffer[LIBAFL_QEMU_PRINTF_MAX_SIZE] = {0}; + +noinline libafl_word libafl_qemu_start_virt(void *buf_vaddr, + libafl_word max_len) { + return _libafl_sync_exit_call2(LIBAFL_QEMU_COMMAND_START_VIRT, + (libafl_word)buf_vaddr, max_len); +} + +noinline libafl_word libafl_qemu_start_phys(void *buf_paddr, + libafl_word max_len) { + return _libafl_sync_exit_call2(LIBAFL_QEMU_COMMAND_START_PHYS, + (libafl_word)buf_paddr, max_len); +} + +noinline libafl_word libafl_qemu_input_virt(void *buf_vaddr, + libafl_word max_len) { + return _libafl_sync_exit_call2(LIBAFL_QEMU_COMMAND_INPUT_VIRT, + (libafl_word)buf_vaddr, max_len); +} + +noinline libafl_word libafl_qemu_input_phys(void *buf_paddr, + libafl_word max_len) { + return _libafl_sync_exit_call2(LIBAFL_QEMU_COMMAND_INPUT_PHYS, + (libafl_word)buf_paddr, max_len); +} + +noinline void libafl_qemu_end(enum LibaflQemuEndStatus status) { + _libafl_sync_exit_call1(LIBAFL_QEMU_COMMAND_END, status); +} + +noinline void libafl_qemu_save(void) { + _libafl_sync_exit_call0(LIBAFL_QEMU_COMMAND_SAVE); +} + +noinline void libafl_qemu_load(void) { + _libafl_sync_exit_call0(LIBAFL_QEMU_COMMAND_LOAD); +} + +noinline libafl_word libafl_qemu_version(void) { + return _libafl_sync_exit_call0(LIBAFL_QEMU_COMMAND_VERSION); +} + +noinline void libafl_qemu_internal_error(void) { + _libafl_sync_exit_call0(LIBAFL_QEMU_COMMAND_INTERNAL_ERROR); +} + +#ifdef STDIO_SUPPORT +noinline void lqprintf(const char *fmt, ...) 
{ + va_list args; + va_start(args, fmt); + int res = vsnprintf(_lqprintf_buffer, LIBAFL_QEMU_PRINTF_MAX_SIZE, fmt, args); + va_end(args); + + if (res >= LIBAFL_QEMU_PRINTF_MAX_SIZE) { + // buffer is not big enough, either recompile the target with more + // space or print less things + libafl_qemu_internal_error(); + } + + _libafl_sync_exit_call2(LIBAFL_QEMU_COMMAND_LQPRINTF, + (libafl_word)_lqprintf_buffer, res); +} +#endif + +noinline void libafl_qemu_test(void) { + _libafl_sync_exit_call1(LIBAFL_QEMU_COMMAND_TEST, LIBAFL_QEMU_TEST_VALUE); +} + +noinline void libafl_qemu_trace_vaddr_range(libafl_word start, + libafl_word end) { + _libafl_sync_exit_call2(LIBAFL_QEMU_COMMAND_VADDR_FILTER_ALLOW, start, end); +} + +noinline void libafl_qemu_trace_vaddr_size(libafl_word start, + libafl_word size) { + libafl_qemu_trace_vaddr_range(start, start + size); +} + +#endif \ No newline at end of file diff --git a/libafl_qemu/runtime/libafl_qemu_stub_bindings.rs b/libafl_qemu/runtime/libafl_qemu_stub_bindings.rs index 97b4a2d5c0..706cddb088 100644 --- a/libafl_qemu/runtime/libafl_qemu_stub_bindings.rs +++ b/libafl_qemu/runtime/libafl_qemu_stub_bindings.rs @@ -1,10 +1,15 @@ -/* 1.80.0-nightly */ -/* automatically generated by rust-bindgen 0.69.4 */ +/* 1.84.0-nightly */ +/* qemu git hash: 805b14ffc44999952562e8f219d81c21a4fa50b9 */ +/* automatically generated by rust-bindgen 0.70.1 */ -pub const _STDINT_H: u32 = 1; +pub const LIBAFL_SYNC_EXIT_OPCODE: u32 = 1727150607; +pub const LIBAFL_BACKDOOR_OPCODE: u32 = 1156725263; +pub const LIBAFL_QEMU_TEST_VALUE: u32 = 3405691582; +pub const LIBAFL_QEMU_HDR_VERSION_NUMBER: u32 = 73; +pub const _STDIO_H: u32 = 1; pub const _FEATURES_H: u32 = 1; pub const _DEFAULT_SOURCE: u32 = 1; -pub const __GLIBC_USE_ISOC2X: u32 = 0; +pub const __GLIBC_USE_ISOC23: u32 = 0; pub const __USE_ISOC11: u32 = 1; pub const __USE_ISOC99: u32 = 1; pub const __USE_ISOC95: u32 = 1; @@ -22,12 +27,13 @@ pub const __WORDSIZE: u32 = 64; pub const __WORDSIZE_TIME64_COMPAT32: u32 = 1; pub const __SYSCALL_WORDSIZE: u32 = 64; pub const __TIMESIZE: u32 = 64; +pub const __USE_TIME_BITS64: u32 = 1; pub const __USE_MISC: u32 = 1; pub const __USE_ATFILE: u32 = 1; pub const __USE_FORTIFY_LEVEL: u32 = 0; pub const __GLIBC_USE_DEPRECATED_GETS: u32 = 0; pub const __GLIBC_USE_DEPRECATED_SCANF: u32 = 0; -pub const __GLIBC_USE_C2X_STRTOL: u32 = 0; +pub const __GLIBC_USE_C23_STRTOL: u32 = 0; pub const _STDC_PREDEF_H: u32 = 1; pub const __STDC_IEC_559__: u32 = 1; pub const __STDC_IEC_60559_BFP__: u32 = 201404; @@ -36,17 +42,17 @@ pub const __STDC_IEC_60559_COMPLEX__: u32 = 201404; pub const __STDC_ISO_10646__: u32 = 201706; pub const __GNU_LIBRARY__: u32 = 6; pub const __GLIBC__: u32 = 2; -pub const __GLIBC_MINOR__: u32 = 39; +pub const __GLIBC_MINOR__: u32 = 40; pub const _SYS_CDEFS_H: u32 = 1; pub const __glibc_c99_flexarr_available: u32 = 1; pub const __LDOUBLE_REDIRECTS_TO_FLOAT128_ABI: u32 = 0; pub const __HAVE_GENERIC_SELECTION: u32 = 1; pub const __GLIBC_USE_LIB_EXT2: u32 = 0; pub const __GLIBC_USE_IEC_60559_BFP_EXT: u32 = 0; -pub const __GLIBC_USE_IEC_60559_BFP_EXT_C2X: u32 = 0; +pub const __GLIBC_USE_IEC_60559_BFP_EXT_C23: u32 = 0; pub const __GLIBC_USE_IEC_60559_EXT: u32 = 0; pub const __GLIBC_USE_IEC_60559_FUNCS_EXT: u32 = 0; -pub const __GLIBC_USE_IEC_60559_FUNCS_EXT_C2X: u32 = 0; +pub const __GLIBC_USE_IEC_60559_FUNCS_EXT_C23: u32 = 0; pub const __GLIBC_USE_IEC_60559_TYPES_EXT: u32 = 0; pub const _BITS_TYPES_H: u32 = 1; pub const _BITS_TYPESIZES_H: u32 = 1; @@ -57,6 +63,48 @@ pub const 
__STATFS_MATCHES_STATFS64: u32 = 1; pub const __KERNEL_OLD_TIMEVAL_MATCHES_TIMEVAL64: u32 = 1; pub const __FD_SETSIZE: u32 = 1024; pub const _BITS_TIME64_H: u32 = 1; +pub const _____fpos_t_defined: u32 = 1; +pub const ____mbstate_t_defined: u32 = 1; +pub const _____fpos64_t_defined: u32 = 1; +pub const ____FILE_defined: u32 = 1; +pub const __FILE_defined: u32 = 1; +pub const __struct_FILE_defined: u32 = 1; +pub const _IO_EOF_SEEN: u32 = 16; +pub const _IO_ERR_SEEN: u32 = 32; +pub const _IO_USER_LOCK: u32 = 32768; +pub const __cookie_io_functions_t_defined: u32 = 1; +pub const _IOFBF: u32 = 0; +pub const _IOLBF: u32 = 1; +pub const _IONBF: u32 = 2; +pub const BUFSIZ: u32 = 8192; +pub const EOF: i32 = -1; +pub const SEEK_SET: u32 = 0; +pub const SEEK_CUR: u32 = 1; +pub const SEEK_END: u32 = 2; +pub const P_tmpdir: &[u8; 5] = b"/tmp\0"; +pub const L_tmpnam: u32 = 20; +pub const TMP_MAX: u32 = 238328; +pub const _BITS_STDIO_LIM_H: u32 = 1; +pub const FILENAME_MAX: u32 = 4096; +pub const L_ctermid: u32 = 9; +pub const FOPEN_MAX: u32 = 16; +pub const __HAVE_FLOAT128: u32 = 0; +pub const __HAVE_DISTINCT_FLOAT128: u32 = 0; +pub const __HAVE_FLOAT64X: u32 = 1; +pub const __HAVE_FLOAT64X_LONG_DOUBLE: u32 = 1; +pub const __HAVE_FLOAT16: u32 = 0; +pub const __HAVE_FLOAT32: u32 = 1; +pub const __HAVE_FLOAT64: u32 = 1; +pub const __HAVE_FLOAT32X: u32 = 1; +pub const __HAVE_FLOAT128X: u32 = 0; +pub const __HAVE_DISTINCT_FLOAT16: u32 = 0; +pub const __HAVE_DISTINCT_FLOAT32: u32 = 0; +pub const __HAVE_DISTINCT_FLOAT64: u32 = 0; +pub const __HAVE_DISTINCT_FLOAT32X: u32 = 0; +pub const __HAVE_DISTINCT_FLOAT64X: u32 = 0; +pub const __HAVE_DISTINCT_FLOAT128X: u32 = 0; +pub const __HAVE_FLOATN_NOT_TYPEDEF: u32 = 0; +pub const _STDINT_H: u32 = 1; pub const _BITS_WCHAR_H: u32 = 1; pub const _BITS_STDINT_INTN_H: u32 = 1; pub const _BITS_STDINT_UINTN_H: u32 = 1; @@ -98,9 +146,56 @@ pub const SIG_ATOMIC_MAX: u32 = 2147483647; pub const SIZE_MAX: i32 = -1; pub const WINT_MIN: u32 = 0; pub const WINT_MAX: u32 = 4294967295; -pub const LIBAFL_SYNC_EXIT_OPCODE: u32 = 1727150607; -pub const LIBAFL_BACKDOOR_OPCODE: u32 = 1156725263; -pub const LIBAFL_QEMU_HDR_VERSION_NUMBER: u32 = 73; +pub const LIBAFL_QEMU_PRINTF_MAX_SIZE: u32 = 4096; +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_START_VIRT: LibaflQemuCommand = + LibaflQemuCommand(0); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_START_PHYS: LibaflQemuCommand = + LibaflQemuCommand(1); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_INPUT_VIRT: LibaflQemuCommand = + LibaflQemuCommand(2); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_INPUT_PHYS: LibaflQemuCommand = + LibaflQemuCommand(3); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_END: LibaflQemuCommand = LibaflQemuCommand(4); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_SAVE: LibaflQemuCommand = LibaflQemuCommand(5); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_LOAD: LibaflQemuCommand = LibaflQemuCommand(6); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_VERSION: LibaflQemuCommand = LibaflQemuCommand(7); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_VADDR_FILTER_ALLOW: LibaflQemuCommand = + LibaflQemuCommand(8); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_INTERNAL_ERROR: LibaflQemuCommand = + LibaflQemuCommand(9); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_LQPRINTF: LibaflQemuCommand = LibaflQemuCommand(10); +pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_TEST: LibaflQemuCommand = LibaflQemuCommand(11); +impl ::std::ops::BitOr for LibaflQemuCommand { + type Output = Self; + 
#[inline] + fn bitor(self, other: Self) -> Self { + LibaflQemuCommand(self.0 | other.0) + } +} +impl ::std::ops::BitOrAssign for LibaflQemuCommand { + #[inline] + fn bitor_assign(&mut self, rhs: LibaflQemuCommand) { + self.0 |= rhs.0; + } +} +impl ::std::ops::BitAnd for LibaflQemuCommand { + type Output = Self; + #[inline] + fn bitand(self, other: Self) -> Self { + LibaflQemuCommand(self.0 & other.0) + } +} +impl ::std::ops::BitAndAssign for LibaflQemuCommand { + #[inline] + fn bitand_assign(&mut self, rhs: LibaflQemuCommand) { + self.0 &= rhs.0; + } +} +#[repr(transparent)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +pub struct LibaflQemuCommand(pub ::std::os::raw::c_uint); +pub use self::LibaflQemuCommand as LibaflExit; +pub type __gnuc_va_list = __builtin_va_list; pub type __u_char = ::std::os::raw::c_uchar; pub type __u_short = ::std::os::raw::c_ushort; pub type __u_int = ::std::os::raw::c_uint; @@ -140,31 +235,12 @@ pub type __pid_t = ::std::os::raw::c_int; pub struct __fsid_t { pub __val: [::std::os::raw::c_int; 2usize], } -#[test] -fn bindgen_test_layout___fsid_t() { - const UNINIT: ::std::mem::MaybeUninit<__fsid_t> = ::std::mem::MaybeUninit::uninit(); - let ptr = UNINIT.as_ptr(); - assert_eq!( - ::std::mem::size_of::<__fsid_t>(), - 8usize, - concat!("Size of: ", stringify!(__fsid_t)) - ); - assert_eq!( - ::std::mem::align_of::<__fsid_t>(), - 4usize, - concat!("Alignment of ", stringify!(__fsid_t)) - ); - assert_eq!( - unsafe { ::std::ptr::addr_of!((*ptr).__val) as usize - ptr as usize }, - 0usize, - concat!( - "Offset of field: ", - stringify!(__fsid_t), - "::", - stringify!(__val) - ) - ); -} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __fsid_t"][::std::mem::size_of::<__fsid_t>() - 8usize]; + ["Alignment of __fsid_t"][::std::mem::align_of::<__fsid_t>() - 4usize]; + ["Offset of field: __fsid_t::__val"][::std::mem::offset_of!(__fsid_t, __val) - 0usize]; +}; pub type __clock_t = ::std::os::raw::c_long; pub type __rlim_t = ::std::os::raw::c_ulong; pub type __rlim64_t = ::std::os::raw::c_ulong; @@ -193,6 +269,772 @@ pub type __caddr_t = *mut ::std::os::raw::c_char; pub type __intptr_t = ::std::os::raw::c_long; pub type __socklen_t = ::std::os::raw::c_uint; pub type __sig_atomic_t = ::std::os::raw::c_int; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct __mbstate_t { + pub __count: ::std::os::raw::c_int, + pub __value: __mbstate_t__bindgen_ty_1, +} +#[repr(C)] +#[derive(Copy, Clone)] +pub union __mbstate_t__bindgen_ty_1 { + pub __wch: ::std::os::raw::c_uint, + pub __wchb: [::std::os::raw::c_char; 4usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __mbstate_t__bindgen_ty_1"] + [::std::mem::size_of::<__mbstate_t__bindgen_ty_1>() - 4usize]; + ["Alignment of __mbstate_t__bindgen_ty_1"] + [::std::mem::align_of::<__mbstate_t__bindgen_ty_1>() - 4usize]; + ["Offset of field: __mbstate_t__bindgen_ty_1::__wch"] + [::std::mem::offset_of!(__mbstate_t__bindgen_ty_1, __wch) - 0usize]; + ["Offset of field: __mbstate_t__bindgen_ty_1::__wchb"] + [::std::mem::offset_of!(__mbstate_t__bindgen_ty_1, __wchb) - 0usize]; +}; +impl Default for __mbstate_t__bindgen_ty_1 { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for __mbstate_t__bindgen_ty_1 { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!(f, "__mbstate_t__bindgen_ty_1 {{ 
union }}") + } +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __mbstate_t"][::std::mem::size_of::<__mbstate_t>() - 8usize]; + ["Alignment of __mbstate_t"][::std::mem::align_of::<__mbstate_t>() - 4usize]; + ["Offset of field: __mbstate_t::__count"] + [::std::mem::offset_of!(__mbstate_t, __count) - 0usize]; + ["Offset of field: __mbstate_t::__value"] + [::std::mem::offset_of!(__mbstate_t, __value) - 4usize]; +}; +impl Default for __mbstate_t { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for __mbstate_t { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "__mbstate_t {{ __count: {:?}, __value: {:?} }}", + self.__count, self.__value + ) + } +} +#[repr(C)] +#[derive(Copy, Clone)] +pub struct _G_fpos_t { + pub __pos: __off_t, + pub __state: __mbstate_t, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of _G_fpos_t"][::std::mem::size_of::<_G_fpos_t>() - 16usize]; + ["Alignment of _G_fpos_t"][::std::mem::align_of::<_G_fpos_t>() - 8usize]; + ["Offset of field: _G_fpos_t::__pos"][::std::mem::offset_of!(_G_fpos_t, __pos) - 0usize]; + ["Offset of field: _G_fpos_t::__state"][::std::mem::offset_of!(_G_fpos_t, __state) - 8usize]; +}; +impl Default for _G_fpos_t { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for _G_fpos_t { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "_G_fpos_t {{ __pos: {:?}, __state: {:?} }}", + self.__pos, self.__state + ) + } +} +pub type __fpos_t = _G_fpos_t; +#[repr(C)] +#[derive(Copy, Clone)] +pub struct _G_fpos64_t { + pub __pos: __off64_t, + pub __state: __mbstate_t, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of _G_fpos64_t"][::std::mem::size_of::<_G_fpos64_t>() - 16usize]; + ["Alignment of _G_fpos64_t"][::std::mem::align_of::<_G_fpos64_t>() - 8usize]; + ["Offset of field: _G_fpos64_t::__pos"][::std::mem::offset_of!(_G_fpos64_t, __pos) - 0usize]; + ["Offset of field: _G_fpos64_t::__state"] + [::std::mem::offset_of!(_G_fpos64_t, __state) - 8usize]; +}; +impl Default for _G_fpos64_t { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +impl ::std::fmt::Debug for _G_fpos64_t { + fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result { + write!( + f, + "_G_fpos64_t {{ __pos: {:?}, __state: {:?} }}", + self.__pos, self.__state + ) + } +} +pub type __fpos64_t = _G_fpos64_t; +pub type __FILE = _IO_FILE; +pub type FILE = _IO_FILE; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _IO_marker { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _IO_codecvt { + _unused: [u8; 0], +} +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _IO_wide_data { + _unused: [u8; 0], +} +pub type _IO_lock_t = ::std::os::raw::c_void; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct _IO_FILE { + pub _flags: ::std::os::raw::c_int, + pub _IO_read_ptr: *mut ::std::os::raw::c_char, + pub _IO_read_end: *mut ::std::os::raw::c_char, + pub _IO_read_base: *mut ::std::os::raw::c_char, + pub _IO_write_base: *mut ::std::os::raw::c_char, + pub 
_IO_write_ptr: *mut ::std::os::raw::c_char, + pub _IO_write_end: *mut ::std::os::raw::c_char, + pub _IO_buf_base: *mut ::std::os::raw::c_char, + pub _IO_buf_end: *mut ::std::os::raw::c_char, + pub _IO_save_base: *mut ::std::os::raw::c_char, + pub _IO_backup_base: *mut ::std::os::raw::c_char, + pub _IO_save_end: *mut ::std::os::raw::c_char, + pub _markers: *mut _IO_marker, + pub _chain: *mut _IO_FILE, + pub _fileno: ::std::os::raw::c_int, + pub _flags2: ::std::os::raw::c_int, + pub _old_offset: __off_t, + pub _cur_column: ::std::os::raw::c_ushort, + pub _vtable_offset: ::std::os::raw::c_schar, + pub _shortbuf: [::std::os::raw::c_char; 1usize], + pub _lock: *mut _IO_lock_t, + pub _offset: __off64_t, + pub _codecvt: *mut _IO_codecvt, + pub _wide_data: *mut _IO_wide_data, + pub _freeres_list: *mut _IO_FILE, + pub _freeres_buf: *mut ::std::os::raw::c_void, + pub _prevchain: *mut *mut _IO_FILE, + pub _mode: ::std::os::raw::c_int, + pub _unused2: [::std::os::raw::c_char; 20usize], +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of _IO_FILE"][::std::mem::size_of::<_IO_FILE>() - 216usize]; + ["Alignment of _IO_FILE"][::std::mem::align_of::<_IO_FILE>() - 8usize]; + ["Offset of field: _IO_FILE::_flags"][::std::mem::offset_of!(_IO_FILE, _flags) - 0usize]; + ["Offset of field: _IO_FILE::_IO_read_ptr"] + [::std::mem::offset_of!(_IO_FILE, _IO_read_ptr) - 8usize]; + ["Offset of field: _IO_FILE::_IO_read_end"] + [::std::mem::offset_of!(_IO_FILE, _IO_read_end) - 16usize]; + ["Offset of field: _IO_FILE::_IO_read_base"] + [::std::mem::offset_of!(_IO_FILE, _IO_read_base) - 24usize]; + ["Offset of field: _IO_FILE::_IO_write_base"] + [::std::mem::offset_of!(_IO_FILE, _IO_write_base) - 32usize]; + ["Offset of field: _IO_FILE::_IO_write_ptr"] + [::std::mem::offset_of!(_IO_FILE, _IO_write_ptr) - 40usize]; + ["Offset of field: _IO_FILE::_IO_write_end"] + [::std::mem::offset_of!(_IO_FILE, _IO_write_end) - 48usize]; + ["Offset of field: _IO_FILE::_IO_buf_base"] + [::std::mem::offset_of!(_IO_FILE, _IO_buf_base) - 56usize]; + ["Offset of field: _IO_FILE::_IO_buf_end"] + [::std::mem::offset_of!(_IO_FILE, _IO_buf_end) - 64usize]; + ["Offset of field: _IO_FILE::_IO_save_base"] + [::std::mem::offset_of!(_IO_FILE, _IO_save_base) - 72usize]; + ["Offset of field: _IO_FILE::_IO_backup_base"] + [::std::mem::offset_of!(_IO_FILE, _IO_backup_base) - 80usize]; + ["Offset of field: _IO_FILE::_IO_save_end"] + [::std::mem::offset_of!(_IO_FILE, _IO_save_end) - 88usize]; + ["Offset of field: _IO_FILE::_markers"][::std::mem::offset_of!(_IO_FILE, _markers) - 96usize]; + ["Offset of field: _IO_FILE::_chain"][::std::mem::offset_of!(_IO_FILE, _chain) - 104usize]; + ["Offset of field: _IO_FILE::_fileno"][::std::mem::offset_of!(_IO_FILE, _fileno) - 112usize]; + ["Offset of field: _IO_FILE::_flags2"][::std::mem::offset_of!(_IO_FILE, _flags2) - 116usize]; + ["Offset of field: _IO_FILE::_old_offset"] + [::std::mem::offset_of!(_IO_FILE, _old_offset) - 120usize]; + ["Offset of field: _IO_FILE::_cur_column"] + [::std::mem::offset_of!(_IO_FILE, _cur_column) - 128usize]; + ["Offset of field: _IO_FILE::_vtable_offset"] + [::std::mem::offset_of!(_IO_FILE, _vtable_offset) - 130usize]; + ["Offset of field: _IO_FILE::_shortbuf"] + [::std::mem::offset_of!(_IO_FILE, _shortbuf) - 131usize]; + ["Offset of field: _IO_FILE::_lock"][::std::mem::offset_of!(_IO_FILE, _lock) - 136usize]; + ["Offset of field: _IO_FILE::_offset"][::std::mem::offset_of!(_IO_FILE, _offset) - 144usize]; + ["Offset of field: 
_IO_FILE::_codecvt"][::std::mem::offset_of!(_IO_FILE, _codecvt) - 152usize]; + ["Offset of field: _IO_FILE::_wide_data"] + [::std::mem::offset_of!(_IO_FILE, _wide_data) - 160usize]; + ["Offset of field: _IO_FILE::_freeres_list"] + [::std::mem::offset_of!(_IO_FILE, _freeres_list) - 168usize]; + ["Offset of field: _IO_FILE::_freeres_buf"] + [::std::mem::offset_of!(_IO_FILE, _freeres_buf) - 176usize]; + ["Offset of field: _IO_FILE::_prevchain"] + [::std::mem::offset_of!(_IO_FILE, _prevchain) - 184usize]; + ["Offset of field: _IO_FILE::_mode"][::std::mem::offset_of!(_IO_FILE, _mode) - 192usize]; + ["Offset of field: _IO_FILE::_unused2"][::std::mem::offset_of!(_IO_FILE, _unused2) - 196usize]; +}; +impl Default for _IO_FILE { + fn default() -> Self { + let mut s = ::std::mem::MaybeUninit::::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} +pub type cookie_read_function_t = ::std::option::Option< + unsafe extern "C" fn( + __cookie: *mut ::std::os::raw::c_void, + __buf: *mut ::std::os::raw::c_char, + __nbytes: usize, + ) -> __ssize_t, +>; +pub type cookie_write_function_t = ::std::option::Option< + unsafe extern "C" fn( + __cookie: *mut ::std::os::raw::c_void, + __buf: *const ::std::os::raw::c_char, + __nbytes: usize, + ) -> __ssize_t, +>; +pub type cookie_seek_function_t = ::std::option::Option< + unsafe extern "C" fn( + __cookie: *mut ::std::os::raw::c_void, + __pos: *mut __off64_t, + __w: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int, +>; +pub type cookie_close_function_t = ::std::option::Option< + unsafe extern "C" fn(__cookie: *mut ::std::os::raw::c_void) -> ::std::os::raw::c_int, +>; +#[repr(C)] +#[derive(Debug, Default, Copy, Clone)] +pub struct _IO_cookie_io_functions_t { + pub read: cookie_read_function_t, + pub write: cookie_write_function_t, + pub seek: cookie_seek_function_t, + pub close: cookie_close_function_t, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of _IO_cookie_io_functions_t"] + [::std::mem::size_of::<_IO_cookie_io_functions_t>() - 32usize]; + ["Alignment of _IO_cookie_io_functions_t"] + [::std::mem::align_of::<_IO_cookie_io_functions_t>() - 8usize]; + ["Offset of field: _IO_cookie_io_functions_t::read"] + [::std::mem::offset_of!(_IO_cookie_io_functions_t, read) - 0usize]; + ["Offset of field: _IO_cookie_io_functions_t::write"] + [::std::mem::offset_of!(_IO_cookie_io_functions_t, write) - 8usize]; + ["Offset of field: _IO_cookie_io_functions_t::seek"] + [::std::mem::offset_of!(_IO_cookie_io_functions_t, seek) - 16usize]; + ["Offset of field: _IO_cookie_io_functions_t::close"] + [::std::mem::offset_of!(_IO_cookie_io_functions_t, close) - 24usize]; +}; +pub type cookie_io_functions_t = _IO_cookie_io_functions_t; +pub type va_list = __gnuc_va_list; +pub type off_t = __off_t; +pub type fpos_t = __fpos_t; +extern "C" { + pub static mut stdin: *mut FILE; +} +extern "C" { + pub static mut stdout: *mut FILE; +} +extern "C" { + pub static mut stderr: *mut FILE; +} +extern "C" { + pub fn remove(__filename: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn rename( + __old: *const ::std::os::raw::c_char, + __new: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn renameat( + __oldfd: ::std::os::raw::c_int, + __old: *const ::std::os::raw::c_char, + __newfd: ::std::os::raw::c_int, + __new: *const ::std::os::raw::c_char, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fclose(__stream: *mut FILE) -> 
::std::os::raw::c_int; +} +extern "C" { + pub fn tmpfile() -> *mut FILE; +} +extern "C" { + pub fn tmpnam(arg1: *mut ::std::os::raw::c_char) -> *mut ::std::os::raw::c_char; +} +extern "C" { + pub fn tmpnam_r(__s: *mut ::std::os::raw::c_char) -> *mut ::std::os::raw::c_char; +} +extern "C" { + pub fn tempnam( + __dir: *const ::std::os::raw::c_char, + __pfx: *const ::std::os::raw::c_char, + ) -> *mut ::std::os::raw::c_char; +} +extern "C" { + pub fn fflush(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fflush_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fopen( + __filename: *const ::std::os::raw::c_char, + __modes: *const ::std::os::raw::c_char, + ) -> *mut FILE; +} +extern "C" { + pub fn freopen( + __filename: *const ::std::os::raw::c_char, + __modes: *const ::std::os::raw::c_char, + __stream: *mut FILE, + ) -> *mut FILE; +} +extern "C" { + pub fn fdopen(__fd: ::std::os::raw::c_int, __modes: *const ::std::os::raw::c_char) + -> *mut FILE; +} +extern "C" { + pub fn fopencookie( + __magic_cookie: *mut ::std::os::raw::c_void, + __modes: *const ::std::os::raw::c_char, + __io_funcs: cookie_io_functions_t, + ) -> *mut FILE; +} +extern "C" { + pub fn fmemopen( + __s: *mut ::std::os::raw::c_void, + __len: usize, + __modes: *const ::std::os::raw::c_char, + ) -> *mut FILE; +} +extern "C" { + pub fn open_memstream( + __bufloc: *mut *mut ::std::os::raw::c_char, + __sizeloc: *mut usize, + ) -> *mut FILE; +} +extern "C" { + pub fn setbuf(__stream: *mut FILE, __buf: *mut ::std::os::raw::c_char); +} +extern "C" { + pub fn setvbuf( + __stream: *mut FILE, + __buf: *mut ::std::os::raw::c_char, + __modes: ::std::os::raw::c_int, + __n: usize, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn setbuffer(__stream: *mut FILE, __buf: *mut ::std::os::raw::c_char, __size: usize); +} +extern "C" { + pub fn setlinebuf(__stream: *mut FILE); +} +extern "C" { + pub fn fprintf( + __stream: *mut FILE, + __format: *const ::std::os::raw::c_char, + ... + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn printf(__format: *const ::std::os::raw::c_char, ...) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sprintf( + __s: *mut ::std::os::raw::c_char, + __format: *const ::std::os::raw::c_char, + ... + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn vfprintf( + __s: *mut FILE, + __format: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn vprintf( + __format: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn vsprintf( + __s: *mut ::std::os::raw::c_char, + __format: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn snprintf( + __s: *mut ::std::os::raw::c_char, + __maxlen: ::std::os::raw::c_ulong, + __format: *const ::std::os::raw::c_char, + ... + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn vsnprintf( + __s: *mut ::std::os::raw::c_char, + __maxlen: ::std::os::raw::c_ulong, + __format: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn vasprintf( + __ptr: *mut *mut ::std::os::raw::c_char, + __f: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn __asprintf( + __ptr: *mut *mut ::std::os::raw::c_char, + __fmt: *const ::std::os::raw::c_char, + ... 
+ ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn asprintf( + __ptr: *mut *mut ::std::os::raw::c_char, + __fmt: *const ::std::os::raw::c_char, + ... + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn vdprintf( + __fd: ::std::os::raw::c_int, + __fmt: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn dprintf( + __fd: ::std::os::raw::c_int, + __fmt: *const ::std::os::raw::c_char, + ... + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fscanf( + __stream: *mut FILE, + __format: *const ::std::os::raw::c_char, + ... + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn scanf(__format: *const ::std::os::raw::c_char, ...) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn sscanf( + __s: *const ::std::os::raw::c_char, + __format: *const ::std::os::raw::c_char, + ... + ) -> ::std::os::raw::c_int; +} +pub type _Float32 = f32; +pub type _Float64 = f64; +pub type _Float32x = f64; +pub type _Float64x = u128; +extern "C" { + #[link_name = "\u{1}__isoc99_fscanf"] + pub fn fscanf1( + __stream: *mut FILE, + __format: *const ::std::os::raw::c_char, + ... + ) -> ::std::os::raw::c_int; +} +extern "C" { + #[link_name = "\u{1}__isoc99_scanf"] + pub fn scanf1(__format: *const ::std::os::raw::c_char, ...) -> ::std::os::raw::c_int; +} +extern "C" { + #[link_name = "\u{1}__isoc99_sscanf"] + pub fn sscanf1( + __s: *const ::std::os::raw::c_char, + __format: *const ::std::os::raw::c_char, + ... + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn vfscanf( + __s: *mut FILE, + __format: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn vscanf( + __format: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn vsscanf( + __s: *const ::std::os::raw::c_char, + __format: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + #[link_name = "\u{1}__isoc99_vfscanf"] + pub fn vfscanf1( + __s: *mut FILE, + __format: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + #[link_name = "\u{1}__isoc99_vscanf"] + pub fn vscanf1( + __format: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + #[link_name = "\u{1}__isoc99_vsscanf"] + pub fn vsscanf1( + __s: *const ::std::os::raw::c_char, + __format: *const ::std::os::raw::c_char, + __arg: *mut __va_list_tag, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fgetc(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn getc(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn getchar() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn getc_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn getchar_unlocked() -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fgetc_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fputc(__c: ::std::os::raw::c_int, __stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn putc(__c: ::std::os::raw::c_int, __stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn putchar(__c: ::std::os::raw::c_int) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fputc_unlocked(__c: ::std::os::raw::c_int, __stream: *mut FILE) + -> ::std::os::raw::c_int; +} +extern "C" { + pub fn putc_unlocked(__c: ::std::os::raw::c_int, __stream: *mut FILE) -> 
::std::os::raw::c_int; +} +extern "C" { + pub fn putchar_unlocked(__c: ::std::os::raw::c_int) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn getw(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn putw(__w: ::std::os::raw::c_int, __stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fgets( + __s: *mut ::std::os::raw::c_char, + __n: ::std::os::raw::c_int, + __stream: *mut FILE, + ) -> *mut ::std::os::raw::c_char; +} +extern "C" { + pub fn __getdelim( + __lineptr: *mut *mut ::std::os::raw::c_char, + __n: *mut usize, + __delimiter: ::std::os::raw::c_int, + __stream: *mut FILE, + ) -> __ssize_t; +} +extern "C" { + pub fn getdelim( + __lineptr: *mut *mut ::std::os::raw::c_char, + __n: *mut usize, + __delimiter: ::std::os::raw::c_int, + __stream: *mut FILE, + ) -> __ssize_t; +} +extern "C" { + pub fn getline( + __lineptr: *mut *mut ::std::os::raw::c_char, + __n: *mut usize, + __stream: *mut FILE, + ) -> __ssize_t; +} +extern "C" { + pub fn fputs(__s: *const ::std::os::raw::c_char, __stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn puts(__s: *const ::std::os::raw::c_char) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn ungetc(__c: ::std::os::raw::c_int, __stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fread( + __ptr: *mut ::std::os::raw::c_void, + __size: ::std::os::raw::c_ulong, + __n: ::std::os::raw::c_ulong, + __stream: *mut FILE, + ) -> ::std::os::raw::c_ulong; +} +extern "C" { + pub fn fwrite( + __ptr: *const ::std::os::raw::c_void, + __size: ::std::os::raw::c_ulong, + __n: ::std::os::raw::c_ulong, + __s: *mut FILE, + ) -> ::std::os::raw::c_ulong; +} +extern "C" { + pub fn fread_unlocked( + __ptr: *mut ::std::os::raw::c_void, + __size: usize, + __n: usize, + __stream: *mut FILE, + ) -> usize; +} +extern "C" { + pub fn fwrite_unlocked( + __ptr: *const ::std::os::raw::c_void, + __size: usize, + __n: usize, + __stream: *mut FILE, + ) -> usize; +} +extern "C" { + pub fn fseek( + __stream: *mut FILE, + __off: ::std::os::raw::c_long, + __whence: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn ftell(__stream: *mut FILE) -> ::std::os::raw::c_long; +} +extern "C" { + pub fn rewind(__stream: *mut FILE); +} +extern "C" { + pub fn fseeko( + __stream: *mut FILE, + __off: __off_t, + __whence: ::std::os::raw::c_int, + ) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn ftello(__stream: *mut FILE) -> __off_t; +} +extern "C" { + pub fn fgetpos(__stream: *mut FILE, __pos: *mut fpos_t) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fsetpos(__stream: *mut FILE, __pos: *const fpos_t) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn clearerr(__stream: *mut FILE); +} +extern "C" { + pub fn feof(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn ferror(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn clearerr_unlocked(__stream: *mut FILE); +} +extern "C" { + pub fn feof_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn ferror_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn perror(__s: *const ::std::os::raw::c_char); +} +extern "C" { + pub fn fileno(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn fileno_unlocked(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn pclose(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn popen( + __command: *const ::std::os::raw::c_char, + __modes: *const 
::std::os::raw::c_char, + ) -> *mut FILE; +} +extern "C" { + pub fn ctermid(__s: *mut ::std::os::raw::c_char) -> *mut ::std::os::raw::c_char; +} +extern "C" { + pub fn flockfile(__stream: *mut FILE); +} +extern "C" { + pub fn ftrylockfile(__stream: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn funlockfile(__stream: *mut FILE); +} +extern "C" { + pub fn __uflow(arg1: *mut FILE) -> ::std::os::raw::c_int; +} +extern "C" { + pub fn __overflow(arg1: *mut FILE, arg2: ::std::os::raw::c_int) -> ::std::os::raw::c_int; +} pub type int_least8_t = __int_least8_t; pub type int_least16_t = __int_least16_t; pub type int_least32_t = __int_least32_t; @@ -212,50 +1054,32 @@ pub type uint_fast64_t = ::std::os::raw::c_ulong; pub type intmax_t = __intmax_t; pub type uintmax_t = __uintmax_t; pub type libafl_word = u64; -pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_START_VIRT: LibaflQemuCommand = - LibaflQemuCommand(0); -pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_START_PHYS: LibaflQemuCommand = - LibaflQemuCommand(1); -pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_INPUT_VIRT: LibaflQemuCommand = - LibaflQemuCommand(2); -pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_INPUT_PHYS: LibaflQemuCommand = - LibaflQemuCommand(3); -pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_END: LibaflQemuCommand = LibaflQemuCommand(4); -pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_SAVE: LibaflQemuCommand = LibaflQemuCommand(5); -pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_LOAD: LibaflQemuCommand = LibaflQemuCommand(6); -pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_VERSION: LibaflQemuCommand = LibaflQemuCommand(7); -pub const LibaflQemuCommand_LIBAFL_QEMU_COMMAND_VADDR_FILTER_ALLOW: LibaflQemuCommand = - LibaflQemuCommand(8); -impl ::std::ops::BitOr for LibaflQemuCommand { - type Output = Self; - #[inline] - fn bitor(self, other: Self) -> Self { - LibaflQemuCommand(self.0 | other.0) - } +extern "C" { + pub fn _libafl_sync_exit_call0(action: libafl_word) -> libafl_word; } -impl ::std::ops::BitOrAssign for LibaflQemuCommand { - #[inline] - fn bitor_assign(&mut self, rhs: LibaflQemuCommand) { - self.0 |= rhs.0; - } +extern "C" { + pub fn _libafl_sync_exit_call1(action: libafl_word, arg1: libafl_word) -> libafl_word; } -impl ::std::ops::BitAnd for LibaflQemuCommand { - type Output = Self; - #[inline] - fn bitand(self, other: Self) -> Self { - LibaflQemuCommand(self.0 & other.0) - } +extern "C" { + pub fn _libafl_sync_exit_call2( + action: libafl_word, + arg1: libafl_word, + arg2: libafl_word, + ) -> libafl_word; } -impl ::std::ops::BitAndAssign for LibaflQemuCommand { - #[inline] - fn bitand_assign(&mut self, rhs: LibaflQemuCommand) { - self.0 &= rhs.0; - } +extern "C" { + pub fn _libafl_backdoor_call0(action: libafl_word) -> libafl_word; +} +extern "C" { + pub fn _libafl_backdoor_call1(action: libafl_word, arg1: libafl_word) -> libafl_word; +} +extern "C" { + pub fn _libafl_backdoor_call2( + action: libafl_word, + arg1: libafl_word, + arg2: libafl_word, + ) -> libafl_word; } -#[repr(transparent)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct LibaflQemuCommand(pub ::std::os::raw::c_uint); -pub use self::LibaflQemuCommand as LibaflExit; pub const LibaflQemuEndStatus_LIBAFL_QEMU_END_UNKNOWN: LibaflQemuEndStatus = LibaflQemuEndStatus(0); pub const LibaflQemuEndStatus_LIBAFL_QEMU_END_OK: LibaflQemuEndStatus = LibaflQemuEndStatus(1); pub const LibaflQemuEndStatus_LIBAFL_QEMU_END_CRASH: LibaflQemuEndStatus = LibaflQemuEndStatus(2); @@ -286,32 +1110,94 @@ impl ::std::ops::BitAndAssign for 
LibaflQemuEndStatus { } } #[repr(transparent)] +#[doc = " LibAFL QEMU header file.\n\n This file is a portable header file used to build target harnesses more\n conveniently. Its main purpose is to generate ready-to-use calls to\n communicate with the fuzzer. The list of commands is available at the bottom\n of this file. The rest mostly consists of macros generating the code used by\n the commands."] #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] pub struct LibaflQemuEndStatus(pub ::std::os::raw::c_uint); -pub use self::LibaflQemuEndStatus as LibaflExitEndParams; extern "C" { - pub fn _libafl_sync_exit_call0(action: libafl_word) -> libafl_word; -} -extern "C" { - pub fn _libafl_sync_exit_call1(action: libafl_word, arg1: libafl_word) -> libafl_word; -} -extern "C" { - pub fn _libafl_sync_exit_call2( - action: libafl_word, - arg1: libafl_word, - arg2: libafl_word, + pub fn libafl_qemu_start_virt( + buf_vaddr: *mut ::std::os::raw::c_void, + max_len: libafl_word, ) -> libafl_word; } extern "C" { - pub fn _libafl_backdoor_call0(action: libafl_word) -> libafl_word; -} -extern "C" { - pub fn _libafl_backdoor_call1(action: libafl_word, arg1: libafl_word) -> libafl_word; -} -extern "C" { - pub fn _libafl_backdoor_call2( - action: libafl_word, - arg1: libafl_word, - arg2: libafl_word, + pub fn libafl_qemu_start_phys( + buf_paddr: *mut ::std::os::raw::c_void, + max_len: libafl_word, ) -> libafl_word; } +extern "C" { + pub fn libafl_qemu_input_virt( + buf_vaddr: *mut ::std::os::raw::c_void, + max_len: libafl_word, + ) -> libafl_word; +} +extern "C" { + pub fn libafl_qemu_input_phys( + buf_paddr: *mut ::std::os::raw::c_void, + max_len: libafl_word, + ) -> libafl_word; +} +extern "C" { + pub fn libafl_qemu_end(status: LibaflQemuEndStatus); +} +extern "C" { + pub fn libafl_qemu_save(); +} +extern "C" { + pub fn libafl_qemu_load(); +} +extern "C" { + pub fn libafl_qemu_version() -> libafl_word; +} +extern "C" { + pub fn libafl_qemu_page_current_allow(); +} +extern "C" { + pub fn libafl_qemu_internal_error(); +} +extern "C" { + pub fn lqprintf(fmt: *const ::std::os::raw::c_char, ...); +} +extern "C" { + pub fn libafl_qemu_test(); +} +extern "C" { + pub fn libafl_qemu_trace_vaddr_range(start: libafl_word, end: libafl_word); +} +extern "C" { + pub fn libafl_qemu_trace_vaddr_size(start: libafl_word, size: libafl_word); +} +extern "C" { + pub static mut _lqprintf_buffer: [::std::os::raw::c_char; 4096usize]; +} +pub type __builtin_va_list = [__va_list_tag; 1usize]; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +pub struct __va_list_tag { + pub gp_offset: ::std::os::raw::c_uint, + pub fp_offset: ::std::os::raw::c_uint, + pub overflow_arg_area: *mut ::std::os::raw::c_void, + pub reg_save_area: *mut ::std::os::raw::c_void, +} +#[allow(clippy::unnecessary_operation, clippy::identity_op)] +const _: () = { + ["Size of __va_list_tag"][::std::mem::size_of::<__va_list_tag>() - 24usize]; + ["Alignment of __va_list_tag"][::std::mem::align_of::<__va_list_tag>() - 8usize]; + ["Offset of field: __va_list_tag::gp_offset"] + [::std::mem::offset_of!(__va_list_tag, gp_offset) - 0usize]; + ["Offset of field: __va_list_tag::fp_offset"] + [::std::mem::offset_of!(__va_list_tag, fp_offset) - 4usize]; + ["Offset of field: __va_list_tag::overflow_arg_area"] + [::std::mem::offset_of!(__va_list_tag, overflow_arg_area) - 8usize]; + ["Offset of field: __va_list_tag::reg_save_area"] + [::std::mem::offset_of!(__va_list_tag, reg_save_area) - 16usize]; +}; +impl Default for __va_list_tag { + fn default() -> Self { + let mut s = 
::std::mem::MaybeUninit::<Self>::uninit(); + unsafe { + ::std::ptr::write_bytes(s.as_mut_ptr(), 0, 1); + s.assume_init() + } + } +} diff --git a/libafl_qemu/src/arch/aarch64.rs b/libafl_qemu/src/arch/aarch64.rs index 9a8395f17c..f68fe27b35 100644 --- a/libafl_qemu/src/arch/aarch64.rs +++ b/libafl_qemu/src/arch/aarch64.rs @@ -73,14 +73,6 @@ impl Regs { pub const Lr: Regs = Regs::X30; } -#[cfg(feature = "python")] -impl IntoPy<PyObject> for Regs { - fn into_py(self, py: Python) -> PyObject { - let n: i32 = self.into(); - n.into_py(py) - } -} - /// Return an ARM64 ArchCapstoneBuilder pub fn capstone() -> capstone::arch::arm64::ArchCapstoneBuilder { capstone::Capstone::new() @@ -91,10 +83,7 @@ pub fn capstone() -> capstone::arch::arm64::ArchCapstoneBuilder { pub type GuestReg = u64; impl crate::ArchExtras for crate::CPU { - fn read_return_address<T>(&self) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_return_address(&self) -> Result<GuestReg, QemuRWError> { self.read_reg(Regs::Lr) } @@ -105,10 +94,11 @@ impl crate::ArchExtras for crate::CPU { self.write_reg(Regs::Lr, val) } - fn read_function_argument<T>(&self, conv: CallingConvention, idx: u8) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_function_argument( + &self, + conv: CallingConvention, + idx: u8, + ) -> Result<GuestReg, QemuRWError> { QemuRWError::check_conv(QemuRWErrorKind::Read, CallingConvention::Cdecl, conv)?; let reg_id = match idx { diff --git a/libafl_qemu/src/arch/arm.rs b/libafl_qemu/src/arch/arm.rs index c34eb25e00..29b126964d 100644 --- a/libafl_qemu/src/arch/arm.rs +++ b/libafl_qemu/src/arch/arm.rs @@ -71,14 +71,6 @@ impl Regs { pub const Cpsr: Regs = Regs::R25; } -#[cfg(feature = "python")] -impl IntoPy<PyObject> for Regs { - fn into_py(self, py: Python) -> PyObject { - let n: i32 = self.into(); - n.into_py(py) - } -} - /// Return an ARM ArchCapstoneBuilder pub fn capstone() -> capstone::arch::arm::ArchCapstoneBuilder { capstone::Capstone::new() @@ -96,10 +88,7 @@ pub fn capstone_thumb() -> capstone::arch::arm::ArchCapstoneBuilder { pub type GuestReg = u32; impl crate::ArchExtras for crate::CPU { - fn read_return_address<T>(&self) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_return_address(&self) -> Result<GuestReg, QemuRWError> { self.read_reg(Regs::Lr) } @@ -110,10 +99,11 @@ impl crate::ArchExtras for crate::CPU { self.write_reg(Regs::Lr, val) } - fn read_function_argument<T>(&self, conv: CallingConvention, idx: u8) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_function_argument( + &self, + conv: CallingConvention, + idx: u8, + ) -> Result<GuestReg, QemuRWError> { QemuRWError::check_conv(QemuRWErrorKind::Read, CallingConvention::Cdecl, conv)?; let reg_id = match idx { diff --git a/libafl_qemu/src/arch/hexagon.rs b/libafl_qemu/src/arch/hexagon.rs index 83ee00fee0..113313dd6c 100644 --- a/libafl_qemu/src/arch/hexagon.rs +++ b/libafl_qemu/src/arch/hexagon.rs @@ -92,10 +92,7 @@ impl Regs { pub type GuestReg = u32; impl crate::ArchExtras for crate::CPU { - fn read_return_address<T>(&self) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_return_address(&self) -> Result<GuestReg, QemuRWError> { self.read_reg(Regs::Lr) } @@ -106,10 +103,11 @@ impl crate::ArchExtras for crate::CPU { self.write_reg(Regs::Lr, val) } - fn read_function_argument<T>(&self, conv: CallingConvention, idx: u8) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_function_argument( + &self, + conv: CallingConvention, + idx: u8, + ) -> Result<GuestReg, QemuRWError> { QemuRWError::check_conv(QemuRWErrorKind::Read, CallingConvention::Cdecl, conv)?; // Note that 64 bit values may be passed in two registers (and may have padding), then this mapping is off.
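The hunks above and below repeat one ArchExtras cleanup per backend: the generic accessors that could return any T: From<GuestReg> are narrowed to return the architecture's GuestReg directly, and the per-arch IntoPy impls are dropped in favor of a single IntoPyObject conversion in arch/mod.rs. A minimal sketch of the trait surface these hunks imply follows; the method set and error type are inferred from the surrounding diff, not taken from an authoritative trait definition.

// Sketch only: ArchExtras as suggested by the per-arch hunks in this patch.
// GuestReg, CallingConvention and QemuRWError are the types referenced above;
// the real trait lives elsewhere in libafl_qemu and is not part of this diff.
pub trait ArchExtras {
    fn read_return_address(&self) -> Result<GuestReg, QemuRWError>;
    fn write_return_address<T>(&self, val: T) -> Result<(), QemuRWError>
    where
        T: Into<GuestReg>;
    fn read_function_argument(
        &self,
        conv: CallingConvention,
        idx: u8,
    ) -> Result<GuestReg, QemuRWError>;
}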
diff --git a/libafl_qemu/src/arch/i386.rs b/libafl_qemu/src/arch/i386.rs index dbc3f576e5..56bbd28562 100644 --- a/libafl_qemu/src/arch/i386.rs +++ b/libafl_qemu/src/arch/i386.rs @@ -49,14 +49,6 @@ impl Regs { pub const Pc: Regs = Regs::Eip; } -#[cfg(feature = "python")] -impl IntoPy<PyObject> for Regs { - fn into_py(self, py: Python) -> PyObject { - let n: i32 = self.into(); - n.into_py(py) - } -} - /// Return an X86 ArchCapstoneBuilder pub fn capstone() -> capstone::arch::x86::ArchCapstoneBuilder { capstone::Capstone::new() @@ -67,10 +59,7 @@ pub fn capstone() -> capstone::arch::x86::ArchCapstoneBuilder { pub type GuestReg = u32; impl crate::ArchExtras for crate::CPU { - fn read_return_address<T>(&self) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_return_address(&self) -> Result<GuestReg, QemuRWError> { let stack_ptr: GuestReg = self.read_reg(Regs::Esp)?; let mut ret_addr = [0; size_of::<GuestReg>()]; unsafe { self.read_mem(stack_ptr, &mut ret_addr) }; @@ -88,10 +77,11 @@ impl crate::ArchExtras for crate::CPU { Ok(()) } - fn read_function_argument<T>(&self, conv: CallingConvention, idx: u8) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_function_argument( + &self, + conv: CallingConvention, + idx: u8, + ) -> Result<GuestReg, QemuRWError> { QemuRWError::check_conv(QemuRWErrorKind::Read, CallingConvention::Cdecl, conv)?; match idx { diff --git a/libafl_qemu/src/arch/mips.rs b/libafl_qemu/src/arch/mips.rs index 125443ed86..f770b96817 100644 --- a/libafl_qemu/src/arch/mips.rs +++ b/libafl_qemu/src/arch/mips.rs @@ -72,14 +72,6 @@ impl Regs { pub const Zero: Regs = Regs::R0; } -#[cfg(feature = "python")] -impl IntoPy<PyObject> for Regs { - fn into_py(self, py: Python) -> PyObject { - let n: i32 = self.into(); - n.into_py(py) - } -} - /// Return an MIPS ArchCapstoneBuilder pub fn capstone() -> capstone::arch::mips::ArchCapstoneBuilder { capstone::Capstone::new().mips() @@ -88,10 +80,7 @@ pub fn capstone() -> capstone::arch::mips::ArchCapstoneBuilder { pub type GuestReg = u32; impl crate::ArchExtras for crate::CPU { - fn read_return_address<T>(&self) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_return_address(&self) -> Result<GuestReg, QemuRWError> { self.read_reg(Regs::Ra) } @@ -102,10 +91,11 @@ impl crate::ArchExtras for crate::CPU { self.write_reg(Regs::Ra, val) } - fn read_function_argument<T>(&self, conv: CallingConvention, idx: u8) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_function_argument( + &self, + conv: CallingConvention, + idx: u8, + ) -> Result<GuestReg, QemuRWError> { QemuRWError::check_conv(QemuRWErrorKind::Read, CallingConvention::Cdecl, conv)?; let reg_id = match idx { diff --git a/libafl_qemu/src/arch/mod.rs b/libafl_qemu/src/arch/mod.rs index ff95a150be..d4ed7b978f 100644 --- a/libafl_qemu/src/arch/mod.rs +++ b/libafl_qemu/src/arch/mod.rs @@ -1,5 +1,12 @@ +#[cfg(feature = "python")] +use std::convert::Infallible; + +#[cfg(feature = "python")] +use pyo3::{prelude::*, types::PyInt}; + #[cfg(cpu_target = "aarch64")] pub mod aarch64; + #[cfg(all(cpu_target = "aarch64", not(feature = "clippy")))] pub use aarch64::*; @@ -32,3 +39,20 @@ pub use ppc::*; pub mod hexagon; #[cfg(cpu_target = "hexagon")] pub use hexagon::*; + +#[cfg(any(cpu_target = "riscv32", cpu_target = "riscv64"))] +pub mod riscv; +#[cfg(any(cpu_target = "riscv32", cpu_target = "riscv64"))] +pub use riscv::*; + +#[cfg(feature = "python")] +impl<'py> IntoPyObject<'py> for Regs { + type Target = PyInt; + type Output = Bound<'py, Self::Target>; + type Error = Infallible; + + fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> { + let n: i32 = self.into(); + n.into_pyobject(py) + } +} diff --git a/libafl_qemu/src/arch/ppc.rs b/libafl_qemu/src/arch/ppc.rs index 
351f3e3ea5..4bc7a64cb8 100644 --- a/libafl_qemu/src/arch/ppc.rs +++ b/libafl_qemu/src/arch/ppc.rs @@ -112,14 +112,6 @@ impl Regs { pub const Sp: Regs = Regs::R1; } -#[cfg(feature = "python")] -impl IntoPy for Regs { - fn into_py(self, py: Python) -> PyObject { - let n: i32 = self.into(); - n.into_py(py) - } -} - /// Return an MIPS ArchCapstoneBuilder pub fn capstone() -> capstone::arch::ppc::ArchCapstoneBuilder { capstone::Capstone::new().ppc() @@ -128,10 +120,7 @@ pub fn capstone() -> capstone::arch::ppc::ArchCapstoneBuilder { pub type GuestReg = u32; impl crate::ArchExtras for crate::CPU { - fn read_return_address(&self) -> Result - where - T: From, - { + fn read_return_address(&self) -> Result { self.read_reg(Regs::Lr) } @@ -142,10 +131,11 @@ impl crate::ArchExtras for crate::CPU { self.write_reg(Regs::Lr, val) } - fn read_function_argument(&self, conv: CallingConvention, idx: u8) -> Result - where - T: From, - { + fn read_function_argument( + &self, + conv: CallingConvention, + idx: u8, + ) -> Result { QemuRWError::check_conv(QemuRWErrorKind::Read, CallingConvention::Cdecl, conv)?; let reg_id = match idx { diff --git a/libafl_qemu/src/arch/riscv.rs b/libafl_qemu/src/arch/riscv.rs new file mode 100644 index 0000000000..99daa56f4b --- /dev/null +++ b/libafl_qemu/src/arch/riscv.rs @@ -0,0 +1,157 @@ +use core::ffi::c_long; +use std::sync::OnceLock; + +use capstone::arch::BuildsCapstone; +use enum_map::{enum_map, EnumMap}; +use num_enum::{IntoPrimitive, TryFromPrimitive}; +#[cfg(feature = "python")] +use pyo3::prelude::*; +pub use strum_macros::EnumIter; +#[cfg(feature = "riscv32")] +pub use syscall_numbers::riscv32::*; +#[cfg(feature = "riscv64")] +pub use syscall_numbers::riscv64::*; + +// QEMU specific +#[allow(non_upper_case_globals)] +pub const SYS_syscalls: c_long = 447; +#[allow(non_upper_case_globals)] +pub const SYS_riscv_flush_icache: c_long = SYS_arch_specific_syscall + 15; +#[allow(non_upper_case_globals)] +pub const SYS_riscv_hwprobe: c_long = SYS_arch_specific_syscall + 14; + +use crate::{sync_exit::ExitArgs, CallingConvention, QemuRWError, QemuRWErrorKind}; + +#[derive(IntoPrimitive, TryFromPrimitive, Debug, Clone, Copy, EnumIter)] +#[repr(i32)] +pub enum Regs { + Zero = 0, // x0: Hardwired zero + Ra = 1, // x1: Return address + Sp = 2, // x2: Stack pointer + Gp = 3, // x3: Global pointer + Tp = 4, // x4: Thread pointer + T0 = 5, // x5: Temporary register + T1 = 6, // x6: Temporary register + T2 = 7, // x7: Temporary register + FP = 8, // x8: Saved register / frame pointer + S1 = 9, // x9: Saved register + A0 = 10, // x10: Function argument / return value + A1 = 11, // x11: Function argument / return value + A2 = 12, // x12: Function argument + A3 = 13, // x13: Function argument + A4 = 14, // x14: Function argument + A5 = 15, // x15: Function argument + A6 = 16, // x16: Function argument + A7 = 17, // x17: Function argument + S2 = 18, // x18: Saved register + S3 = 19, // x19: Saved register + S4 = 20, // x20: Saved register + S5 = 21, // x21: Saved register + S6 = 22, // x22: Saved register + S7 = 23, // x23: Saved register + S8 = 24, // x24: Saved register + S9 = 25, // x25: Saved register + S10 = 26, // x26: Saved register + S11 = 27, // x27: Saved register + T3 = 28, // x28: Temporary register + T4 = 29, // x29: Temporary register + T5 = 30, // x30: Temporary register + T6 = 31, // x31: Temporary register + Pc = 32, // Program Counter (code pointer not actual register) +} + +static EXIT_ARCH_REGS: OnceLock> = OnceLock::new(); + +pub fn get_exit_arch_regs() -> &'static 
EnumMap { + EXIT_ARCH_REGS.get_or_init(|| { + enum_map! { + ExitArgs::Ret => Regs::A0, + ExitArgs::Cmd => Regs::A0, + ExitArgs::Arg1 => Regs::A1, + ExitArgs::Arg2 => Regs::A2, + ExitArgs::Arg3 => Regs::A3, + ExitArgs::Arg4 => Regs::A4, + ExitArgs::Arg5 => Regs::A5, + ExitArgs::Arg6 => Regs::A6, + } + }) +} + +#[cfg(not(feature = "riscv64"))] +pub type GuestReg = u32; +#[cfg(feature = "riscv64")] +pub type GuestReg = u64; + +/// Return a RISCV ArchCapstoneBuilder +pub fn capstone() -> capstone::arch::riscv::ArchCapstoneBuilder { + #[cfg(not(feature = "riscv64"))] + return capstone::Capstone::new() + .riscv() + .mode(capstone::arch::riscv::ArchMode::RiscV32); + #[cfg(feature = "riscv64")] + return capstone::Capstone::new() + .riscv() + .mode(capstone::arch::riscv::ArchMode::RiscV64); +} + +impl crate::ArchExtras for crate::CPU { + fn read_return_address(&self) -> Result { + self.read_reg(Regs::Ra) + } + + fn write_return_address(&self, val: T) -> Result<(), QemuRWError> + where + T: Into, + { + self.write_reg(Regs::Ra, val) + } + + fn read_function_argument( + &self, + conv: CallingConvention, + idx: u8, + ) -> Result { + QemuRWError::check_conv(QemuRWErrorKind::Read, CallingConvention::Cdecl, conv)?; + + // Note that 64 bit values may be passed in two registers (and are even-odd eg. A0, A2 and A3 where A1 is empty), then this mapping is off. + // Note: This does not consider the floating point registers. + // See https://riscv.org/wp-content/uploads/2015/01/riscv-calling.pdf + let reg_id = match idx { + 0 => Regs::A0, // argument / return value + 1 => Regs::A1, // argument / return value + 2 => Regs::A2, // argument value + 3 => Regs::A3, // argument value + 4 => Regs::A4, // argument value + 5 => Regs::A5, // argument value + 6 => Regs::A6, // argument value + 7 => Regs::A7, // argument value + r => { + return Err(QemuRWError::new_argument_error( + QemuRWErrorKind::Read, + i32::from(r), + )) + } + }; + + self.read_reg(reg_id) + } + + fn write_function_argument( + &self, + conv: CallingConvention, + idx: i32, + val: T, + ) -> Result<(), QemuRWError> + where + T: Into, + { + QemuRWError::check_conv(QemuRWErrorKind::Write, CallingConvention::Cdecl, conv)?; + + let val: GuestReg = val.into(); + match idx { + 0 => self.write_reg(Regs::A0, val), // argument / return value + 1 => self.write_reg(Regs::A1, val), // argument / return value + r => Err(QemuRWError::new_argument_error(QemuRWErrorKind::Write, r)), + } + } +} diff --git a/libafl_qemu/src/arch/x86_64.rs b/libafl_qemu/src/arch/x86_64.rs index 9e9538f083..119c5a9c62 100644 --- a/libafl_qemu/src/arch/x86_64.rs +++ b/libafl_qemu/src/arch/x86_64.rs @@ -1,10 +1,8 @@ -use std::{mem::size_of, sync::OnceLock}; +use std::{mem::size_of, ops::Range, sync::OnceLock}; use capstone::arch::BuildsCapstone; use enum_map::{enum_map, EnumMap}; use num_enum::{IntoPrimitive, TryFromPrimitive}; -#[cfg(feature = "python")] -use pyo3::prelude::*; pub use strum_macros::EnumIter; pub use syscall_numbers::x86_64::*; @@ -57,14 +55,6 @@ impl Regs { pub const Pc: Regs = Regs::Rip; } -#[cfg(feature = "python")] -impl IntoPy for Regs { - fn into_py(self, py: Python) -> PyObject { - let n: i32 = self.into(); - n.into_py(py) - } -} - /// Return an X86 `ArchCapstoneBuilder` #[must_use] pub fn capstone() -> capstone::arch::x86::ArchCapstoneBuilder { @@ -75,15 +65,14 @@ pub fn capstone() -> capstone::arch::x86::ArchCapstoneBuilder { pub type GuestReg = u64; +pub const PROCESS_ADDRESS_RANGE: Range = 0..0x0000_7fff_ffff_ffff; + impl crate::ArchExtras for crate::CPU { - fn 
read_return_address<T>(&self) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_return_address(&self) -> Result<GuestReg, QemuRWError> { let stack_ptr: GuestReg = self.read_reg(Regs::Rsp)?; let mut ret_addr = [0; size_of::<GuestReg>()]; - unsafe { self.read_mem(stack_ptr, &mut ret_addr) }; - Ok(GuestReg::from_le_bytes(ret_addr).into()) + unsafe { self.read_mem_unchecked(stack_ptr, &mut ret_addr) }; + Ok(GuestReg::from_le_bytes(ret_addr)) } fn write_return_address<T>(&self, val: T) -> Result<(), QemuRWError> @@ -93,14 +82,15 @@ impl crate::ArchExtras for crate::CPU { let stack_ptr: GuestReg = self.read_reg(Regs::Rsp)?; let val: GuestReg = val.into(); let ret_addr = val.to_le_bytes(); - unsafe { self.write_mem(stack_ptr, &ret_addr) }; + unsafe { self.write_mem_unchecked(stack_ptr, &ret_addr) }; Ok(()) } - fn read_function_argument<T>(&self, conv: CallingConvention, idx: u8) -> Result<T, QemuRWError> - where - T: From<GuestReg>, - { + fn read_function_argument( + &self, + conv: CallingConvention, + idx: u8, + ) -> Result<GuestReg, QemuRWError> { QemuRWError::check_conv(QemuRWErrorKind::Read, CallingConvention::Cdecl, conv)?; let reg_id = match idx { diff --git a/libafl_qemu/src/breakpoint.rs b/libafl_qemu/src/breakpoint.rs index 0beadb2b77..e6c1c1ef2c 100644 --- a/libafl_qemu/src/breakpoint.rs +++ b/libafl_qemu/src/breakpoint.rs @@ -1,47 +1,65 @@ use std::{ borrow::Borrow, - fmt::{Display, Formatter}, + fmt::{Debug, Display, Formatter}, hash::{Hash, Hasher}, - rc::Rc, sync::{ atomic::{AtomicU64, Ordering}, OnceLock, }, }; -use libafl::state::{HasExecutions, State}; +use libafl::inputs::UsesInput; use libafl_qemu_sys::GuestAddr; -use crate::{ - command::{CommandManager, IsCommand}, - EmulatorExitHandler, Qemu, QemuHelperTuple, -}; +use crate::{command::CommandManager, Qemu}; #[repr(transparent)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub struct BreakpointId(u64); // TODO: distinguish breakpoints with IDs instead of addresses to avoid collisions.
-#[derive(Debug)] -pub struct Breakpoint +pub struct Breakpoint where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { id: BreakpointId, addr: GuestAddr, - cmd: Option>>, + cmd: Option, disable_on_trigger: bool, enabled: bool, } +impl Clone for Breakpoint +where + CM: CommandManager, + S: UsesInput, +{ + fn clone(&self) -> Self { + Self { + id: self.id, + addr: self.addr, + cmd: self.cmd.clone(), + disable_on_trigger: self.disable_on_trigger, + enabled: self.enabled, + } + } +} + +impl Debug for Breakpoint +where + CM: CommandManager, + S: UsesInput, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "BP {:?} @ addr {:?}", self.id, self.addr) + } +} + impl BreakpointId { pub fn new() -> Self { - static mut BREAKPOINT_ID_COUNTER: OnceLock = OnceLock::new(); - - let counter = unsafe { BREAKPOINT_ID_COUNTER.get_or_init(|| AtomicU64::new(0)) }; + static BREAKPOINT_ID_COUNTER: OnceLock = OnceLock::new(); + let counter = BREAKPOINT_ID_COUNTER.get_or_init(|| AtomicU64::new(0)); BreakpointId(counter.fetch_add(1, Ordering::SeqCst)) } @@ -53,81 +71,67 @@ impl Default for BreakpointId { } } -impl Hash for Breakpoint +impl Hash for Breakpoint where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { fn hash(&self, state: &mut H) { self.id.hash(state); } } -impl PartialEq for Breakpoint +impl PartialEq for Breakpoint where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { fn eq(&self, other: &Self) -> bool { self.id == other.id } } -impl Eq for Breakpoint +impl Eq for Breakpoint where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { } -impl Display for Breakpoint +impl Display for Breakpoint where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { write!(f, "Breakpoint @vaddr 0x{:x}", self.addr) } } -impl Borrow for Breakpoint +impl Borrow for Breakpoint where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { fn borrow(&self) -> &BreakpointId { &self.id } } -impl Borrow for Breakpoint +impl Borrow for Breakpoint where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { fn borrow(&self) -> &GuestAddr { &self.addr } } -impl Breakpoint +impl Breakpoint where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { // Emu will return with the breakpoint as exit reason. #[must_use] @@ -143,15 +147,11 @@ where // Emu will execute the command when it meets the breakpoint. 
#[must_use] - pub fn with_command + 'static>( - addr: GuestAddr, - cmd: C, - disable_on_trigger: bool, - ) -> Self { + pub fn with_command(addr: GuestAddr, cmd: CM::Commands, disable_on_trigger: bool) -> Self { Self { id: BreakpointId::new(), addr, - cmd: Some(Rc::new(cmd)), + cmd: Some(cmd), disable_on_trigger, enabled: false, } @@ -167,21 +167,21 @@ where self.addr } - pub fn enable(&mut self, qemu: &Qemu) { + pub fn enable(&mut self, qemu: Qemu) { if !self.enabled { qemu.set_breakpoint(self.addr); self.enabled = true; } } - pub fn disable(&mut self, qemu: &Qemu) { + pub fn disable(&mut self, qemu: Qemu) { if self.enabled { qemu.remove_breakpoint(self.addr.into()); self.enabled = false; } } - pub fn trigger(&mut self, qemu: &Qemu) -> Option>> { + pub fn trigger(&mut self, qemu: Qemu) -> Option { if self.disable_on_trigger { self.disable(qemu); } diff --git a/libafl_qemu/src/command/mod.rs b/libafl_qemu/src/command/mod.rs index f2faf01cc0..ae87cfcc3f 100644 --- a/libafl_qemu/src/command/mod.rs +++ b/libafl_qemu/src/command/mod.rs @@ -1,35 +1,35 @@ -#[cfg(emulation_mode = "systemmode")] -use std::collections::HashSet; use std::{ - fmt::{Debug, Display, Error, Formatter}, - rc::Rc, + fmt, + fmt::{Debug, Display, Formatter}, + marker::PhantomData, + ops::Range, }; use enum_map::{Enum, EnumMap}; -use hashbrown::HashMap; use libafl::{ executors::ExitKind, - inputs::HasTargetBytes, - state::{HasExecutions, State}, + inputs::{HasTargetBytes, UsesInput}, }; use libafl_bolts::AsSlice; +use libafl_qemu_sys::GuestAddr; +#[cfg(feature = "systemmode")] +use libafl_qemu_sys::GuestPhysAddr; +use libc::c_uint; use num_enum::TryFromPrimitive; +use paste::paste; -#[cfg(emulation_mode = "systemmode")] -use crate::QemuInstrumentationPagingFilter; use crate::{ command::parser::{ EndCommandParser, InputPhysCommandParser, InputVirtCommandParser, LoadCommandParser, - NativeCommandParser, SaveCommandParser, StartPhysCommandParser, StartVirtCommandParser, - VaddrFilterAllowRangeCommandParser, VersionCommandParser, + LqprintfCommandParser, NativeCommandParser, SaveCommandParser, StartPhysCommandParser, + StartVirtCommandParser, TestCommandParser, VaddrFilterAllowRangeCommandParser, + VersionCommandParser, }, - executor::QemuExecutorState, get_exit_arch_regs, + modules::EmulatorModuleTuple, sync_exit::ExitArgs, - Emulator, EmulatorExitHandler, EmulatorMemoryChunk, ExitHandlerError, ExitHandlerResult, - GuestReg, HasInstrumentationFilter, InputLocation, IsFilter, IsSnapshotManager, Qemu, - QemuHelperTuple, QemuInstrumentationAddressRangeFilter, QemuRWError, Regs, - StdEmulatorExitHandler, StdInstrumentationFilter, CPU, + Emulator, EmulatorDriverError, EmulatorDriverResult, GuestReg, InputLocation, + IsSnapshotManager, Qemu, QemuMemoryChunk, QemuRWError, Regs, StdEmulatorDriver, CPU, }; pub mod parser; @@ -51,109 +51,149 @@ mod bindings { pub const VERSION: u64 = bindings::LIBAFL_QEMU_HDR_VERSION_NUMBER as u64; macro_rules! define_std_command_manager { - ($name:ident, [$($native_command_parser:ident),+]) => { - pub struct $name - where - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, - { - native_command_parsers: - HashMap, QT, S>>>, - } + ($name:ident, [$($command:ty),+], [$($native_command_parser:ty),+]) => { + paste! 
{ + pub struct $name { + has_started: bool, + phantom: PhantomData, + } - impl $name - where - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, - { - #[must_use] - pub fn new() -> Self { - let native_parsers = Box::new( - vec![$(Box::new($native_command_parser) - as Box< - dyn NativeCommandParser< - Self, - StdEmulatorExitHandler, - QT, - S, - >, - >),*] - .into_iter(), - ); - - let mut parsers: HashMap< - GuestReg, - Box, QT, S>>, - > = HashMap::new(); - - for parser in native_parsers { - assert!(parsers - .insert(parser.command_id(), parser) - .is_none(), "Trying to use native commands with the same ID"); - } - - Self { - native_command_parsers: parsers, + impl Clone for $name { + fn clone(&self) -> Self { + Self { + has_started: self.has_started, + phantom: PhantomData, + } } } - } - impl CommandManager, QT, S> for $name - where - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, - { - fn parse( - &self, - qemu: Qemu, - ) -> Result, QT, S>>, CommandError> { - let arch_regs_map: &'static EnumMap = get_exit_arch_regs(); - let cmd_id: GuestReg = qemu.read_reg::(arch_regs_map[ExitArgs::Cmd])?; - - let cmd_parser = self - .native_command_parsers - .get(&cmd_id) - .ok_or(CommandError::UnknownCommand(cmd_id))?; - let cmd = cmd_parser.parse(qemu, arch_regs_map)?; - - Ok(cmd) + impl Debug for $name { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{} (has started? {:?})", stringify!($name), self.has_started) + } } - } - impl Debug for $name - where - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, - { - fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error> { - write!(f, stringify!($name)) + impl Default for $name { + fn default() -> Self { + Self { + has_started: false, + phantom: PhantomData, + } + } } - } - impl Default for $name - where - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, - { - fn default() -> Self { - Self::new() + impl $name { + fn start(&mut self) -> bool { + let tmp = self.has_started; + self.has_started = true; + tmp + } + + fn has_started(&self) -> bool { + self.has_started + } } + + impl CommandManager for $name + where + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, + S::Input: HasTargetBytes, + SM: IsSnapshotManager, + { + type Commands = [<$name Commands>]; + + #[deny(unreachable_patterns)] + fn parse(&self, qemu: Qemu) -> Result { + let arch_regs_map: &'static EnumMap = get_exit_arch_regs(); + let cmd_id = qemu.read_reg(arch_regs_map[ExitArgs::Cmd])? 
as c_uint; + + match cmd_id { + // >::COMMAND_ID => Ok(StdCommandManagerCommands::StartPhysCommandParserCmd(>::parse(qemu, arch_regs_map)?)), + $(<$native_command_parser as NativeCommandParser>::COMMAND_ID => Ok(<$native_command_parser as NativeCommandParser>::parse(qemu, arch_regs_map)?.into())),+, + _ => Err(CommandError::UnknownCommand(cmd_id.into())), + } + } + } + + #[derive(Clone, Debug)] + pub enum [<$name Commands>] + { + // StartPhysCommand(StartPhysCommand) + $($command($command)),+, + } + + impl IsCommand<$name, StdEmulatorDriver, ET, S, SM> for [<$name Commands>] + where + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, + S::Input: HasTargetBytes, + SM: IsSnapshotManager, + { + fn usable_at_runtime(&self) -> bool { + match self { + $([<$name Commands>]::$command(cmd) => <$command as IsCommand<$name, StdEmulatorDriver, ET, S, SM>>::usable_at_runtime(cmd)),+ + } + } + + fn run(&self, + emu: &mut Emulator<$name, StdEmulatorDriver, ET, S, SM>, + state: &mut S, + input: &S::Input, + ret_reg: Option + ) -> Result, StdEmulatorDriver, ET, S, SM>>, EmulatorDriverError> { + match self { + $([<$name Commands>]::$command(cmd) => cmd.run(emu, state, input, ret_reg)),+ + } + } + } + + $( + impl From<$command> for [<$name Commands>] { + fn from(cmd: $command) -> [<$name Commands>] { + [<$name Commands>]::$command(cmd) + } + } + )+ } }; } +pub trait CommandManager: Sized + Debug +where + S: UsesInput, +{ + type Commands: IsCommand; + + fn parse(&self, qemu: Qemu) -> Result; +} + +#[derive(Clone, Debug)] +pub struct NopCommandManager; +impl CommandManager for NopCommandManager +where + S: UsesInput, +{ + type Commands = NopCommand; + + fn parse(&self, _qemu: Qemu) -> Result { + Ok(NopCommand) + } +} + define_std_command_manager!( StdCommandManager, + [ + StartCommand, + InputCommand, + SaveCommand, + LoadCommand, + EndCommand, + VersionCommand, + AddressAllowCommand, + LqprintfCommand, + TestCommand + ], [ StartPhysCommandParser, StartVirtCommandParser, @@ -163,19 +203,12 @@ define_std_command_manager!( LoadCommandParser, EndCommandParser, VersionCommandParser, - VaddrFilterAllowRangeCommandParser + VaddrFilterAllowRangeCommandParser, + LqprintfCommandParser, + TestCommandParser ] ); -pub trait CommandManager: Sized -where - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, -{ - fn parse(&self, qemu: Qemu) -> Result>, CommandError>; -} - #[derive(Debug, Clone, Enum, TryFromPrimitive)] #[repr(u64)] pub enum NativeExitKind { @@ -184,12 +217,10 @@ pub enum NativeExitKind { Crash = bindings::LibaflQemuEndStatus_LIBAFL_QEMU_END_CRASH.0 as u64, // Crash reported in the VM } -pub trait IsCommand: Debug + Display +pub trait IsCommand: Clone + Debug where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { /// Used to know whether the command can be run during a backdoor, or if it is necessary to go out of /// the QEMU VM to run the command. @@ -200,25 +231,24 @@ where /// - `ret_reg`: The register in which the guest return value should be written, if any. 
/// Returns /// - `InnerHandlerResult`: How the high-level handler should behave + #[allow(clippy::type_complexity)] fn run( &self, - emu: &Emulator, - qemu_executor_state: &mut QemuExecutorState, + emu: &mut Emulator, + state: &mut S, input: &S::Input, ret_reg: Option, - ) -> Result>, ExitHandlerError>; + ) -> Result>, EmulatorDriverError>; } -#[cfg(emulation_mode = "systemmode")] -pub type PagingFilterCommand = FilterCommand; - -pub type AddressRangeFilterCommand = FilterCommand; - #[derive(Debug, Clone)] pub enum CommandError { UnknownCommand(GuestReg), RWError(QemuRWError), VersionDifference(u64), + TestDifference(GuestReg, GuestReg), // received, expected + StartedTwice, + EndBeforeStart, } impl From for CommandError { @@ -228,14 +258,41 @@ impl From for CommandError { } #[derive(Debug, Clone)] -pub struct SaveCommand; +pub struct NopCommand; -impl IsCommand, QT, S> for SaveCommand +impl Display for NopCommand { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "NopCommand") + } +} + +impl IsCommand for NopCommand where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, + CM: CommandManager, + S: UsesInput, +{ + fn usable_at_runtime(&self) -> bool { + true + } + + fn run( + &self, + _emu: &mut Emulator, + _state: &mut S, + _input: &S::Input, + _ret_reg: Option, + ) -> Result>, EmulatorDriverError> { + Ok(None) + } +} + +#[derive(Debug, Clone)] +pub struct SaveCommand; +impl IsCommand for SaveCommand +where + ET: EmulatorModuleTuple, + CM: CommandManager, + S: UsesInput + Unpin, SM: IsSnapshotManager, { fn usable_at_runtime(&self) -> bool { @@ -244,40 +301,18 @@ where fn run( &self, - emu: &Emulator, QT, S>, - #[cfg(emulation_mode = "systemmode")] qemu_executor_state: &mut QemuExecutorState, - #[cfg(not(emulation_mode = "systemmode"))] _qemu_executor_state: &mut QemuExecutorState< - QT, - S, - >, + emu: &mut Emulator, + _state: &mut S, _input: &S::Input, _ret_reg: Option, - ) -> Result, QT, S>>, ExitHandlerError> + ) -> Result>, EmulatorDriverError> { let qemu = emu.qemu(); - let emu_exit_handler = emu.exit_handler().borrow_mut(); + let snapshot_id = emu.snapshot_manager_mut().save(qemu); - let snapshot_id = emu_exit_handler.snapshot_manager_borrow_mut().save(qemu); - emu_exit_handler + emu.driver_mut() .set_snapshot_id(snapshot_id) - .map_err(|_| ExitHandlerError::MultipleSnapshotDefinition)?; - - #[cfg(emulation_mode = "systemmode")] - { - let qemu_helpers = qemu_executor_state.hooks_mut().helpers_mut(); - - let mut allowed_paging_ids = HashSet::new(); - - let current_paging_id = qemu.current_cpu().unwrap().current_paging_id().unwrap(); - allowed_paging_ids.insert(current_paging_id); - - let paging_filter = - HasInstrumentationFilter::::filter_mut( - qemu_helpers, - ); - - *paging_filter = QemuInstrumentationPagingFilter::AllowList(allowed_paging_ids); - } + .map_err(|_| EmulatorDriverError::MultipleSnapshotDefinition)?; Ok(None) } @@ -286,12 +321,10 @@ where #[derive(Debug, Clone)] pub struct LoadCommand; -impl IsCommand, QT, S> for LoadCommand +impl IsCommand for LoadCommand where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, + CM: CommandManager, + S: UsesInput, SM: IsSnapshotManager, { fn usable_at_runtime(&self) -> bool { @@ -300,27 +333,23 @@ where fn run( &self, - emu: &Emulator, QT, S>, - _qemu_executor_state: &mut QemuExecutorState, + emu: &mut Emulator, + _state: &mut 
S, _input: &S::Input, _ret_reg: Option, - ) -> Result, QT, S>>, ExitHandlerError> + ) -> Result>, EmulatorDriverError> { let qemu = emu.qemu(); - let emu_exit_handler = emu.exit_handler().borrow_mut(); - let snapshot_id = emu_exit_handler + let snapshot_id = emu + .driver_mut() .snapshot_id() - .ok_or(ExitHandlerError::SnapshotNotFound)?; + .ok_or(EmulatorDriverError::SnapshotNotFound)?; - emu_exit_handler - .snapshot_manager_borrow_mut() - .restore(&snapshot_id, qemu)?; + emu.snapshot_manager_mut().restore(qemu, &snapshot_id)?; #[cfg(feature = "paranoid_debug")] - emu_exit_handler - .snapshot_manager_borrow() - .check(&snapshot_id, emu.qemu())?; + emu.snapshot_manager_mut().check(qemu, &snapshot_id)?; Ok(None) } @@ -328,17 +357,15 @@ where #[derive(Debug, Clone)] pub struct InputCommand { - location: EmulatorMemoryChunk, + location: QemuMemoryChunk, cpu: CPU, } -impl IsCommand, QT, S> for InputCommand +impl IsCommand for InputCommand where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, S::Input: HasTargetBytes, - SM: IsSnapshotManager, { fn usable_at_runtime(&self) -> bool { true @@ -346,15 +373,17 @@ where fn run( &self, - emu: &Emulator, QT, S>, - _qemu_executor_state: &mut QemuExecutorState, + emu: &mut Emulator, + _state: &mut S, input: &S::Input, ret_reg: Option, - ) -> Result, QT, S>>, ExitHandlerError> - { + ) -> Result>, EmulatorDriverError> { let qemu = emu.qemu(); - let ret_value = self.location.write(qemu, input.target_bytes().as_slice()); + let ret_value = self + .location + .write(qemu, input.target_bytes().as_slice()) + .unwrap(); if let Some(reg) = ret_reg { self.cpu.write_reg(reg, ret_value).unwrap(); @@ -366,14 +395,12 @@ where #[derive(Debug, Clone)] pub struct StartCommand { - input_location: EmulatorMemoryChunk, + input_location: QemuMemoryChunk, } - -impl IsCommand, QT, S> for StartCommand +impl IsCommand, StdEmulatorDriver, ET, S, SM> for StartCommand where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, S::Input: HasTargetBytes, SM: IsSnapshotManager, { @@ -383,21 +410,32 @@ where fn run( &self, - emu: &Emulator, QT, S>, - _qemu_executor_state: &mut QemuExecutorState, + emu: &mut Emulator, StdEmulatorDriver, ET, S, SM>, + state: &mut S, input: &S::Input, ret_reg: Option, - ) -> Result, QT, S>>, ExitHandlerError> - { - let emu_exit_handler = emu.exit_handler().borrow_mut(); + ) -> Result< + Option, StdEmulatorDriver, ET, S, SM>>, + EmulatorDriverError, + > { + if emu.command_manager_mut().start() { + return Err(EmulatorDriverError::CommandError( + CommandError::StartedTwice, + )); + } + let qemu = emu.qemu(); - let snapshot_id = emu_exit_handler.snapshot_manager_borrow_mut().save(qemu); - emu_exit_handler + // Snapshot VM + let snapshot_id = emu.snapshot_manager_mut().save(qemu); + + // Set snapshot ID to restore to after fuzzing ends + emu.driver_mut() .set_snapshot_id(snapshot_id) - .map_err(|_| ExitHandlerError::MultipleSnapshotDefinition)?; + .map_err(|_| EmulatorDriverError::MultipleSnapshotDefinition)?; - emu_exit_handler + // Save input location for next runs + emu.driver_mut() .set_input_location(InputLocation::new( self.input_location.clone(), qemu.current_cpu().unwrap(), @@ -405,26 +443,57 @@ where )) .unwrap(); + // Write input to input location let ret_value = self .input_location - .write(qemu, input.target_bytes().as_slice()); + 
.write(qemu, input.target_bytes().as_slice()) + .unwrap(); + // Unleash hooks if locked + if emu.driver_mut().unlock_hooks() { + // Prepare hooks + emu.modules_mut().first_exec_all(state); + emu.modules_mut().pre_exec_all(state, input); + } + + // Auto page filtering if option is enabled + #[cfg(feature = "systemmode")] + if emu.driver_mut().allow_page_on_start() { + if let Some(page_id) = qemu.current_cpu().unwrap().current_paging_id() { + emu.modules_mut().modules_mut().allow_page_id_all(page_id); + } + } + + #[cfg(feature = "x86_64")] + if emu.driver_mut().is_process_only() { + emu.modules_mut() + .modules_mut() + .allow_address_range_all(crate::PROCESS_ADDRESS_RANGE); + } + + // Make sure JIT cache is empty just before starting + qemu.flush_jit(); + + // Set input size in return register if there is any if let Some(reg) = ret_reg { qemu.write_reg(reg, ret_value).unwrap(); } + log::info!("Fuzzing starts"); + Ok(None) } } #[derive(Debug, Clone)] -pub struct EndCommand(Option); +pub struct EndCommand { + exit_kind: Option, +} -impl IsCommand, QT, S> for EndCommand +impl IsCommand, StdEmulatorDriver, ET, S, SM> for EndCommand where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, S::Input: HasTargetBytes, SM: IsSnapshotManager, { @@ -434,41 +503,45 @@ where fn run( &self, - emu: &Emulator, QT, S>, - _qemu_executor_state: &mut QemuExecutorState, + emu: &mut Emulator, StdEmulatorDriver, ET, S, SM>, + _state: &mut S, _input: &S::Input, _ret_reg: Option, - ) -> Result, QT, S>>, ExitHandlerError> - { - let emu_exit_handler = emu.exit_handler().borrow_mut(); + ) -> Result< + Option, StdEmulatorDriver, ET, S, SM>>, + EmulatorDriverError, + > { + let qemu = emu.qemu(); - let snapshot_id = emu_exit_handler + if !emu.command_manager_mut().has_started() { + return Err(EmulatorDriverError::CommandError( + CommandError::EndBeforeStart, + )); + } + + let snapshot_id = emu + .driver_mut() .snapshot_id() - .ok_or(ExitHandlerError::SnapshotNotFound)?; + .ok_or(EmulatorDriverError::SnapshotNotFound)?; - emu_exit_handler - .snapshot_manager_borrow_mut() - .restore(&snapshot_id, emu.qemu())?; + emu.snapshot_manager_mut().restore(qemu, &snapshot_id)?; #[cfg(feature = "paranoid_debug")] - emu_exit_handler - .snapshot_manager_borrow() - .check(&snapshot_id, emu.qemu())?; + emu.snapshot_manager_mut().check(qemu, &snapshot_id)?; - Ok(Some(ExitHandlerResult::EndOfRun(self.0.unwrap()))) + Ok(Some(EmulatorDriverResult::EndOfRun( + self.exit_kind.unwrap(), + ))) } } #[derive(Debug, Clone)] pub struct VersionCommand(u64); -impl IsCommand, QT, S> for VersionCommand +impl IsCommand for VersionCommand where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, + CM: CommandManager, + S: UsesInput, { fn usable_at_runtime(&self) -> bool { true @@ -476,40 +549,35 @@ where fn run( &self, - _emu: &Emulator, QT, S>, - _qemu_executor_state: &mut QemuExecutorState, + _emu: &mut Emulator, + _state: &mut S, _input: &S::Input, _ret_reg: Option, - ) -> Result, QT, S>>, ExitHandlerError> - { + ) -> Result>, EmulatorDriverError> { let guest_version = self.0; if VERSION == guest_version { Ok(None) } else { - Err(ExitHandlerError::CommandError( + Err(EmulatorDriverError::CommandError( CommandError::VersionDifference(guest_version), )) } } } +#[cfg(feature = "systemmode")] #[derive(Debug, Clone)] -pub struct 
FilterCommand -where - T: IsFilter + Debug, -{ - filter: T, +pub struct PageAllowCommand { + page_id: GuestPhysAddr, } -#[cfg(emulation_mode = "systemmode")] -impl IsCommand, QT, S> for PagingFilterCommand +#[cfg(feature = "systemmode")] +impl IsCommand for PageAllowCommand where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, + ET: EmulatorModuleTuple, + CM: CommandManager, + S: UsesInput + Unpin, { fn usable_at_runtime(&self) -> bool { true @@ -517,57 +585,121 @@ where fn run( &self, - _emu: &Emulator, QT, S>, - qemu_executor_state: &mut QemuExecutorState, + emu: &mut Emulator, + _state: &mut S, _input: &S::Input, _ret_reg: Option, - ) -> Result, QT, S>>, ExitHandlerError> - { - let qemu_helpers = qemu_executor_state.hooks_mut().helpers_mut(); - - let paging_filter = - HasInstrumentationFilter::::filter_mut(qemu_helpers); - - *paging_filter = self.filter.clone(); - + ) -> Result>, EmulatorDriverError> { + emu.modules_mut() + .modules_mut() + .allow_page_id_all(self.page_id.clone()); Ok(None) } } -impl IsCommand, QT, S> for AddressRangeFilterCommand +#[derive(Debug, Clone)] +pub struct AddressAllowCommand { + address_range: Range, +} +impl IsCommand for AddressAllowCommand where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, + ET: EmulatorModuleTuple, + CM: CommandManager, + S: UsesInput + Unpin, { fn usable_at_runtime(&self) -> bool { true } - #[allow(clippy::type_complexity)] // TODO: refactor with correct type. fn run( &self, - _emu: &Emulator, QT, S>, - qemu_executor_state: &mut QemuExecutorState, + emu: &mut Emulator, + _state: &mut S, _input: &S::Input, _ret_reg: Option, - ) -> Result, QT, S>>, ExitHandlerError> - { - let qemu_helpers = qemu_executor_state.hooks_mut().helpers_mut(); - - let addr_range_filter = - HasInstrumentationFilter::::filter_mut( - qemu_helpers, - ); - - *addr_range_filter = self.filter.clone(); - + ) -> Result>, EmulatorDriverError> { + emu.modules_mut() + .modules_mut() + .allow_address_range_all(self.address_range.clone()); Ok(None) } } +#[derive(Debug, Clone)] +pub struct LqprintfCommand { + content: String, +} +impl IsCommand for LqprintfCommand +where + ET: EmulatorModuleTuple, + CM: CommandManager, + S: UsesInput + Unpin, +{ + fn usable_at_runtime(&self) -> bool { + true + } + + fn run( + &self, + _emu: &mut Emulator, + _state: &mut S, + _input: &S::Input, + _ret_reg: Option, + ) -> Result>, EmulatorDriverError> { + print!("LQPRINTF: {}", self.content); + Ok(None) + } +} + +#[derive(Debug, Clone)] +pub struct TestCommand { + expected_value: GuestReg, + received_value: GuestReg, +} +impl IsCommand for TestCommand +where + ET: EmulatorModuleTuple, + CM: CommandManager, + S: UsesInput + Unpin, +{ + fn usable_at_runtime(&self) -> bool { + true + } + + fn run( + &self, + _emu: &mut Emulator, + _state: &mut S, + _input: &S::Input, + _ret_reg: Option, + ) -> Result>, EmulatorDriverError> { + if self.expected_value == self.received_value { + Ok(None) + } else { + Err(EmulatorDriverError::CommandError( + CommandError::TestDifference(self.received_value, self.expected_value), + )) + } + } +} + +impl TestCommand { + #[must_use] + pub fn new(received_value: GuestReg, expected_value: GuestReg) -> Self { + Self { + expected_value, + received_value, + } + } +} + +impl LqprintfCommand { + #[must_use] + pub fn new(content: String) 
-> Self { + Self { content } + } +} + impl VersionCommand { #[must_use] pub fn new(version: u64) -> Self { @@ -575,12 +707,10 @@ impl VersionCommand { } } -impl FilterCommand -where - T: IsFilter + Debug, -{ - pub fn new(filter: T) -> Self { - Self { filter } +impl AddressAllowCommand { + #[must_use] + pub fn new(address_range: Range) -> Self { + Self { address_range } } } @@ -614,7 +744,7 @@ impl Display for StartCommand { impl Display for EndCommand { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "Exit of kind {:?}", self.0) + write!(f, "Exit of kind {:?}", self.exit_kind) } } @@ -624,22 +754,22 @@ impl Display for VersionCommand { } } -impl Display for AddressRangeFilterCommand { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "Addr range filter: {:?}", self.filter,) +impl Display for AddressAllowCommand { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "Addr range allow: {:?}", self.address_range) } } -#[cfg(emulation_mode = "systemmode")] -impl Display for PagingFilterCommand { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "Addr range filter: {:?}", self.filter,) +#[cfg(feature = "systemmode")] +impl Display for PageAllowCommand { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "Allowed page: {:?}", self.page_id) } } impl StartCommand { #[must_use] - pub fn new(input_location: EmulatorMemoryChunk) -> Self { + pub fn new(input_location: QemuMemoryChunk) -> Self { Self { input_location } } } @@ -647,13 +777,13 @@ impl StartCommand { impl EndCommand { #[must_use] pub fn new(exit_kind: Option) -> Self { - Self(exit_kind) + Self { exit_kind } } } impl InputCommand { #[must_use] - pub fn new(location: EmulatorMemoryChunk, cpu: CPU) -> Self { + pub fn new(location: QemuMemoryChunk, cpu: CPU) -> Self { Self { location, cpu } } } diff --git a/libafl_qemu/src/command/parser.rs b/libafl_qemu/src/command/parser.rs index d1b9cedfd6..97914b266d 100644 --- a/libafl_qemu/src/command/parser.rs +++ b/libafl_qemu/src/command/parser.rs @@ -1,225 +1,211 @@ -use std::{fmt::Debug, rc::Rc, sync::OnceLock}; +use std::{ffi::CStr, sync::OnceLock}; use enum_map::{enum_map, EnumMap}; use libafl::{ executors::ExitKind, - inputs::HasTargetBytes, - state::{HasExecutions, State}, + inputs::{HasTargetBytes, UsesInput}, }; +use libafl_bolts::AsSliceMut; use libafl_qemu_sys::{GuestAddr, GuestPhysAddr, GuestVirtAddr}; +use libc::c_uint; use crate::{ command::{ - bindings, CommandError, CommandManager, EndCommand, FilterCommand, InputCommand, IsCommand, - LoadCommand, NativeExitKind, SaveCommand, StartCommand, VersionCommand, + bindings, AddressAllowCommand, CommandError, CommandManager, EndCommand, InputCommand, + IsCommand, LoadCommand, LqprintfCommand, NativeExitKind, SaveCommand, StartCommand, + StdCommandManager, TestCommand, VersionCommand, }, + modules::EmulatorModuleTuple, sync_exit::ExitArgs, - EmulatorExitHandler, EmulatorMemoryChunk, GuestReg, IsSnapshotManager, Qemu, QemuHelperTuple, - QemuInstrumentationAddressRangeFilter, Regs, StdEmulatorExitHandler, StdInstrumentationFilter, + GuestReg, IsSnapshotManager, Qemu, QemuMemoryChunk, Regs, StdEmulatorDriver, }; pub static EMU_EXIT_KIND_MAP: OnceLock>> = OnceLock::new(); -pub trait NativeCommandParser +pub trait NativeCommandParser where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { - fn command_id(&self) -> GuestReg; + type OutputCommand: IsCommand; + + 
const COMMAND_ID: c_uint; fn parse( - &self, qemu: Qemu, arch_regs_map: &'static EnumMap, - ) -> Result>, CommandError>; + ) -> Result; } pub struct InputPhysCommandParser; -impl NativeCommandParser, QT, S> - for InputPhysCommandParser +impl NativeCommandParser for InputPhysCommandParser where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, S::Input: HasTargetBytes, - SM: IsSnapshotManager, { - fn command_id(&self) -> GuestReg { - GuestReg::from(bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_INPUT_PHYS.0) - } + type OutputCommand = InputCommand; + + const COMMAND_ID: c_uint = bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_INPUT_PHYS.0; fn parse( - &self, qemu: Qemu, arch_regs_map: &'static EnumMap, - ) -> Result, QT, S>>, CommandError> { - let input_phys_addr: GuestPhysAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?; + ) -> Result { + let input_phys_addr: GuestPhysAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?.into(); let max_input_size: GuestReg = qemu.read_reg(arch_regs_map[ExitArgs::Arg2])?; - Ok(Rc::new(InputCommand::new( - EmulatorMemoryChunk::phys( + Ok(InputCommand::new( + QemuMemoryChunk::phys( input_phys_addr, max_input_size, Some(qemu.current_cpu().unwrap()), ), qemu.current_cpu().unwrap(), - ))) + )) } } pub struct InputVirtCommandParser; -impl NativeCommandParser, QT, S> - for InputVirtCommandParser +impl NativeCommandParser for InputVirtCommandParser where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, + S::Input: HasTargetBytes, +{ + type OutputCommand = InputCommand; + + const COMMAND_ID: c_uint = bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_INPUT_VIRT.0; + + fn parse( + qemu: Qemu, + arch_regs_map: &'static EnumMap, + ) -> Result { + let input_virt_addr: GuestVirtAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?.into(); + let max_input_size: GuestReg = qemu.read_reg(arch_regs_map[ExitArgs::Arg2])?; + + Ok(InputCommand::new( + QemuMemoryChunk::virt(input_virt_addr, max_input_size, qemu.current_cpu().unwrap()), + qemu.current_cpu().unwrap(), + )) + } +} + +pub struct StartPhysCommandParser; + +impl NativeCommandParser, StdEmulatorDriver, ET, S, SM> + for StartPhysCommandParser +where + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, S::Input: HasTargetBytes, SM: IsSnapshotManager, { - fn command_id(&self) -> GuestReg { - GuestReg::from(bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_INPUT_VIRT.0) - } + type OutputCommand = StartCommand; + + const COMMAND_ID: c_uint = bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_START_PHYS.0; fn parse( - &self, qemu: Qemu, arch_regs_map: &'static EnumMap, - ) -> Result, QT, S>>, CommandError> { - let input_virt_addr: GuestVirtAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?; + ) -> Result { + let input_phys_addr: GuestPhysAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?.into(); let max_input_size: GuestReg = qemu.read_reg(arch_regs_map[ExitArgs::Arg2])?; - Ok(Rc::new(InputCommand::new( - EmulatorMemoryChunk::virt(input_virt_addr, max_input_size, qemu.current_cpu().unwrap()), + Ok(StartCommand::new(QemuMemoryChunk::phys( + input_phys_addr, + max_input_size, + Some(qemu.current_cpu().unwrap()), + ))) + } +} + +pub struct StartVirtCommandParser; + +impl NativeCommandParser, StdEmulatorDriver, ET, S, SM> + for StartVirtCommandParser +where + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, + S::Input: 
HasTargetBytes, + SM: IsSnapshotManager, +{ + type OutputCommand = StartCommand; + + const COMMAND_ID: c_uint = bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_START_VIRT.0; + + fn parse( + qemu: Qemu, + arch_regs_map: &'static EnumMap, + ) -> Result { + let input_virt_addr: GuestVirtAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?.into(); + let max_input_size: GuestReg = qemu.read_reg(arch_regs_map[ExitArgs::Arg2])?; + + Ok(StartCommand::new(QemuMemoryChunk::virt( + input_virt_addr, + max_input_size, qemu.current_cpu().unwrap(), ))) } } -pub struct StartPhysCommandParser; -impl NativeCommandParser, QT, S> - for StartPhysCommandParser -where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, -{ - fn command_id(&self) -> GuestReg { - GuestReg::from(bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_START_PHYS.0) - } - - fn parse( - &self, - qemu: Qemu, - arch_regs_map: &'static EnumMap, - ) -> Result, QT, S>>, CommandError> { - let input_phys_addr: GuestPhysAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?; - let max_input_size: GuestReg = qemu.read_reg(arch_regs_map[ExitArgs::Arg2])?; - - Ok(Rc::new(StartCommand::new(EmulatorMemoryChunk::phys( - input_phys_addr, - max_input_size, - Some(qemu.current_cpu().unwrap()), - )))) - } -} - -pub struct StartVirtCommandParser; -impl NativeCommandParser, QT, S> - for StartVirtCommandParser -where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, -{ - fn command_id(&self) -> GuestReg { - GuestReg::from(bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_START_VIRT.0) - } - - fn parse( - &self, - qemu: Qemu, - arch_regs_map: &'static EnumMap, - ) -> Result, QT, S>>, CommandError> { - let input_virt_addr: GuestVirtAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?; - let max_input_size: GuestReg = qemu.read_reg(arch_regs_map[ExitArgs::Arg2])?; - - Ok(Rc::new(StartCommand::new(EmulatorMemoryChunk::virt( - input_virt_addr, - max_input_size, - qemu.current_cpu().unwrap(), - )))) - } -} - pub struct SaveCommandParser; -impl NativeCommandParser, QT, S> for SaveCommandParser +impl NativeCommandParser for SaveCommandParser where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, + ET: EmulatorModuleTuple, + CM: CommandManager, + S: UsesInput + Unpin, SM: IsSnapshotManager, { - fn command_id(&self) -> GuestReg { - GuestReg::from(bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_SAVE.0) - } + type OutputCommand = SaveCommand; + + const COMMAND_ID: c_uint = bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_SAVE.0; fn parse( - &self, _qemu: Qemu, _arch_regs_map: &'static EnumMap, - ) -> Result, QT, S>>, CommandError> { - Ok(Rc::new(SaveCommand)) + ) -> Result { + Ok(SaveCommand) } } pub struct LoadCommandParser; -impl NativeCommandParser, QT, S> for LoadCommandParser +impl NativeCommandParser for LoadCommandParser where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, + CM: CommandManager, + S: UsesInput, SM: IsSnapshotManager, { - fn command_id(&self) -> GuestReg { - GuestReg::from(bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_LOAD.0) - } + type OutputCommand = LoadCommand; + + const COMMAND_ID: c_uint = 
bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_LOAD.0; fn parse( - &self, _qemu: Qemu, _arch_regs_map: &'static EnumMap, - ) -> Result, QT, S>>, CommandError> { - Ok(Rc::new(LoadCommand)) + ) -> Result { + Ok(LoadCommand) } } pub struct EndCommandParser; -impl NativeCommandParser, QT, S> for EndCommandParser + +impl NativeCommandParser, StdEmulatorDriver, ET, S, SM> + for EndCommandParser where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, S::Input: HasTargetBytes, SM: IsSnapshotManager, { - fn command_id(&self) -> GuestReg { - GuestReg::from(bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_END.0) - } + type OutputCommand = EndCommand; + + const COMMAND_ID: c_uint = bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_END.0; fn parse( - &self, qemu: Qemu, arch_regs_map: &'static EnumMap, - ) -> Result, QT, S>>, CommandError> { + ) -> Result { let native_exit_kind: GuestReg = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?; let native_exit_kind: Result = u64::from(native_exit_kind).try_into(); @@ -233,60 +219,113 @@ where })[k] }); - Ok(Rc::new(EndCommand::new(exit_kind))) + Ok(EndCommand::new(exit_kind)) } } pub struct VersionCommandParser; -impl NativeCommandParser, QT, S> - for VersionCommandParser +impl NativeCommandParser for VersionCommandParser where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, + CM: CommandManager, + S: UsesInput, { - fn command_id(&self) -> GuestReg { - GuestReg::from(bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_VERSION.0) - } + type OutputCommand = VersionCommand; + + const COMMAND_ID: c_uint = bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_VERSION.0; fn parse( - &self, qemu: Qemu, arch_regs_map: &'static EnumMap, - ) -> Result, QT, S>>, CommandError> { - let client_version = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?; + ) -> Result { + let client_version = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?.into(); - Ok(Rc::new(VersionCommand::new(client_version))) + Ok(VersionCommand::new(client_version)) } } pub struct VaddrFilterAllowRangeCommandParser; -impl NativeCommandParser, QT, S> +impl NativeCommandParser for VaddrFilterAllowRangeCommandParser where - CM: CommandManager, QT, S>, - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, + ET: EmulatorModuleTuple, + CM: CommandManager, + S: UsesInput + Unpin, { - fn command_id(&self) -> GuestReg { - GuestReg::from(bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_VADDR_FILTER_ALLOW.0) - } + type OutputCommand = AddressAllowCommand; + + const COMMAND_ID: c_uint = bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_VADDR_FILTER_ALLOW.0; fn parse( - &self, qemu: Qemu, arch_regs_map: &'static EnumMap, - ) -> Result, QT, S>>, CommandError> { + ) -> Result { let vaddr_start: GuestAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?; let vaddr_end: GuestAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg2])?; - Ok(Rc::new(FilterCommand::new( - #[allow(clippy::single_range_in_vec_init)] - QemuInstrumentationAddressRangeFilter::AllowList(vec![vaddr_start..vaddr_end]), - ))) + Ok(AddressAllowCommand::new(vaddr_start..vaddr_end)) + } +} + +pub struct LqprintfCommandParser; +impl NativeCommandParser for LqprintfCommandParser +where + ET: EmulatorModuleTuple, + CM: CommandManager, + S: UsesInput + Unpin, +{ + 
type OutputCommand = LqprintfCommand; + const COMMAND_ID: c_uint = bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_LQPRINTF.0; + + #[allow(clippy::uninit_vec)] + fn parse( + qemu: Qemu, + arch_regs_map: &'static EnumMap, + ) -> Result { + let buf_addr: GuestAddr = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?; + let str_size: usize = qemu + .read_reg(arch_regs_map[ExitArgs::Arg2])? + .try_into() + .unwrap(); // without null byte + let cpu = qemu.current_cpu().unwrap(); + + let total_size = str_size + 1; + + let mut str_copy: Vec = unsafe { + let mut res = Vec::::with_capacity(total_size); + res.set_len(total_size); + res + }; + + let mem_chunk = + QemuMemoryChunk::virt(buf_addr as GuestVirtAddr, total_size as GuestReg, cpu); + mem_chunk.read(qemu, str_copy.as_slice_mut())?; + + let c_str: &CStr = CStr::from_bytes_with_nul(str_copy.as_slice()).unwrap(); + + Ok(LqprintfCommand::new(c_str.to_str().unwrap().to_string())) + } +} + +pub struct TestCommandParser; +impl NativeCommandParser for TestCommandParser +where + ET: EmulatorModuleTuple, + CM: CommandManager, + S: UsesInput + Unpin, +{ + type OutputCommand = TestCommand; + const COMMAND_ID: c_uint = bindings::LibaflQemuCommand_LIBAFL_QEMU_COMMAND_TEST.0; + + #[allow(clippy::cast_sign_loss)] + fn parse( + qemu: Qemu, + arch_regs_map: &'static EnumMap, + ) -> Result { + let received_value: GuestReg = qemu.read_reg(arch_regs_map[ExitArgs::Arg1])?; + + Ok(TestCommand::new( + received_value, + GuestReg::from(bindings::LIBAFL_QEMU_TEST_VALUE), + )) } } diff --git a/libafl_qemu/src/emu/builder.rs b/libafl_qemu/src/emu/builder.rs new file mode 100644 index 0000000000..67396d2c10 --- /dev/null +++ b/libafl_qemu/src/emu/builder.rs @@ -0,0 +1,244 @@ +use std::{fmt::Debug, marker::PhantomData}; + +use libafl::{ + inputs::{HasTargetBytes, UsesInput}, + state::{HasExecutions, State}, +}; +use libafl_bolts::tuples::{tuple_list, Prepend}; + +#[cfg(feature = "systemmode")] +use crate::FastSnapshotManager; +use crate::{ + command::{CommandManager, NopCommandManager, StdCommandManager}, + config::QemuConfig, + modules::{EmulatorModule, EmulatorModuleTuple}, + Emulator, EmulatorHooks, NopEmulatorDriver, NopSnapshotManager, Qemu, QemuHooks, QemuInitError, + StdEmulatorDriver, StdSnapshotManager, +}; + +#[derive(Clone, Debug)] +enum QemuBuilder { + Qemu(Qemu), + QemuConfig(QemuConfig), + QemuString(Vec), +} + +#[derive(Clone, Debug)] +pub struct EmulatorBuilder +where + S: UsesInput, +{ + modules: ET, + driver: ED, + snapshot_manager: SM, + command_manager: CM, + qemu_builder: Option, + phantom: PhantomData, +} + +impl EmulatorBuilder +where + S: UsesInput, +{ + #[must_use] + pub fn empty() -> Self { + Self { + modules: tuple_list!(), + driver: NopEmulatorDriver, + snapshot_manager: NopSnapshotManager, + command_manager: NopCommandManager, + qemu_builder: None, + phantom: PhantomData, + } + } +} + +#[cfg(feature = "usermode")] +impl EmulatorBuilder, StdEmulatorDriver, (), S, StdSnapshotManager> +where + S: State + HasExecutions + Unpin, + S::Input: HasTargetBytes, +{ + #[must_use] + #[allow(clippy::should_implement_trait)] + pub fn default() -> Self { + Self { + modules: tuple_list!(), + command_manager: StdCommandManager::default(), + snapshot_manager: StdSnapshotManager::default(), + driver: StdEmulatorDriver::builder().build(), + qemu_builder: None, + phantom: PhantomData, + } + } +} + +#[cfg(feature = "systemmode")] +impl EmulatorBuilder, StdEmulatorDriver, (), S, StdSnapshotManager> +where + S: State + HasExecutions + Unpin, + S::Input: HasTargetBytes, +{ + 
pub fn default() -> Self { + Self { + modules: (), + command_manager: StdCommandManager::default(), + snapshot_manager: FastSnapshotManager::default(), + driver: StdEmulatorDriver::builder().build(), + qemu_builder: None, + phantom: PhantomData, + } + } +} +impl EmulatorBuilder +where + S: UsesInput + Unpin, +{ + fn new( + modules: ET, + driver: ED, + command_manager: CM, + snapshot_manager: SM, + qemu_builder: Option, + ) -> Self { + Self { + modules, + command_manager, + driver, + snapshot_manager, + qemu_builder, + phantom: PhantomData, + } + } + + pub fn build(self) -> Result, QemuInitError> + where + CM: CommandManager, + ET: EmulatorModuleTuple, + { + let qemu_builder = self.qemu_builder.ok_or(QemuInitError::EmptyArgs)?; + + let mut emulator_hooks = unsafe { EmulatorHooks::new(QemuHooks::get_unchecked()) }; + + self.modules.pre_qemu_init_all(&mut emulator_hooks); + + let qemu: Qemu = match qemu_builder { + QemuBuilder::Qemu(qemu) => qemu, + QemuBuilder::QemuConfig(qemu_config) => { + let res: Result = qemu_config.into(); + res? + } + QemuBuilder::QemuString(qemu_string) => Qemu::init(&qemu_string)?, + }; + + unsafe { + Ok(Emulator::new_with_qemu( + qemu, + emulator_hooks, + self.modules, + self.driver, + self.snapshot_manager, + self.command_manager, + )) + } + } +} + +impl EmulatorBuilder +where + CM: CommandManager, + S: UsesInput + Unpin, +{ + #[must_use] + pub fn qemu_config(self, qemu_config: QemuConfig) -> EmulatorBuilder { + EmulatorBuilder::new( + self.modules, + self.driver, + self.command_manager, + self.snapshot_manager, + Some(QemuBuilder::QemuConfig(qemu_config)), + ) + } + + #[must_use] + pub fn qemu_cli(self, qemu_cli: Vec) -> EmulatorBuilder { + EmulatorBuilder::new( + self.modules, + self.driver, + self.command_manager, + self.snapshot_manager, + Some(QemuBuilder::QemuString(qemu_cli)), + ) + } + + #[must_use] + pub fn qemu(self, qemu: Qemu) -> EmulatorBuilder { + EmulatorBuilder::new( + self.modules, + self.driver, + self.command_manager, + self.snapshot_manager, + Some(QemuBuilder::Qemu(qemu)), + ) + } + + pub fn add_module(self, module: EM) -> EmulatorBuilder + where + EM: EmulatorModule + Unpin, + ET: EmulatorModuleTuple, + { + EmulatorBuilder::new( + self.modules.prepend(module), + self.driver, + self.command_manager, + self.snapshot_manager, + self.qemu_builder, + ) + } + + pub fn driver(self, driver: ED2) -> EmulatorBuilder { + EmulatorBuilder::new( + self.modules, + driver, + self.command_manager, + self.snapshot_manager, + self.qemu_builder, + ) + } + + pub fn command_manager(self, command_manager: CM2) -> EmulatorBuilder + where + CM2: CommandManager, + { + EmulatorBuilder::new( + self.modules, + self.driver, + command_manager, + self.snapshot_manager, + self.qemu_builder, + ) + } + + pub fn modules(self, modules: ET2) -> EmulatorBuilder { + EmulatorBuilder::new( + modules, + self.driver, + self.command_manager, + self.snapshot_manager, + self.qemu_builder, + ) + } + + pub fn snapshot_manager( + self, + snapshot_manager: SM2, + ) -> EmulatorBuilder { + EmulatorBuilder::new( + self.modules, + self.driver, + self.command_manager, + snapshot_manager, + self.qemu_builder, + ) + } +} diff --git a/libafl_qemu/src/emu/drivers.rs b/libafl_qemu/src/emu/drivers.rs new file mode 100644 index 0000000000..0fe0c6f161 --- /dev/null +++ b/libafl_qemu/src/emu/drivers.rs @@ -0,0 +1,290 @@ +//! Emulator Drivers, as the name suggests, drive QEMU execution +//! They are used to perform specific actions on the emulator before and / or after QEMU runs. 
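An illustrative sketch (not part of this diff): a custom driver can implement the `EmulatorDriver` trait introduced in this file, override only the hook point it cares about, and fall back to the trait defaults for everything else. The generic parameter lists and bounds below are assumptions reconstructed from `NopEmulatorDriver` and `StdEmulatorDriver` further down and should be checked against the actual trait definition; `LoggingEmulatorDriver` is a hypothetical name used only for this example.

// Sketch only, under the assumptions stated above.
use libafl::inputs::UsesInput;

use crate::{command::CommandManager, modules::EmulatorModuleTuple, Emulator, EmulatorDriver};

/// Hypothetical driver that just logs before each harness run; all other
/// behavior (module hooks, QEMU exit forwarding) comes from the trait defaults.
pub struct LoggingEmulatorDriver;

impl<CM, ET, S, SM> EmulatorDriver<CM, ET, S, SM> for LoggingEmulatorDriver
where
    CM: CommandManager<Self, ET, S, SM>, // assumed parameter order
    ET: EmulatorModuleTuple<S>,
    S: UsesInput + Unpin,
{
    fn pre_harness_exec(
        emulator: &mut Emulator<CM, Self, ET, S, SM>,
        state: &mut S,
        input: &S::Input,
    ) {
        log::debug!("about to enter the harness");
        // Preserve the default behavior of running the modules' pre-exec hooks.
        emulator.modules_mut().pre_exec_all(state, input);
    }
}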
+ +use std::{cell::OnceCell, fmt::Debug}; + +use libafl::{ + executors::ExitKind, + inputs::{HasTargetBytes, UsesInput}, + observers::ObserversTuple, +}; +use libafl_bolts::os::{unix_signals::Signal, CTRL_C_EXIT}; +use typed_builder::TypedBuilder; + +use crate::{ + command::{CommandError, CommandManager, InputCommand, IsCommand}, + modules::EmulatorModuleTuple, + Emulator, EmulatorExitError, EmulatorExitResult, InputLocation, IsSnapshotManager, + QemuShutdownCause, Regs, SnapshotId, SnapshotManagerCheckError, SnapshotManagerError, +}; + +#[derive(Debug, Clone)] +pub enum EmulatorDriverResult +where + CM: CommandManager, + S: UsesInput, +{ + /// Return to the harness immediately. Can happen at any point of the run when the handler is not supposed to handle a request. + ReturnToHarness(EmulatorExitResult), + + /// The run is over and the emulator is ready for the next iteration. + EndOfRun(ExitKind), +} + +#[derive(Debug, Clone)] +pub enum EmulatorDriverError { + QemuExitReasonError(EmulatorExitError), + SMError(SnapshotManagerError), + SMCheckError(SnapshotManagerCheckError), + CommandError(CommandError), + UnhandledSignal(Signal), + MultipleSnapshotDefinition, + MultipleInputDefinition, + SnapshotNotFound, +} + +/// An Emulator Driver. +// TODO remove 'static when specialization will be stable +pub trait EmulatorDriver: 'static + Sized +where + CM: CommandManager, + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, +{ + /// Just before calling user's harness for the first time. + /// Called only once + fn first_harness_exec(emulator: &mut Emulator, state: &mut S) { + emulator.modules.first_exec_all(state); + } + + /// Just before calling user's harness + fn pre_harness_exec( + emulator: &mut Emulator, + state: &mut S, + input: &S::Input, + ) { + emulator.modules.pre_exec_all(state, input); + } + + /// Just after returning from user's harness + fn post_harness_exec( + emulator: &mut Emulator, + input: &S::Input, + observers: &mut OT, + state: &mut S, + exit_kind: &mut ExitKind, + ) where + OT: ObserversTuple, + { + emulator + .modules + .post_exec_all(state, input, observers, exit_kind); + } + + /// Just before entering QEMU + fn pre_qemu_exec(_emulator: &mut Emulator, _input: &S::Input) {} + + /// Just after QEMU exits + #[allow(clippy::type_complexity)] + fn post_qemu_exec( + _emulator: &mut Emulator, + _state: &mut S, + exit_reason: &mut Result, EmulatorExitError>, + _input: &S::Input, + ) -> Result>, EmulatorDriverError> { + match exit_reason { + Ok(reason) => Ok(Some(EmulatorDriverResult::ReturnToHarness(reason.clone()))), + Err(error) => Err(error.clone().into()), + } + } +} + +pub struct NopEmulatorDriver; +impl EmulatorDriver for NopEmulatorDriver +where + CM: CommandManager, + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, +{ +} + +#[derive(Clone, Debug, Default, TypedBuilder)] +#[allow(clippy::struct_excessive_bools)] +pub struct StdEmulatorDriver { + #[builder(default = OnceCell::new())] + snapshot_id: OnceCell, + #[builder(default = OnceCell::new())] + input_location: OnceCell, + #[builder(default = true)] + hooks_locked: bool, + #[cfg(feature = "systemmode")] + #[builder(default = false)] + allow_page_on_start: bool, + #[cfg(feature = "x86_64")] + #[builder(default = false)] + process_only: bool, + #[builder(default = false)] + print_commands: bool, +} + +impl StdEmulatorDriver { + pub fn set_input_location(&self, input_location: InputLocation) -> Result<(), InputLocation> { + self.input_location.set(input_location) + } + + pub fn set_snapshot_id(&self, snapshot_id: 
SnapshotId) -> Result<(), SnapshotId> { + self.snapshot_id.set(snapshot_id) + } + + pub fn snapshot_id(&self) -> Option { + Some(*self.snapshot_id.get()?) + } + + // return if was locked or not + pub fn unlock_hooks(&mut self) -> bool { + let was_locked = self.hooks_locked; + self.hooks_locked = false; + was_locked + } + + #[cfg(feature = "systemmode")] + pub fn allow_page_on_start(&self) -> bool { + self.allow_page_on_start + } + + #[cfg(feature = "x86_64")] + pub fn is_process_only(&self) -> bool { + self.process_only + } +} + +// TODO: replace handlers with generics to permit compile-time customization of handlers +impl EmulatorDriver for StdEmulatorDriver +where + CM: CommandManager, + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, + S::Input: HasTargetBytes, + SM: IsSnapshotManager, +{ + fn first_harness_exec(emulator: &mut Emulator, state: &mut S) { + if !emulator.driver.hooks_locked { + emulator.modules.first_exec_all(state); + } + } + + fn pre_harness_exec( + emulator: &mut Emulator, + state: &mut S, + input: &S::Input, + ) { + if !emulator.driver.hooks_locked { + emulator.modules.pre_exec_all(state, input); + } + + let input_location = { emulator.driver.input_location.get().cloned() }; + + if let Some(input_location) = input_location { + let input_command = + InputCommand::new(input_location.mem_chunk.clone(), input_location.cpu); + + input_command + .run(emulator, state, input, input_location.ret_register) + .unwrap(); + } + } + + fn post_harness_exec( + emulator: &mut Emulator, + input: &S::Input, + observers: &mut OT, + state: &mut S, + exit_kind: &mut ExitKind, + ) where + OT: ObserversTuple, + { + if !emulator.driver.hooks_locked { + emulator + .modules + .post_exec_all(state, input, observers, exit_kind); + } + } + + fn pre_qemu_exec(_emulator: &mut Emulator, _input: &S::Input) {} + + fn post_qemu_exec( + emulator: &mut Emulator, + state: &mut S, + exit_reason: &mut Result, EmulatorExitError>, + input: &S::Input, + ) -> Result>, EmulatorDriverError> { + let qemu = emulator.qemu(); + + let mut exit_reason = match exit_reason { + Ok(exit_reason) => exit_reason, + Err(exit_error) => match exit_error { + EmulatorExitError::UnexpectedExit => { + if let Some(snapshot_id) = emulator.driver.snapshot_id.get() { + emulator.snapshot_manager.restore(qemu, snapshot_id)?; + } + return Ok(Some(EmulatorDriverResult::EndOfRun(ExitKind::Crash))); + } + _ => Err(exit_error.clone())?, + }, + }; + + #[allow(clippy::type_complexity)] + let (command, ret_reg): (Option, Option) = match &mut exit_reason { + EmulatorExitResult::QemuExit(shutdown_cause) => match shutdown_cause { + QemuShutdownCause::HostSignal(signal) => { + signal.handle(); + return Err(EmulatorDriverError::UnhandledSignal(*signal)); + } + QemuShutdownCause::GuestPanic => { + return Ok(Some(EmulatorDriverResult::EndOfRun(ExitKind::Crash))) + } + QemuShutdownCause::GuestShutdown | QemuShutdownCause::HostQmpQuit => { + log::warn!("Guest shutdown. 
Stopping fuzzing..."); + std::process::exit(CTRL_C_EXIT); + } + _ => panic!("Unhandled QEMU shutdown cause: {shutdown_cause:?}."), + }, + EmulatorExitResult::Timeout => { + return Ok(Some(EmulatorDriverResult::EndOfRun(ExitKind::Timeout))) + } + EmulatorExitResult::Breakpoint(bp) => (bp.trigger(qemu), None), + EmulatorExitResult::SyncExit(sync_backdoor) => { + let command = sync_backdoor.command().clone(); + (Some(command), Some(sync_backdoor.ret_reg())) + } + }; + + if let Some(cmd) = command { + if emulator.driver.print_commands { + println!("Received command: {cmd:?}"); + } + cmd.run(emulator, state, input, ret_reg) + } else { + Ok(Some(EmulatorDriverResult::ReturnToHarness( + exit_reason.clone(), + ))) + } + } +} + +impl TryFrom> for ExitKind +where + CM: CommandManager, + S: UsesInput, +{ + type Error = String; + + fn try_from(value: EmulatorDriverResult) -> Result { + match value { + EmulatorDriverResult::ReturnToHarness(unhandled_qemu_exit) => { + Err(format!("Unhandled QEMU exit: {:?}", &unhandled_qemu_exit)) + } + EmulatorDriverResult::EndOfRun(exit_kind) => Ok(exit_kind), + } + } +} diff --git a/libafl_qemu/src/emu/hooks.rs b/libafl_qemu/src/emu/hooks.rs new file mode 100644 index 0000000000..eef034c61e --- /dev/null +++ b/libafl_qemu/src/emu/hooks.rs @@ -0,0 +1,1400 @@ +#![allow(clippy::missing_transmute_annotations)] + +use std::{fmt::Debug, marker::PhantomData, mem::transmute, pin::Pin, ptr}; + +use libafl::{executors::ExitKind, inputs::UsesInput, observers::ObserversTuple}; +use libafl_qemu_sys::{CPUArchStatePtr, CPUStatePtr, FatPtr, GuestAddr, GuestUsize, TCGTemp}; + +#[cfg(feature = "usermode")] +use crate::qemu::{ + closure_post_syscall_hook_wrapper, closure_pre_syscall_hook_wrapper, + func_post_syscall_hook_wrapper, func_pre_syscall_hook_wrapper, PostSyscallHook, + PostSyscallHookId, PreSyscallHook, PreSyscallHookId, SyscallHookResult, +}; +#[cfg(feature = "usermode")] +use crate::qemu::{ + CrashHookClosure, CrashHookFn, PostSyscallHookClosure, PostSyscallHookFn, + PreSyscallHookClosure, PreSyscallHookFn, +}; +use crate::{ + cpu_run_post_exec_hook_wrapper, cpu_run_pre_exec_hook_wrapper, + modules::{EmulatorModule, EmulatorModuleTuple}, + qemu::{ + block_0_exec_hook_wrapper, block_gen_hook_wrapper, block_post_gen_hook_wrapper, + closure_backdoor_hook_wrapper, closure_instruction_hook_wrapper, + closure_new_thread_hook_wrapper, cmp_0_exec_hook_wrapper, cmp_1_exec_hook_wrapper, + cmp_2_exec_hook_wrapper, cmp_3_exec_hook_wrapper, cmp_gen_hook_wrapper, + edge_0_exec_hook_wrapper, edge_gen_hook_wrapper, func_backdoor_hook_wrapper, + func_instruction_hook_wrapper, func_new_thread_hook_wrapper, read_0_exec_hook_wrapper, + read_1_exec_hook_wrapper, read_2_exec_hook_wrapper, read_3_exec_hook_wrapper, + read_4_exec_hook_wrapper, read_gen_hook_wrapper, write_0_exec_hook_wrapper, + write_1_exec_hook_wrapper, write_2_exec_hook_wrapper, write_3_exec_hook_wrapper, + write_4_exec_hook_wrapper, write_gen_hook_wrapper, BackdoorHook, BackdoorHookClosure, + BackdoorHookFn, BackdoorHookId, BlockExecHook, BlockGenHook, BlockHookId, BlockPostGenHook, + CmpExecHook, CmpGenHook, CmpHookId, EdgeExecHook, EdgeGenHook, EdgeHookId, Hook, HookRepr, + InstructionHook, InstructionHookClosure, InstructionHookFn, InstructionHookId, + NewThreadHook, NewThreadHookClosure, NewThreadHookId, QemuHooks, ReadExecHook, + ReadExecNHook, ReadGenHook, ReadHookId, TcgHookState, WriteExecHook, WriteExecNHook, + WriteGenHook, WriteHookId, + }, + CpuPostRunHook, CpuPreRunHook, CpuRunHookId, HookState, MemAccessInfo, 
Qemu, +}; + +macro_rules! get_raw_hook { + ($h:expr, $replacement:expr, $fntype:ty) => { + match $h { + Hook::Function(_) | Hook::Closure(_) => Some($replacement as $fntype), + Hook::Raw(r) => { + let v: $fntype = transmute(r); + Some(v) + } + Hook::Empty => None, + } + }; +} + +macro_rules! hook_to_repr { + ($h:expr) => { + match $h { + Hook::Function(f) => HookRepr::Function(f as *const libc::c_void), + Hook::Closure(c) => HookRepr::Closure(transmute(c)), + Hook::Raw(_) => HookRepr::Empty, // managed by emu + Hook::Empty => HookRepr::Empty, + } + }; +} + +static mut EMULATOR_MODULES: *mut () = ptr::null_mut(); + +#[cfg(feature = "usermode")] +pub extern "C" fn crash_hook_wrapper(target_sig: i32) +where + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, +{ + unsafe { + let emulator_modules = EmulatorModules::::emulator_modules_mut().unwrap(); + + let crash_hooks_ptr = &raw mut emulator_modules.hooks.crash_hooks; + + for crash_hook in &mut (*crash_hooks_ptr) { + match crash_hook { + HookRepr::Function(ptr) => { + let func: CrashHookFn = transmute(*ptr); + func(emulator_modules, target_sig); + } + HookRepr::Closure(ptr) => { + let func: &mut CrashHookClosure = + &mut *(ptr::from_mut::(ptr) as *mut CrashHookClosure); + func(emulator_modules, target_sig); + } + HookRepr::Empty => (), + } + } + } +} + +/// High-level `Emulator` modules, using `QemuHooks`. +#[derive(Debug)] +pub struct EmulatorModules +where + S: UsesInput, +{ + qemu: Qemu, + modules: Pin>, + hooks: EmulatorHooks, + phantom: PhantomData, +} + +/// Hook collection, +#[derive(Debug)] +pub struct EmulatorHooks +where + S: UsesInput, +{ + qemu_hooks: QemuHooks, + + instruction_hooks: Vec>>, + backdoor_hooks: Vec>>, + edge_hooks: Vec>>>, + block_hooks: Vec>>>, + read_hooks: Vec>>>, + write_hooks: Vec>>>, + cmp_hooks: Vec>>>, + + cpu_run_hooks: Vec>>>, + + new_thread_hooks: Vec>>, + + #[cfg(feature = "usermode")] + pre_syscall_hooks: Vec>>, + + #[cfg(feature = "usermode")] + post_syscall_hooks: Vec>>, + + #[cfg(feature = "usermode")] + crash_hooks: Vec, + + phantom: PhantomData<(ET, S)>, +} + +impl EmulatorHooks +where + S: UsesInput + Unpin, +{ + #[must_use] + pub fn new(qemu_hooks: QemuHooks) -> Self { + Self { + qemu_hooks, + phantom: PhantomData, + instruction_hooks: Vec::new(), + backdoor_hooks: Vec::new(), + edge_hooks: Vec::new(), + block_hooks: Vec::new(), + read_hooks: Vec::new(), + write_hooks: Vec::new(), + cmp_hooks: Vec::new(), + + cpu_run_hooks: Vec::new(), + + new_thread_hooks: Vec::new(), + + #[cfg(feature = "usermode")] + pre_syscall_hooks: Vec::new(), + + #[cfg(feature = "usermode")] + post_syscall_hooks: Vec::new(), + + #[cfg(feature = "usermode")] + crash_hooks: Vec::new(), + } + } + + pub fn instruction_closure( + &mut self, + addr: GuestAddr, + hook: InstructionHookClosure, + invalidate_block: bool, + ) -> InstructionHookId { + let fat: FatPtr = unsafe { transmute(hook) }; + + self.instruction_hooks + .push(Box::pin((InstructionHookId::invalid(), fat))); + + unsafe { + let hook_state = &mut self + .instruction_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .1 as *mut FatPtr; + + let id = self.qemu_hooks.add_instruction_hooks( + &mut *hook_state, + addr, + closure_instruction_hook_wrapper::, + invalidate_block, + ); + self.instruction_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .0 = id; + id + } + } + + pub fn instructions( + &mut self, + addr: GuestAddr, + hook: InstructionHook, + invalidate_block: bool, + ) -> Option { + match hook { + Hook::Function(f) => 
Some(self.instruction_function(addr, f, invalidate_block)), + Hook::Closure(c) => Some(self.instruction_closure(addr, c, invalidate_block)), + Hook::Raw(r) => { + let z: *const () = ptr::null::<()>(); + Some( + self.qemu_hooks + .add_instruction_hooks(z, addr, r, invalidate_block), + ) + } + Hook::Empty => None, + } + } + + pub fn instruction_function( + &mut self, + addr: GuestAddr, + hook: InstructionHookFn, + invalidate_block: bool, + ) -> InstructionHookId { + unsafe { + self.qemu_hooks.add_instruction_hooks( + transmute(hook), + addr, + func_instruction_hook_wrapper::, + invalidate_block, + ) + } + } + + pub fn edges( + &mut self, + generation_hook: EdgeGenHook, + execution_hook: EdgeExecHook, + ) -> EdgeHookId { + unsafe { + let gen = get_raw_hook!( + generation_hook, + edge_gen_hook_wrapper::, + unsafe extern "C" fn( + &mut TcgHookState<1, EdgeHookId>, + src: GuestAddr, + dest: GuestAddr, + ) -> u64 + ); + + let exec = get_raw_hook!( + execution_hook, + edge_0_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<1, EdgeHookId>, id: u64) + ); + + self.edge_hooks.push(Box::pin(TcgHookState::new( + EdgeHookId::invalid(), + hook_to_repr!(generation_hook), + HookRepr::Empty, + [hook_to_repr!(execution_hook)], + ))); + + let hook_state = &mut *ptr::from_mut::>( + self.edge_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut(), + ); + + let id = self.qemu_hooks.add_edge_hooks(hook_state, gen, exec); + + self.edge_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .set_id(id); + + id + } + } + + pub fn blocks( + &mut self, + generation_hook: BlockGenHook, + post_generation_hook: BlockPostGenHook, + execution_hook: BlockExecHook, + ) -> BlockHookId { + unsafe { + let gen = get_raw_hook!( + generation_hook, + block_gen_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<1, BlockHookId>, pc: GuestAddr) -> u64 + ); + + let postgen = get_raw_hook!( + post_generation_hook, + block_post_gen_hook_wrapper::, + unsafe extern "C" fn( + &mut TcgHookState<1, BlockHookId>, + pc: GuestAddr, + block_length: GuestUsize, + ) + ); + + let exec = get_raw_hook!( + execution_hook, + block_0_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<1, BlockHookId>, id: u64) + ); + + self.block_hooks.push(Box::pin(TcgHookState::new( + BlockHookId::invalid(), + hook_to_repr!(generation_hook), + hook_to_repr!(post_generation_hook), + [hook_to_repr!(execution_hook)], + ))); + + let hook_state = &mut *ptr::from_mut::>( + self.block_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut(), + ); + + let id = self + .qemu_hooks + .add_block_hooks(hook_state, gen, postgen, exec); + + self.block_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .set_id(id); + + id + } + } + + pub fn cpu_runs( + &mut self, + pre_exec_hook: CpuPreRunHook, + post_exec_hook: CpuPostRunHook, + ) -> CpuRunHookId { + unsafe { + let pre_run = get_raw_hook!( + pre_exec_hook, + cpu_run_pre_exec_hook_wrapper::, + unsafe extern "C" fn(&mut HookState, cpu: CPUStatePtr) + ); + + let post_run = get_raw_hook!( + post_exec_hook, + cpu_run_post_exec_hook_wrapper::, + unsafe extern "C" fn(&mut HookState, cpu: CPUStatePtr) + ); + + self.cpu_run_hooks.push(Box::pin(HookState::new( + CpuRunHookId::invalid(), + hook_to_repr!(pre_exec_hook), + hook_to_repr!(post_exec_hook), + ))); + + let hook_state = &mut *ptr::from_mut::>( + self.cpu_run_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut(), + ); + + let id = self + .qemu_hooks + .add_cpu_run_hooks(hook_state, pre_run, 
post_run); + + self.cpu_run_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .set_id(id); + + id + } + } + + #[allow(clippy::similar_names)] + pub fn reads( + &mut self, + generation_hook: ReadGenHook, + execution_hook_1: ReadExecHook, + execution_hook_2: ReadExecHook, + execution_hook_4: ReadExecHook, + execution_hook_8: ReadExecHook, + execution_hook_n: ReadExecNHook, + ) -> ReadHookId { + unsafe { + let gen = get_raw_hook!( + generation_hook, + read_gen_hook_wrapper::, + unsafe extern "C" fn( + &mut TcgHookState<5, ReadHookId>, + pc: GuestAddr, + addr: *mut TCGTemp, + info: MemAccessInfo, + ) -> u64 + ); + let exec1 = get_raw_hook!( + execution_hook_1, + read_0_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<5, ReadHookId>, id: u64, addr: GuestAddr) + ); + let exec2 = get_raw_hook!( + execution_hook_2, + read_1_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<5, ReadHookId>, id: u64, addr: GuestAddr) + ); + let exec4 = get_raw_hook!( + execution_hook_4, + read_2_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<5, ReadHookId>, id: u64, addr: GuestAddr) + ); + let exec8 = get_raw_hook!( + execution_hook_8, + read_3_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<5, ReadHookId>, id: u64, addr: GuestAddr) + ); + let execn = get_raw_hook!( + execution_hook_n, + read_4_exec_hook_wrapper::, + unsafe extern "C" fn( + &mut TcgHookState<5, ReadHookId>, + id: u64, + addr: GuestAddr, + size: usize, + ) + ); + + self.read_hooks.push(Box::pin(TcgHookState::new( + ReadHookId::invalid(), + hook_to_repr!(generation_hook), + HookRepr::Empty, + [ + hook_to_repr!(execution_hook_1), + hook_to_repr!(execution_hook_2), + hook_to_repr!(execution_hook_4), + hook_to_repr!(execution_hook_8), + hook_to_repr!(execution_hook_n), + ], + ))); + + let hook_state = &mut *ptr::from_mut::>( + self.read_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut(), + ); + + let id = self + .qemu_hooks + .add_read_hooks(hook_state, gen, exec1, exec2, exec4, exec8, execn); + + self.read_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .set_id(id); + + id + } + } + + #[allow(clippy::similar_names)] + pub fn writes( + &mut self, + generation_hook: WriteGenHook, + execution_hook_1: WriteExecHook, + execution_hook_2: WriteExecHook, + execution_hook_4: WriteExecHook, + execution_hook_8: WriteExecHook, + execution_hook_n: WriteExecNHook, + ) -> WriteHookId { + unsafe { + let gen = get_raw_hook!( + generation_hook, + write_gen_hook_wrapper::, + unsafe extern "C" fn( + &mut TcgHookState<5, WriteHookId>, + pc: GuestAddr, + addr: *mut TCGTemp, + info: MemAccessInfo, + ) -> u64 + ); + let exec1 = get_raw_hook!( + execution_hook_1, + write_0_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<5, WriteHookId>, id: u64, addr: GuestAddr) + ); + let exec2 = get_raw_hook!( + execution_hook_2, + write_1_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<5, WriteHookId>, id: u64, addr: GuestAddr) + ); + let exec4 = get_raw_hook!( + execution_hook_4, + write_2_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<5, WriteHookId>, id: u64, addr: GuestAddr) + ); + let exec8 = get_raw_hook!( + execution_hook_8, + write_3_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<5, WriteHookId>, id: u64, addr: GuestAddr) + ); + let execn = get_raw_hook!( + execution_hook_n, + write_4_exec_hook_wrapper::, + unsafe extern "C" fn( + &mut TcgHookState<5, WriteHookId>, + id: u64, + addr: GuestAddr, + size: usize, + ) + ); + 
+ self.write_hooks.push(Box::pin(TcgHookState::new( + WriteHookId::invalid(), + hook_to_repr!(generation_hook), + HookRepr::Empty, + [ + hook_to_repr!(execution_hook_1), + hook_to_repr!(execution_hook_2), + hook_to_repr!(execution_hook_4), + hook_to_repr!(execution_hook_8), + hook_to_repr!(execution_hook_n), + ], + ))); + + let hook_state = &mut *ptr::from_mut::>( + self.write_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut(), + ); + + let id = self + .qemu_hooks + .add_write_hooks(hook_state, gen, exec1, exec2, exec4, exec8, execn); + + self.write_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .set_id(id); + + id + } + } + + pub fn cmps( + &mut self, + generation_hook: CmpGenHook, + execution_hook_1: CmpExecHook, + execution_hook_2: CmpExecHook, + execution_hook_4: CmpExecHook, + execution_hook_8: CmpExecHook, + ) -> CmpHookId { + unsafe { + let gen = get_raw_hook!( + generation_hook, + cmp_gen_hook_wrapper::, + unsafe extern "C" fn( + &mut TcgHookState<4, CmpHookId>, + pc: GuestAddr, + size: usize, + ) -> u64 + ); + let exec1 = get_raw_hook!( + execution_hook_1, + cmp_0_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<4, CmpHookId>, id: u64, v0: u8, v1: u8) + ); + let exec2 = get_raw_hook!( + execution_hook_2, + cmp_1_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<4, CmpHookId>, id: u64, v0: u16, v1: u16) + ); + let exec4 = get_raw_hook!( + execution_hook_4, + cmp_2_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<4, CmpHookId>, id: u64, v0: u32, v1: u32) + ); + let exec8 = get_raw_hook!( + execution_hook_8, + cmp_3_exec_hook_wrapper::, + unsafe extern "C" fn(&mut TcgHookState<4, CmpHookId>, id: u64, v0: u64, v1: u64) + ); + + self.cmp_hooks.push(Box::pin(TcgHookState::new( + CmpHookId::invalid(), + hook_to_repr!(generation_hook), + HookRepr::Empty, + [ + hook_to_repr!(execution_hook_1), + hook_to_repr!(execution_hook_2), + hook_to_repr!(execution_hook_4), + hook_to_repr!(execution_hook_8), + ], + ))); + + let hook_state = &mut *ptr::from_mut::>( + self.cmp_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut(), + ); + + let id = self + .qemu_hooks + .add_cmp_hooks(hook_state, gen, exec1, exec2, exec4, exec8); + + self.cmp_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .set_id(id); + + id + } + } + + /// # Safety + /// Will dereference the hook as [`FatPtr`]. 
+ pub unsafe fn backdoor_closure(&mut self, hook: BackdoorHookClosure) -> BackdoorHookId { + unsafe { + let fat: FatPtr = transmute(hook); + self.backdoor_hooks + .push(Box::pin((BackdoorHookId::invalid(), fat))); + + let hook_state = &mut self + .backdoor_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .1 as *mut FatPtr; + + let id = self + .qemu_hooks + .add_backdoor_hook(&mut *hook_state, closure_backdoor_hook_wrapper::); + + self.backdoor_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .0 = id; + + id + } + } + + pub fn backdoor_function( + &self, + hook: fn(&mut EmulatorModules, Option<&mut S>, cpu: CPUArchStatePtr, pc: GuestAddr), + ) -> BackdoorHookId { + unsafe { + self.qemu_hooks + .add_backdoor_hook(transmute(hook), func_backdoor_hook_wrapper::) + } + } + + /// # Safety + /// This can call through to a potentialy unsafe `backtoor_function` + pub unsafe fn backdoor(&mut self, hook: BackdoorHook) -> Option { + match hook { + Hook::Function(f) => Some(self.backdoor_function(f)), + Hook::Closure(c) => Some(self.backdoor_closure(c)), + Hook::Raw(r) => { + let z: *const () = ptr::null::<()>(); + Some(self.qemu_hooks.add_backdoor_hook(z, r)) + } + Hook::Empty => None, // TODO error type + } + } + + pub fn thread_creation(&mut self, hook: NewThreadHook) -> Option { + match hook { + Hook::Function(f) => Some(self.thread_creation_function(f)), + Hook::Closure(c) => Some(self.thread_creation_closure(c)), + Hook::Raw(r) => { + let z: *const () = ptr::null::<()>(); + Some(self.qemu_hooks.add_new_thread_hook(z, r)) + } + Hook::Empty => None, // TODO error type + } + } + + pub fn thread_creation_function( + &mut self, + hook: fn( + &mut EmulatorModules, + Option<&mut S>, + env: CPUArchStatePtr, + tid: u32, + ) -> bool, + ) -> NewThreadHookId { + unsafe { + self.qemu_hooks + .add_new_thread_hook(transmute(hook), func_new_thread_hook_wrapper::) + } + } + + pub fn thread_creation_closure( + &mut self, + hook: NewThreadHookClosure, + ) -> NewThreadHookId { + unsafe { + let fat: FatPtr = transmute(hook); + self.new_thread_hooks + .push(Box::pin((NewThreadHookId::invalid(), fat))); + + let hook_state = &mut self + .new_thread_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .1 as *mut FatPtr; + + let id = self + .qemu_hooks + .add_new_thread_hook(&mut *hook_state, closure_new_thread_hook_wrapper::); + self.new_thread_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .0 = id; + id + } + } + + pub fn jmps( + &self, + generation_hook: Hook< + fn(&mut Self, Option<&mut S>, src: GuestAddr, dest: GuestAddr) -> Option, + Box< + dyn for<'a> FnMut( + &'a mut Self, + Option<&'a mut S>, + GuestAddr, + GuestAddr, + ) -> Option, + >, + extern "C" fn(*const (), src: GuestAddr, dest: GuestAddr) -> u64, + >, + execution_hook: Hook< + fn(&mut Self, Option<&mut S>, src: GuestAddr, dest: GuestAddr, id: u64), + Box FnMut(&'a mut Self, Option<&'a mut S>, GuestAddr, GuestAddr, u64)>, + extern "C" fn(*const (), src: GuestAddr, dest: GuestAddr, id: u64), + >, + ) -> JmpHookId { + unsafe { + let gen = get_raw_hook!( + generation_hook, + jmp_gen_hook_wrapper::, + unsafe extern "C" fn(&mut HookState<1, JmpHookId>, src: GuestAddr, dest: GuestAddr) -> u64 + ); + let exec = get_raw_hook!( + execution_hook, + jmp_0_exec_hook_wrapper::, + unsafe extern "C" fn(&mut HookState<1, JmpHookId>, src: GuestAddr, dest: GuestAddr, id: u64) + ); + JMP_HOOKS.push(Box::pin(HookState { + id: JmpHookId(0), + gen: hook_to_repr!(generation_hook), + post_gen: 
HookRepr::Empty, + execs: [hook_to_repr!(execution_hook)], + })); + let id = self + .qemu + .add_jmp_hooks(JMP_HOOKS.last_mut().unwrap().as_mut().get_unchecked_mut(), + gen, + exec + ); + JMP_HOOKS + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .id = id; + id + } + } +} + +#[cfg(feature = "usermode")] +impl EmulatorHooks +where + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, +{ + #[allow(clippy::type_complexity)] + pub fn syscalls(&mut self, hook: PreSyscallHook) -> Option { + match hook { + Hook::Function(f) => Some(self.syscalls_function(f)), + Hook::Closure(c) => Some(self.syscalls_closure(c)), + Hook::Raw(r) => { + let z: *const () = ptr::null::<()>(); + Some(self.qemu_hooks.add_pre_syscall_hook(z, r)) + } + Hook::Empty => None, // TODO error type + } + } + + #[allow(clippy::type_complexity)] + pub fn syscalls_function(&mut self, hook: PreSyscallHookFn) -> PreSyscallHookId { + // # Safety + // Will dereference the hook as [`FatPtr`]. + unsafe { + self.qemu_hooks + .add_pre_syscall_hook(transmute(hook), func_pre_syscall_hook_wrapper::) + } + } + + #[allow(clippy::type_complexity)] + pub fn syscalls_closure(&mut self, hook: PreSyscallHookClosure) -> PreSyscallHookId { + // # Safety + // Will dereference the hook as [`FatPtr`]. + unsafe { + let fat: FatPtr = transmute(hook); + + self.pre_syscall_hooks + .push(Box::pin((PreSyscallHookId::invalid(), fat))); + + let hook_state = &mut self + .pre_syscall_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .1 as *mut FatPtr; + + let id = self + .qemu_hooks + .add_pre_syscall_hook(&mut *hook_state, closure_pre_syscall_hook_wrapper::); + self.pre_syscall_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .0 = id; + id + } + } + + #[allow(clippy::type_complexity)] + pub fn after_syscalls(&mut self, hook: PostSyscallHook) -> Option { + match hook { + Hook::Function(f) => Some(self.after_syscalls_function(f)), + Hook::Closure(c) => Some(self.after_syscalls_closure(c)), + Hook::Raw(r) => { + let z: *const () = ptr::null::<()>(); + Some(self.qemu_hooks.add_post_syscall_hook(z, r)) + } + Hook::Empty => None, // TODO error type + } + } + + #[allow(clippy::type_complexity)] + pub fn after_syscalls_function(&mut self, hook: PostSyscallHookFn) -> PostSyscallHookId { + // # Safety + // Will dereference the hook as [`FatPtr`]. This should be ok. + unsafe { + self.qemu_hooks + .add_post_syscall_hook(transmute(hook), func_post_syscall_hook_wrapper::) + } + } + + #[allow(clippy::type_complexity)] + pub fn after_syscalls_closure( + &mut self, + hook: PostSyscallHookClosure, + ) -> PostSyscallHookId { + unsafe { + let fat: FatPtr = transmute(hook); + self.post_syscall_hooks + .push(Box::pin((PostSyscallHookId::invalid(), fat))); + + let hooks_state = &mut self + .post_syscall_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .1 as *mut FatPtr; + + let id = self.qemu_hooks.add_post_syscall_hook( + &mut *hooks_state, + closure_post_syscall_hook_wrapper::, + ); + self.post_syscall_hooks + .last_mut() + .unwrap() + .as_mut() + .get_unchecked_mut() + .0 = id; + id + } + } + + pub fn crash_function(&mut self, hook: fn(&mut EmulatorModules, target_signal: i32)) { + // # Safety + // Will cast the valid hook to a ptr. + self.qemu_hooks.set_crash_hook(crash_hook_wrapper::); + self.crash_hooks + .push(HookRepr::Function(hook as *const libc::c_void)); + } + + pub fn crash_closure(&mut self, hook: CrashHookClosure) { + // # Safety + // Will cast the hook to a [`FatPtr`]. 
+ unsafe { + self.qemu_hooks.set_crash_hook(crash_hook_wrapper::); + self.crash_hooks.push(HookRepr::Closure(transmute(hook))); + } + } +} + +impl Default for EmulatorHooks +where + S: Unpin + UsesInput, +{ + fn default() -> Self { + Self::new(QemuHooks::get().unwrap()) + } +} + +impl EmulatorModules +where + S: UsesInput, +{ + /// Get a mutable reference to `EmulatorModules` (supposedly initialized beforehand). + /// + /// # Safety + /// + /// This will always return a reference, but it will be incorrect if `EmulatorModules` has not + /// been initialized previously. + /// The user should also be consistent with the generic use (it will suppose they are the same + /// as the ones used at initialization time). + #[must_use] + pub unsafe fn emulator_modules_mut_unchecked<'a>() -> &'a mut EmulatorModules { + #[cfg(debug_assertions)] + { + (EMULATOR_MODULES as *mut EmulatorModules) + .as_mut() + .unwrap() + } + + #[cfg(not(debug_assertions))] + { + &mut *(EMULATOR_MODULES as *mut EmulatorModules) + } + } + + /// Get a mutable reference to `EmulatorModules`. + /// This version is safer than `emulator_modules_mut_unchecked` since it will check that + /// initialization has occurred previously. + /// + /// # Safety + /// + /// This version still presents some unsafeness: The user should be consistent with the + /// generic use (it will suppose they are the same as the ones used at initialization time). + #[must_use] + pub unsafe fn emulator_modules_mut<'a>() -> Option<&'a mut EmulatorModules> { + unsafe { (EMULATOR_MODULES as *mut EmulatorModules).as_mut() } + } +} + +impl EmulatorModules +where + ET: Unpin, + S: UsesInput + Unpin, +{ + pub fn modules_mut(&mut self) -> &mut ET { + self.modules.as_mut().get_mut() + } + + pub fn instructions( + &mut self, + addr: GuestAddr, + hook: InstructionHook, + invalidate_block: bool, + ) -> Option { + self.hooks.instructions(addr, hook, invalidate_block) + } + + pub fn instruction_function( + &mut self, + addr: GuestAddr, + hook: fn(&mut EmulatorModules, Option<&mut S>, GuestAddr), + invalidate_block: bool, + ) -> InstructionHookId { + self.hooks + .instruction_function(addr, hook, invalidate_block) + } + + pub fn instruction_closure( + &mut self, + addr: GuestAddr, + hook: InstructionHookClosure, + invalidate_block: bool, + ) -> InstructionHookId { + self.hooks.instruction_closure(addr, hook, invalidate_block) + } + + pub fn edges( + &mut self, + generation_hook: EdgeGenHook, + execution_hook: EdgeExecHook, + ) -> EdgeHookId { + self.hooks.edges(generation_hook, execution_hook) + } + + pub fn blocks( + &mut self, + generation_hook: BlockGenHook, + post_generation_hook: BlockPostGenHook, + execution_hook: BlockExecHook, + ) -> BlockHookId { + self.hooks + .blocks(generation_hook, post_generation_hook, execution_hook) + } + + #[allow(clippy::similar_names)] + pub fn reads( + &mut self, + generation_hook: ReadGenHook, + execution_hook_1: ReadExecHook, + execution_hook_2: ReadExecHook, + execution_hook_4: ReadExecHook, + execution_hook_8: ReadExecHook, + execution_hook_n: ReadExecNHook, + ) -> ReadHookId { + self.hooks.reads( + generation_hook, + execution_hook_1, + execution_hook_2, + execution_hook_4, + execution_hook_8, + execution_hook_n, + ) + } + + #[allow(clippy::similar_names)] + pub fn writes( + &mut self, + generation_hook: WriteGenHook, + execution_hook_1: WriteExecHook, + execution_hook_2: WriteExecHook, + execution_hook_4: WriteExecHook, + execution_hook_8: WriteExecHook, + execution_hook_n: WriteExecNHook, + ) -> WriteHookId { + 
self.hooks.writes( + generation_hook, + execution_hook_1, + execution_hook_2, + execution_hook_4, + execution_hook_8, + execution_hook_n, + ) + } + + pub fn cmps( + &mut self, + generation_hook: CmpGenHook, + execution_hook_1: CmpExecHook, + execution_hook_2: CmpExecHook, + execution_hook_4: CmpExecHook, + execution_hook_8: CmpExecHook, + ) -> CmpHookId { + self.hooks.cmps( + generation_hook, + execution_hook_1, + execution_hook_2, + execution_hook_4, + execution_hook_8, + ) + } + + /// # Safety + /// This will potentially call an unsafe backdoor hook + pub unsafe fn backdoor(&mut self, hook: BackdoorHook) -> Option { + self.hooks.backdoor(hook) + } + + pub fn backdoor_function(&mut self, hook: BackdoorHookFn) -> BackdoorHookId { + self.hooks.backdoor_function(hook) + } + + /// # Safety + /// Calls through to the potentially unsafe `backdoor_closure` + pub unsafe fn backdoor_closure(&mut self, hook: BackdoorHookClosure) -> BackdoorHookId { + self.hooks.backdoor_closure(hook) + } + + pub fn thread_creation(&mut self, hook: NewThreadHook) -> Option { + self.hooks.thread_creation(hook) + } + + pub fn thread_creation_function( + &mut self, + hook: fn( + &mut EmulatorModules, + Option<&mut S>, + env: CPUArchStatePtr, + tid: u32, + ) -> bool, + ) -> NewThreadHookId { + self.hooks.thread_creation_function(hook) + } + + pub fn thread_creation_closure( + &mut self, + hook: NewThreadHookClosure, + ) -> NewThreadHookId { + self.hooks.thread_creation_closure(hook) + } +} + +impl EmulatorModules +where + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, +{ + pub(super) fn new( + qemu: Qemu, + emulator_hooks: EmulatorHooks, + modules: ET, + ) -> Pin> { + let mut modules = Box::pin(Self { + qemu, + modules: Box::pin(modules), + hooks: emulator_hooks, + phantom: PhantomData, + }); + + // re-translate blocks with hooks + // qemu.flush_jit(); + // -> it should be useless, since EmulatorModules must be init before QEMU ever runs + // TODO: Check if this is true + + // Set global EmulatorModules pointer + unsafe { + if EMULATOR_MODULES.is_null() { + EMULATOR_MODULES = ptr::from_mut::(modules.as_mut().get_mut()) as *mut (); + } else { + panic!("Emulator Modules have already been set and is still active. It is not supported to have multiple instances of `EmulatorModules` at the same time yet.") + } + } + + modules + } + + pub fn post_qemu_init_all(&mut self) { + // We give access to EmulatorModuleTuple during init, the compiler complains (for good reasons) + // TODO: We should find a way to be able to check for a module without giving full access to the tuple. 
+ unsafe { + self.modules_mut() + .post_qemu_init_all(Self::emulator_modules_mut_unchecked()); + } + } + + pub fn first_exec_all(&mut self, state: &mut S) { + // # Safety + // We assume that the emulator was initialized correctly + unsafe { + self.modules_mut() + .first_exec_all(Self::emulator_modules_mut_unchecked(), state); + } + } + + pub fn pre_exec_all(&mut self, state: &mut S, input: &S::Input) { + // # Safety + // We assume that the emulator was initialized correctly + unsafe { + self.modules_mut() + .pre_exec_all(Self::emulator_modules_mut_unchecked(), state, input); + } + } + + pub fn post_exec_all( + &mut self, + state: &mut S, + input: &S::Input, + observers: &mut OT, + exit_kind: &mut ExitKind, + ) where + OT: ObserversTuple, + { + unsafe { + self.modules_mut().post_exec_all( + Self::emulator_modules_mut_unchecked(), + state, + input, + observers, + exit_kind, + ); + } + } + + /// Get a reference to the first (type) matching member of the tuple. + #[must_use] + pub fn get(&self) -> Option<&T> + where + T: EmulatorModule, + { + self.modules.match_first_type::() + } + + /// Get a mutable reference to the first (type) matching member of the tuple. + pub fn get_mut(&mut self) -> Option<&mut T> + where + T: EmulatorModule, + { + self.modules.match_first_type_mut::() + } +} + +impl EmulatorModules +where + S: UsesInput, +{ + #[must_use] + pub fn qemu(&self) -> Qemu { + self.qemu + } + + #[must_use] + pub fn modules(&self) -> &ET { + self.modules.as_ref().get_ref() + } + + pub fn hooks_mut(&mut self) -> &mut EmulatorHooks { + &mut self.hooks + } +} + +/// Usermode-only high-level functions +#[cfg(feature = "usermode")] +impl EmulatorModules +where + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, +{ + #[allow(clippy::type_complexity)] + pub fn syscalls(&mut self, hook: PreSyscallHook) -> Option { + self.hooks.syscalls(hook) + } + + /// # Safety + /// Calls through to the, potentially unsafe, `syscalls_function` + #[allow(clippy::type_complexity)] + pub unsafe fn syscalls_function( + &mut self, + hook: fn( + &mut EmulatorModules, + Option<&mut S>, + sys_num: i32, + a0: GuestAddr, + a1: GuestAddr, + a2: GuestAddr, + a3: GuestAddr, + a4: GuestAddr, + a5: GuestAddr, + a6: GuestAddr, + a7: GuestAddr, + ) -> SyscallHookResult, + ) -> PreSyscallHookId { + self.hooks.syscalls_function(hook) + } + + /// # Safety + /// Calls through to the, potentially unsafe, `syscalls_closure` + #[allow(clippy::type_complexity)] + pub unsafe fn syscalls_closure( + &mut self, + hook: Box< + dyn for<'a> FnMut( + &'a mut EmulatorModules, + Option<&'a mut S>, + i32, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + ) -> SyscallHookResult, + >, + ) -> PreSyscallHookId { + self.hooks.syscalls_closure(hook) + } + + #[allow(clippy::type_complexity)] + pub fn after_syscalls(&mut self, hook: PostSyscallHook) -> Option { + self.hooks.after_syscalls(hook) + } + + /// # Safety + /// Calls through to the, potentially unsafe, `after_syscalls_function` + #[allow(clippy::type_complexity)] + pub unsafe fn after_syscalls_function( + &mut self, + hook: fn( + &mut EmulatorModules, + Option<&mut S>, + res: GuestAddr, + sys_num: i32, + a0: GuestAddr, + a1: GuestAddr, + a2: GuestAddr, + a3: GuestAddr, + a4: GuestAddr, + a5: GuestAddr, + a6: GuestAddr, + a7: GuestAddr, + ) -> GuestAddr, + ) -> PostSyscallHookId { + self.hooks.after_syscalls_function(hook) + } + + #[allow(clippy::type_complexity)] + pub fn after_syscalls_closure( + &mut self, + hook: Box< + dyn for<'a> FnMut( + 
&'a mut EmulatorModules, + Option<&mut S>, + GuestAddr, + i32, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + ) -> GuestAddr, + >, + ) -> PostSyscallHookId { + self.hooks.after_syscalls_closure(hook) + } + + pub fn crash_function(&mut self, hook: fn(&mut EmulatorModules, target_signal: i32)) { + self.hooks.crash_function(hook); + } + + /// # Safety + /// Calls through to the, potentially unsafe, registered `crash_closure` + pub unsafe fn crash_closure(&mut self, hook: CrashHookClosure) { + self.hooks.crash_closure(hook); + } +} + +impl Drop for EmulatorModules +where + S: UsesInput, +{ + fn drop(&mut self) { + // Make the global pointer null at drop time + unsafe { + EMULATOR_MODULES = ptr::null_mut(); + } + } +} diff --git a/libafl_qemu/src/emu/mod.rs b/libafl_qemu/src/emu/mod.rs index 601a89de60..0fcf92e3f6 100644 --- a/libafl_qemu/src/emu/mod.rs +++ b/libafl_qemu/src/emu/mod.rs @@ -2,75 +2,107 @@ //! //! [`Emulator`] is built above [`Qemu`] and provides convenient abstractions. -use core::{ - fmt::{self, Debug, Display, Formatter}, - marker::PhantomData, -}; -use std::{ - cell::{OnceCell, Ref, RefCell, RefMut}, - hash::Hash, - ops::Add, - rc::Rc, -}; +use core::fmt::{self, Debug, Display, Formatter}; +use std::{cell::RefCell, ops::Add, pin::Pin}; use hashbrown::HashMap; use libafl::{ executors::ExitKind, - inputs::HasTargetBytes, + inputs::{HasTargetBytes, UsesInput}, + observers::ObserversTuple, state::{HasExecutions, State}, }; -use libafl_bolts::os::unix_signals::Signal; -use libafl_qemu_sys::{CPUArchStatePtr, GuestUsize}; -pub use libafl_qemu_sys::{GuestAddr, GuestPhysAddr, GuestVirtAddr}; -#[cfg(emulation_mode = "usermode")] -pub use libafl_qemu_sys::{MapInfo, MmapPerms, MmapPermsIter}; -use num_traits::Num; -use typed_builder::TypedBuilder; +use libafl_qemu_sys::{GuestAddr, GuestPhysAddr, GuestUsize, GuestVirtAddr}; use crate::{ - breakpoint::Breakpoint, - command::{CommandError, InputCommand, IsCommand}, - executor::QemuExecutorState, + breakpoint::{Breakpoint, BreakpointId}, + command::{CommandError, CommandManager, NopCommandManager, StdCommandManager}, + modules::EmulatorModuleTuple, sync_exit::SyncExit, - sys::TCGTemp, - BackdoorHookId, BlockHookId, CmpHookId, EdgeHookId, EmulatorMemoryChunk, GuestReg, HookData, - HookId, InstructionHookId, MemAccessInfo, Qemu, QemuExitError, QemuExitReason, QemuHelperTuple, - QemuInitError, QemuRWError, QemuShutdownCause, QemuSnapshotCheckResult, ReadHookId, Regs, - StdInstrumentationFilter, WriteHookId, CPU, + Qemu, QemuExitError, QemuExitReason, QemuHooks, QemuInitError, QemuMemoryChunk, + QemuShutdownCause, Regs, CPU, }; -#[cfg(emulation_mode = "usermode")] +mod hooks; +pub use hooks::*; + +mod builder; +pub use builder::*; + +mod drivers; +pub use drivers::*; + +mod snapshot; +pub use snapshot::*; + +#[cfg(feature = "usermode")] mod usermode; +#[cfg(feature = "usermode")] +pub use usermode::*; -#[cfg(emulation_mode = "systemmode")] +#[cfg(feature = "systemmode")] mod systemmode; -#[cfg(emulation_mode = "systemmode")] +#[cfg(feature = "systemmode")] pub use systemmode::*; -use crate::{breakpoint::BreakpointId, command::CommandManager}; - -type CommandRef = Rc>; -type BreakpointMutRef = Rc>>; - #[derive(Clone, Copy)] pub enum GuestAddrKind { Physical(GuestPhysAddr), Virtual(GuestVirtAddr), } -#[derive(Debug, Clone)] -pub enum EmulatorExitResult +pub enum EmulatorExitResult where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: 
CommandManager, + S: UsesInput, { - QemuExit(QemuShutdownCause), // QEMU ended for some reason. - Breakpoint(Rc>>), // Breakpoint triggered. Contains the address of the trigger. - SyncExit(Rc>>), // Synchronous backdoor: The guest triggered a backdoor and should return to LibAFL. + QemuExit(QemuShutdownCause), // QEMU ended for some reason. + Breakpoint(Breakpoint), // Breakpoint triggered. Contains the address of the trigger. + SyncExit(SyncExit), // Synchronous backdoor: The guest triggered a backdoor and should return to LibAFL. + Timeout, // Timeout } +impl Clone for EmulatorExitResult +where + CM: CommandManager, + S: UsesInput, +{ + fn clone(&self) -> Self { + match self { + EmulatorExitResult::QemuExit(qemu_exit) => { + EmulatorExitResult::QemuExit(qemu_exit.clone()) + } + EmulatorExitResult::Breakpoint(bp) => EmulatorExitResult::Breakpoint(bp.clone()), + EmulatorExitResult::SyncExit(sync_exit) => { + EmulatorExitResult::SyncExit(sync_exit.clone()) + } + EmulatorExitResult::Timeout => EmulatorExitResult::Timeout, + } + } +} + +impl Debug for EmulatorExitResult +where + CM: CommandManager, + S: UsesInput, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + EmulatorExitResult::QemuExit(qemu_exit) => { + write!(f, "{qemu_exit:?}") + } + EmulatorExitResult::Breakpoint(bp) => { + write!(f, "{bp:?}") + } + EmulatorExitResult::SyncExit(sync_exit) => { + write!(f, "{sync_exit:?}") + } + EmulatorExitResult::Timeout => { + write!(f, "Timeout") + } + } + } +} #[derive(Debug, Clone)] pub enum EmulatorExitError { UnknownKind, @@ -80,65 +112,48 @@ pub enum EmulatorExitError { } #[derive(Debug, Clone)] -pub enum ExitHandlerResult +pub struct InputLocation { + mem_chunk: QemuMemoryChunk, + cpu: CPU, + ret_register: Option, +} + +#[derive(Debug)] +#[allow(clippy::type_complexity)] +pub struct Emulator where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { - ReturnToHarness(EmulatorExitResult), // Return to the harness immediately. Can happen at any point of the run when the handler is not supposed to handle a request. - EndOfRun(ExitKind), // The run is over and the emulator is ready for the next iteration. 
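Compared to the removed ExitHandlerResult, the exit flow is now split in two: EmulatorExitResult (above, including the added Timeout variant) describes why QEMU stopped, while EmulatorDriverResult (below) is what the driver hands back to the executor, with end_of_run() yielding Some(ExitKind) only once the run is actually over. The following is a self-contained sketch of the shape of that decision, using local mirror types rather than the real generic ones; the actual policy lives in the driver implementations and is richer.

#[derive(Debug, PartialEq)]
enum ExitResultMirror {
    QemuExit,   // mirrors EmulatorExitResult::QemuExit(QemuShutdownCause)
    Breakpoint, // mirrors EmulatorExitResult::Breakpoint(..)
    SyncExit,   // mirrors EmulatorExitResult::SyncExit(..)
    Timeout,    // mirrors the Timeout variant added in this patch
}

#[derive(Debug, PartialEq, Clone, Copy)]
enum KindMirror {
    Ok,
    Timeout,
}

// Driver-style decision: timeouts end the run, breakpoints and sync exits are
// handed back to the caller (expressed here as None, i.e. "not end of run").
fn end_of_run(exit: &ExitResultMirror) -> Option<KindMirror> {
    match exit {
        ExitResultMirror::Timeout => Some(KindMirror::Timeout),
        // A real driver inspects the shutdown cause here (e.g. a guest panic
        // would become a crash); this sketch just ends the run normally.
        ExitResultMirror::QemuExit => Some(KindMirror::Ok),
        ExitResultMirror::Breakpoint | ExitResultMirror::SyncExit => None,
    }
}

fn main() {
    assert_eq!(end_of_run(&ExitResultMirror::Timeout), Some(KindMirror::Timeout));
    assert_eq!(end_of_run(&ExitResultMirror::QemuExit), Some(KindMirror::Ok));
    assert!(end_of_run(&ExitResultMirror::Breakpoint).is_none());
    assert!(end_of_run(&ExitResultMirror::SyncExit).is_none());
}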
+ snapshot_manager: SM, + modules: Pin>>, + command_manager: CM, + driver: ED, + breakpoints_by_addr: RefCell>>, // TODO: change to RC here + breakpoints_by_id: RefCell>>, + qemu: Qemu, } -#[derive(Debug, Clone)] -pub enum ExitHandlerError { - QemuExitReasonError(EmulatorExitError), - SMError(SnapshotManagerError), - SMCheckError(SnapshotManagerCheckError), - CommandError(CommandError), - UnhandledSignal(Signal), - MultipleSnapshotDefinition, - MultipleInputDefinition, - SnapshotNotFound, -} - -#[derive(Debug, Clone)] -pub enum SnapshotManagerError { - SnapshotIdNotFound(SnapshotId), - MemoryInconsistencies(u64), -} - -#[derive(Debug, Clone)] -pub enum SnapshotManagerCheckError { - SnapshotManagerError(SnapshotManagerError), - SnapshotCheckError(QemuSnapshotCheckResult), -} - -impl TryFrom> for ExitKind +impl EmulatorDriverResult where - CM: CommandManager + Debug, - E: EmulatorExitHandler, - QT: QemuHelperTuple + Debug, - S: State + HasExecutions + Debug, + CM: CommandManager, + S: UsesInput, { - type Error = String; - - fn try_from(value: ExitHandlerResult) -> Result { - match value { - ExitHandlerResult::ReturnToHarness(unhandled_qemu_exit) => { - Err(format!("Unhandled QEMU exit: {:?}", &unhandled_qemu_exit)) - } - ExitHandlerResult::EndOfRun(exit_kind) => Ok(exit_kind), + #[must_use] + #[allow(clippy::match_wildcard_for_single_variants)] + pub fn end_of_run(&self) -> Option { + match self { + EmulatorDriverResult::EndOfRun(exit_kind) => Some(*exit_kind), + _ => None, } } } impl Debug for GuestAddrKind { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { - GuestAddrKind::Physical(paddr) => write!(f, "vaddr {paddr:x}"), - GuestAddrKind::Virtual(vaddr) => write!(f, "paddr {vaddr:x}"), + GuestAddrKind::Physical(paddr) => write!(f, "paddr {paddr:#x}"), + GuestAddrKind::Virtual(vaddr) => write!(f, "vaddr {vaddr:#x}"), } } } @@ -155,7 +170,7 @@ impl Add for GuestAddrKind { } impl Display for GuestAddrKind { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { GuestAddrKind::Physical(phys_addr) => write!(f, "hwaddr 0x{phys_addr:x}"), GuestAddrKind::Virtual(virt_addr) => write!(f, "vaddr 0x{virt_addr:x}"), @@ -163,114 +178,21 @@ impl Display for GuestAddrKind { } } -impl From for ExitHandlerError { +impl From for EmulatorDriverError { fn from(sm_error: SnapshotManagerError) -> Self { - ExitHandlerError::SMError(sm_error) + EmulatorDriverError::SMError(sm_error) } } -impl From for ExitHandlerError { +impl From for EmulatorDriverError { fn from(sm_check_error: SnapshotManagerCheckError) -> Self { - ExitHandlerError::SMCheckError(sm_check_error) + EmulatorDriverError::SMCheckError(sm_check_error) } } -#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] -pub struct SnapshotId { - id: u64, -} - -pub trait IsSnapshotManager: Debug + Clone { - fn save(&mut self, qemu: &Qemu) -> SnapshotId; - fn restore( - &mut self, - snapshot_id: &SnapshotId, - qemu: &Qemu, - ) -> Result<(), SnapshotManagerError>; - fn do_check( - &self, - reference_snapshot_id: &SnapshotId, - qemu: &Qemu, - ) -> Result; - - fn check( - &self, - reference_snapshot_id: &SnapshotId, - qemu: &Qemu, - ) -> Result<(), SnapshotManagerCheckError> { - let check_result = self - .do_check(reference_snapshot_id, qemu) - .map_err(SnapshotManagerCheckError::SnapshotManagerError)?; - - if check_result == QemuSnapshotCheckResult::default() { - Ok(()) - } else { - 
Err(SnapshotManagerCheckError::SnapshotCheckError(check_result)) - } - } -} - -// TODO: Rework with generics for command handlers? -pub trait EmulatorExitHandler: Sized + Debug + Clone -where - QT: QemuHelperTuple, - S: State + HasExecutions, -{ - fn qemu_pre_run>( - emu: &Emulator, - qemu_executor_state: &mut QemuExecutorState, - input: &S::Input, - ); - - fn qemu_post_run>( - emu: &Emulator, - exit_reason: Result, EmulatorExitError>, - qemu_executor_state: &mut QemuExecutorState, - input: &S::Input, - ) -> Result>, ExitHandlerError>; -} - -/// Special kind of Exit handler with no data embedded. -/// As a result, it is safe to transmute from any `Emulator` implementing `EmuExitHandler` to this one, -/// since it won't use any data which could cause type confusion. -#[derive(Clone, Debug)] -pub struct NopEmulatorExitHandler; - -impl EmulatorExitHandler for NopEmulatorExitHandler -where - QT: QemuHelperTuple, - S: State + HasExecutions, -{ - fn qemu_pre_run>( - _: &Emulator, - _: &mut QemuExecutorState, - _: &S::Input, - ) { - } - - fn qemu_post_run>( - _: &Emulator, - exit_reason: Result, EmulatorExitError>, - _: &mut QemuExecutorState, - _: &S::Input, - ) -> Result>, ExitHandlerError> { - match exit_reason { - Ok(reason) => Ok(Some(ExitHandlerResult::ReturnToHarness(reason))), - Err(error) => Err(error)?, - } - } -} - -#[derive(Debug, Clone)] -pub struct InputLocation { - mem_chunk: EmulatorMemoryChunk, - cpu: CPU, - ret_register: Option, -} - impl InputLocation { #[must_use] - pub fn new(mem_chunk: EmulatorMemoryChunk, cpu: CPU, ret_register: Option) -> Self { + pub fn new(mem_chunk: QemuMemoryChunk, cpu: CPU, ret_register: Option) -> Self { Self { mem_chunk, cpu, @@ -279,158 +201,32 @@ impl InputLocation { } } -/// Synchronous Exit handler maintaining only one snapshot. -#[derive(Debug, Clone, TypedBuilder)] -pub struct StdEmulatorExitHandler -where - SM: IsSnapshotManager + Clone, -{ - snapshot_manager: RefCell, - #[builder(default)] - snapshot_id: OnceCell, - #[builder(default)] - input_location: OnceCell, -} - -impl StdEmulatorExitHandler -where - SM: IsSnapshotManager, -{ - pub fn new(snapshot_manager: SM) -> Self { - Self { - snapshot_manager: RefCell::new(snapshot_manager), - snapshot_id: OnceCell::new(), - input_location: OnceCell::new(), - } - } - - pub fn set_input_location(&self, input_location: InputLocation) -> Result<(), InputLocation> { - self.input_location.set(input_location) - } - - pub fn set_snapshot_id(&self, snapshot_id: SnapshotId) -> Result<(), SnapshotId> { - self.snapshot_id.set(snapshot_id) - } - - pub fn snapshot_id(&self) -> Option { - Some(*self.snapshot_id.get()?) 
- } - - pub fn snapshot_manager_borrow(&self) -> Ref { - self.snapshot_manager.borrow() - } - - pub fn snapshot_manager_borrow_mut(&self) -> RefMut { - self.snapshot_manager.borrow_mut() - } -} - -// TODO: replace handlers with generics to permit compile-time customization of handlers -impl EmulatorExitHandler for StdEmulatorExitHandler -where - QT: QemuHelperTuple + StdInstrumentationFilter + Debug, - S: State + HasExecutions, - S::Input: HasTargetBytes, - SM: IsSnapshotManager, -{ - fn qemu_pre_run>( - emu: &Emulator, - qemu_executor_state: &mut QemuExecutorState, - input: &S::Input, - ) { - let exit_handler = emu.exit_handler.borrow(); - - if let Some(input_location) = exit_handler.input_location.get() { - let input_command = - InputCommand::new(input_location.mem_chunk.clone(), input_location.cpu); - input_command - .run(emu, qemu_executor_state, input, input_location.ret_register) - .unwrap(); - } - } - - fn qemu_post_run>( - emu: &Emulator, - exit_reason: Result, EmulatorExitError>, - qemu_executor_state: &mut QemuExecutorState, - input: &S::Input, - ) -> Result>, ExitHandlerError> { - let exit_handler = emu.exit_handler().borrow_mut(); - let qemu = emu.qemu(); - - let mut exit_reason = match exit_reason { - Ok(exit_reason) => exit_reason, - Err(exit_error) => match exit_error { - EmulatorExitError::UnexpectedExit => { - if let Some(snapshot_id) = exit_handler.snapshot_id.get() { - exit_handler - .snapshot_manager - .borrow_mut() - .restore(snapshot_id, qemu)?; - } - return Ok(Some(ExitHandlerResult::EndOfRun(ExitKind::Crash))); - } - _ => Err(exit_error)?, - }, - }; - - #[allow(clippy::type_complexity)] - let (command, ret_reg): (Option>, Option) = - match &mut exit_reason { - EmulatorExitResult::QemuExit(shutdown_cause) => match shutdown_cause { - QemuShutdownCause::HostSignal(signal) => { - signal.handle(); - return Err(ExitHandlerError::UnhandledSignal(*signal)); - } - QemuShutdownCause::GuestPanic => { - return Ok(Some(ExitHandlerResult::EndOfRun(ExitKind::Crash))) - } - _ => panic!("Unhandled QEMU shutdown cause: {shutdown_cause:?}."), - }, - EmulatorExitResult::Breakpoint(bp) => (bp.borrow_mut().trigger(qemu), None), - EmulatorExitResult::SyncExit(sync_backdoor) => { - let sync_backdoor = sync_backdoor.borrow(); - let command = sync_backdoor.command(); - (Some(command), Some(sync_backdoor.ret_reg())) - } - }; - - // manually drop ref cell here to avoid keeping it alive in cmd. 
- drop(exit_handler); - - if let Some(cmd) = command { - cmd.run(emu, qemu_executor_state, input, ret_reg) - } else { - Ok(Some(ExitHandlerResult::ReturnToHarness(exit_reason))) - } - } -} - -impl From for ExitHandlerError { +impl From for EmulatorDriverError { fn from(error: EmulatorExitError) -> Self { - ExitHandlerError::QemuExitReasonError(error) + EmulatorDriverError::QemuExitReasonError(error) } } -impl From for ExitHandlerError { +impl From for EmulatorDriverError { fn from(error: CommandError) -> Self { - ExitHandlerError::CommandError(error) + EmulatorDriverError::CommandError(error) } } -impl Display for EmulatorExitResult +impl Display for EmulatorExitResult where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match self { EmulatorExitResult::QemuExit(shutdown_cause) => write!(f, "End: {shutdown_cause:?}"), - EmulatorExitResult::Breakpoint(bp) => write!(f, "{}", bp.borrow()), + EmulatorExitResult::Breakpoint(bp) => write!(f, "{bp}"), EmulatorExitResult::SyncExit(sync_exit) => { - write!(f, "Sync exit: {}", sync_exit.borrow()) + write!(f, "Sync exit: {sync_exit:?}") + } + EmulatorExitResult::Timeout => { + write!(f, "Timeout") } } } @@ -442,167 +238,261 @@ impl From for EmulatorExitError { } } -#[derive(Clone, Debug, TypedBuilder)] -pub struct Emulator +impl Emulator where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + S: UsesInput, { - command_manager: CM, - exit_handler: RefCell, - #[builder(default)] - breakpoints_by_addr: RefCell>>, - #[builder(default)] - breakpoints_by_id: RefCell>>, - qemu: Qemu, - _phantom: PhantomData<(QT, S)>, + #[must_use] + pub fn empty( + ) -> EmulatorBuilder { + EmulatorBuilder::empty() + } } -#[allow(clippy::unused_self)] -impl Emulator +impl Emulator, StdEmulatorDriver, (), S, StdSnapshotManager> where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + S: State + HasExecutions + Unpin, + S::Input: HasTargetBytes, +{ + #[must_use] + pub fn builder( + ) -> EmulatorBuilder, StdEmulatorDriver, (), S, StdSnapshotManager> { + EmulatorBuilder::default() + } +} + +impl Emulator +where + CM: CommandManager, + S: UsesInput, +{ + pub fn modules(&self) -> &EmulatorModules { + &self.modules + } + + #[must_use] + pub fn qemu(&self) -> Qemu { + self.qemu + } + + #[must_use] + pub fn driver(&self) -> &ED { + &self.driver + } + + #[must_use] + pub fn driver_mut(&mut self) -> &mut ED { + &mut self.driver + } + + #[must_use] + pub fn snapshot_manager(&self) -> &SM { + &self.snapshot_manager + } + + #[must_use] + pub fn snapshot_manager_mut(&mut self) -> &mut SM { + &mut self.snapshot_manager + } + + pub fn command_manager(&self) -> &CM { + &self.command_manager + } + + pub fn command_manager_mut(&mut self) -> &mut CM { + &mut self.command_manager + } +} + +impl Emulator +where + CM: CommandManager, + ET: Unpin, + S: UsesInput + Unpin, +{ + pub fn modules_mut(&mut self) -> &mut EmulatorModules { + self.modules.as_mut().get_mut() + } +} + +impl Emulator +where + CM: CommandManager, + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, { #[allow(clippy::must_use_candidate, clippy::similar_names)] pub fn new( - args: &[String], - env: &[(String, String)], - exit_handler: E, + qemu_args: &[String], + modules: ET, + driver: ED, + snapshot_manager: SM, command_manager: CM, ) -> Result { - let qemu = Qemu::init(args, env)?; + let mut 
emulator_hooks = unsafe { EmulatorHooks::new(QemuHooks::get_unchecked()) }; - Self::new_with_qemu(qemu, exit_handler, command_manager) + modules.pre_qemu_init_all(&mut emulator_hooks); + + let qemu = Qemu::init(qemu_args)?; + + unsafe { + Ok(Self::new_with_qemu( + qemu, + emulator_hooks, + modules, + driver, + snapshot_manager, + command_manager, + )) + } } - pub fn new_with_qemu( + /// New emulator with already initialized QEMU. + /// We suppose modules init hooks have already been run. + /// + /// # Safety + /// + /// pre-init qemu hooks should be run by then. + pub(crate) unsafe fn new_with_qemu( qemu: Qemu, - exit_handler: E, + emulator_hooks: EmulatorHooks, + modules: ET, + driver: ED, + snapshot_manager: SM, command_manager: CM, - ) -> Result { - Ok(Emulator { + ) -> Self { + let mut emulator = Emulator { + modules: EmulatorModules::new(qemu, emulator_hooks, modules), command_manager, - exit_handler: RefCell::new(exit_handler), + snapshot_manager, + driver, breakpoints_by_addr: RefCell::new(HashMap::new()), breakpoints_by_id: RefCell::new(HashMap::new()), - _phantom: PhantomData, qemu, - }) + }; + + emulator.modules.post_qemu_init_all(); + + emulator + } +} + +impl Emulator +where + CM: CommandManager, + ED: EmulatorDriver, + ET: EmulatorModuleTuple + Unpin, + S: UsesInput + Unpin, +{ + /// This function will run the emulator until the exit handler decides to stop the execution for + /// whatever reason, depending on the choosen handler. + /// It is a higher-level abstraction of [`Emulator::run`] that will take care of some part of the runtime logic, + /// returning only when something interesting happen. + /// + /// # Safety + /// Should, in general, be safe to call. + /// Of course, the emulated target is not contained securely and can corrupt state or interact with the operating system. + pub unsafe fn run( + &mut self, + state: &mut S, + input: &S::Input, + ) -> Result, EmulatorDriverError> { + loop { + // Insert input if the location is already known + ED::pre_qemu_exec(self, input); + + // Run QEMU + let mut exit_reason = self.run_qemu(); + + // Handle QEMU exit + if let Some(exit_handler_result) = + ED::post_qemu_exec(self, state, &mut exit_reason, input)? + { + return Ok(exit_handler_result); + } + } } - #[must_use] - pub fn qemu(&self) -> &Qemu { - &self.qemu + /// This function will run the emulator until the next breakpoint, or until finish. + /// # Safety + /// + /// Should, in general, be safe to call. + /// Of course, the emulated target is not contained securely and can corrupt state or interact with the operating system. + pub unsafe fn run_qemu( + &self, + ) -> Result, EmulatorExitError> { + match self.qemu.run() { + Ok(qemu_exit_reason) => Ok(match qemu_exit_reason { + QemuExitReason::End(qemu_shutdown_cause) => { + EmulatorExitResult::QemuExit(qemu_shutdown_cause) + } + QemuExitReason::Timeout => EmulatorExitResult::Timeout, + QemuExitReason::Breakpoint(bp_addr) => { + let bp = self + .breakpoints_by_addr + .borrow() + .get(&bp_addr) + .ok_or(EmulatorExitError::BreakpointNotFound(bp_addr))? 
+ .clone(); + EmulatorExitResult::Breakpoint(bp.clone()) + } + QemuExitReason::SyncExit => EmulatorExitResult::SyncExit(SyncExit::new( + self.command_manager.parse(self.qemu)?, + )), + }), + Err(qemu_exit_reason_error) => Err(match qemu_exit_reason_error { + QemuExitError::UnexpectedExit => EmulatorExitError::UnexpectedExit, + QemuExitError::UnknownKind => EmulatorExitError::UnknownKind, + }), + } } - #[must_use] - pub fn exit_handler(&self) -> &RefCell { - &self.exit_handler + /// First exec of Emulator, called before calling to user harness the first time + pub fn first_exec(&mut self, state: &mut S) { + ED::first_harness_exec(self, state); } - #[must_use] - #[allow(clippy::cast_possible_wrap)] - #[allow(clippy::cast_sign_loss)] - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn num_cpus(&self) -> usize { - self.qemu.num_cpus() + /// Pre exec of Emulator, called before calling to user harness + pub fn pre_exec(&mut self, state: &mut S, input: &S::Input) { + ED::pre_harness_exec(self, state, input); } - #[must_use] - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn current_cpu(&self) -> Option { - self.qemu.current_cpu() - } - - #[must_use] - #[allow(clippy::cast_possible_wrap)] - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn cpu_from_index(&self, index: usize) -> CPU { - self.qemu.cpu_from_index(index) - } - - #[must_use] - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn page_from_addr(&self, addr: GuestAddr) -> GuestAddr { - self.qemu.page_from_addr(addr) - } - - //#[must_use] - /*pub fn page_size() -> GuestUsize { - unsafe { libafl_page_size } - }*/ - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub unsafe fn write_mem(&self, addr: GuestAddr, buf: &[u8]) { - self.qemu.write_mem(addr, buf); - } - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub unsafe fn read_mem(&self, addr: GuestAddr, buf: &mut [u8]) { - self.qemu.read_mem(addr, buf); - } - - #[must_use] - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn num_regs(&self) -> i32 { - self.qemu.num_regs() - } - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn write_reg(&self, reg: R, val: T) -> Result<(), QemuRWError> - where - T: Num + PartialOrd + Copy + Into, - R: Into + Clone, + /// Post exec of Emulator, called before calling to user harness + pub fn post_exec( + &mut self, + input: &S::Input, + observers: &mut OT, + state: &mut S, + exit_kind: &mut ExitKind, + ) where + OT: ObserversTuple, { - self.qemu.write_reg(reg, val) + ED::post_harness_exec(self, input, observers, state, exit_kind); } +} - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." 
- )] - pub fn read_reg(&self, reg: R) -> Result - where - T: Num + PartialOrd + Copy + From, - R: Into + Clone, - { - self.qemu.read_reg(reg) - } - - pub fn add_breakpoint(&self, mut bp: Breakpoint, enable: bool) -> BreakpointId { +#[allow(clippy::unused_self)] +impl Emulator +where + CM: CommandManager, + S: UsesInput, +{ + pub fn add_breakpoint( + &self, + mut bp: Breakpoint, + enable: bool, + ) -> BreakpointId { if enable { - bp.enable(&self.qemu); + bp.enable(self.qemu); } let bp_id = bp.id(); let bp_addr = bp.addr(); - let bp_ref = Rc::new(RefCell::new(bp)); - assert!( self.breakpoints_by_addr .borrow_mut() - .insert(bp_addr, bp_ref.clone()) + .insert(bp_addr, bp.clone()) .is_none(), "Adding multiple breakpoints at the same address" ); @@ -610,7 +500,7 @@ where assert!( self.breakpoints_by_id .borrow_mut() - .insert(bp_id, bp_ref) + .insert(bp_id, bp) .is_none(), "Adding the same breakpoint multiple times" ); @@ -621,11 +511,8 @@ where pub fn remove_breakpoint(&self, bp_id: BreakpointId) { let bp_addr = { let mut bp_map = self.breakpoints_by_id.borrow_mut(); - let mut bp = bp_map - .get_mut(&bp_id) - .expect("Did not find the breakpoint") - .borrow_mut(); - bp.disable(&self.qemu); + let bp = bp_map.get_mut(&bp_id).expect("Did not find the breakpoint"); + bp.disable(self.qemu); bp.addr() }; @@ -638,210 +525,4 @@ where .remove(&bp_addr) .expect("Could not remove bp"); } - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn entry_break(&self, addr: GuestAddr) { - self.qemu.entry_break(addr); - } - - /// This function will run the emulator until the next breakpoint, or until finish. - /// # Safety - /// - /// Should, in general, be safe to call. - /// Of course, the emulated target is not contained securely and can corrupt state or interact with the operating system. - unsafe fn run_qemu(&self) -> Result, EmulatorExitError> { - match self.qemu.run() { - Ok(qemu_exit_reason) => Ok(match qemu_exit_reason { - QemuExitReason::End(qemu_shutdown_cause) => { - EmulatorExitResult::QemuExit(qemu_shutdown_cause) - } - QemuExitReason::Breakpoint(bp_addr) => { - let bp = self - .breakpoints_by_addr - .borrow() - .get(&bp_addr) - .ok_or(EmulatorExitError::BreakpointNotFound(bp_addr))? - .clone(); - EmulatorExitResult::Breakpoint(bp.clone()) - } - QemuExitReason::SyncExit => EmulatorExitResult::SyncExit(Rc::new(RefCell::new( - SyncExit::new(self.command_manager.parse(self.qemu)?), - ))), - }), - Err(qemu_exit_reason_error) => Err(match qemu_exit_reason_error { - QemuExitError::UnexpectedExit => EmulatorExitError::UnexpectedExit, - QemuExitError::UnknownKind => EmulatorExitError::UnknownKind, - }), - } - } - - /// This function will run the emulator until the exit handler decides to stop the execution for - /// whatever reason, depending on the choosen handler. - /// It is a higher-level abstraction of [`Emulator::run`] that will take care of some part of the runtime logic, - /// returning only when something interesting happen. - /// - /// # Safety - /// Should, in general, be safe to call. - /// Of course, the emulated target is not contained securely and can corrupt state or interact with the operating system. 
- pub unsafe fn run( - &self, - input: &S::Input, - qemu_executor_state: &mut QemuExecutorState, - ) -> Result, ExitHandlerError> { - loop { - // Insert input if the location is already known - E::qemu_pre_run(self, qemu_executor_state, input); - - // Run QEMU - let exit_reason = self.run_qemu(); - - // Handle QEMU exit - if let Some(exit_handler_result) = - E::qemu_post_run(self, exit_reason, qemu_executor_state, input)? - { - return Ok(exit_handler_result); - } - } - } - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn flush_jit(&self) { - self.qemu.flush_jit(); - } - - // TODO set T lifetime to be like Emulator - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn set_hook>( - &self, - data: T, - addr: GuestAddr, - callback: extern "C" fn(T, GuestAddr), - invalidate_block: bool, - ) -> InstructionHookId { - self.qemu.set_hook(data, addr, callback, invalidate_block) - } - - #[must_use] - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn remove_hook(&self, id: impl HookId, invalidate_block: bool) -> bool { - self.qemu.remove_hook(id, invalidate_block) - } - - #[must_use] - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn remove_hooks_at(&self, addr: GuestAddr, invalidate_block: bool) -> usize { - self.qemu.remove_hooks_at(addr, invalidate_block) - } - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn add_edge_hooks>( - &self, - data: T, - gen: Option u64>, - exec: Option, - ) -> EdgeHookId { - self.qemu.add_edge_hooks(data, gen, exec) - } - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn add_block_hooks>( - &self, - data: T, - gen: Option u64>, - post_gen: Option, - exec: Option, - ) -> BlockHookId { - self.qemu.add_block_hooks(data, gen, post_gen, exec) - } - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn add_read_hooks>( - &self, - data: T, - gen: Option u64>, - exec1: Option, - exec2: Option, - exec4: Option, - exec8: Option, - exec_n: Option, - ) -> ReadHookId { - self.qemu - .add_read_hooks(data, gen, exec1, exec2, exec4, exec8, exec_n) - } - - // TODO add MemOp info - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn add_write_hooks>( - &self, - data: T, - gen: Option u64>, - exec1: Option, - exec2: Option, - exec4: Option, - exec8: Option, - exec_n: Option, - ) -> WriteHookId { - self.qemu - .add_write_hooks(data, gen, exec1, exec2, exec4, exec8, exec_n) - } - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn add_cmp_hooks>( - &self, - data: T, - gen: Option u64>, - exec1: Option, - exec2: Option, - exec4: Option, - exec8: Option, - ) -> CmpHookId { - self.qemu - .add_cmp_hooks(data, gen, exec1, exec2, exec4, exec8) - } - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." 
- )] - pub fn add_backdoor_hook>( - &self, - data: T, - callback: extern "C" fn(T, CPUArchStatePtr, GuestAddr), - ) -> BackdoorHookId { - self.qemu.add_backdoor_hook(data, callback) - } - - #[allow(clippy::type_complexity)] - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn add_gdb_cmd(&self, callback: Box bool>) { - self.qemu.add_gdb_cmd(callback); - } - - #[deprecated( - note = "This function has been moved to the `Qemu` low-level structure. Please access it through `emu.qemu()`." - )] - pub fn gdb_reply(&self, output: &str) { - self.qemu.gdb_reply(output); - } } diff --git a/libafl_qemu/src/emu/snapshot.rs b/libafl_qemu/src/emu/snapshot.rs new file mode 100644 index 0000000000..8d9f1a2c55 --- /dev/null +++ b/libafl_qemu/src/emu/snapshot.rs @@ -0,0 +1,121 @@ +use std::{ + fmt::Debug, + sync::atomic::{AtomicU64, Ordering}, +}; + +use crate::Qemu; + +pub trait IsSnapshotManager: Clone + Debug { + fn init(&mut self, _qemu: Qemu) {} + + fn save(&mut self, qemu: Qemu) -> SnapshotId; + fn restore(&mut self, qemu: Qemu, snapshot_id: &SnapshotId) + -> Result<(), SnapshotManagerError>; + fn do_check( + &self, + qemu: Qemu, + reference_snapshot_id: &SnapshotId, + ) -> Result; + + fn check( + &self, + qemu: Qemu, + reference_snapshot_id: &SnapshotId, + ) -> Result<(), SnapshotManagerCheckError> { + let check_result = self + .do_check(qemu, reference_snapshot_id) + .map_err(SnapshotManagerCheckError::SnapshotManagerError)?; + + if check_result == QemuSnapshotCheckResult::default() { + Ok(()) + } else { + Err(SnapshotManagerCheckError::SnapshotCheckError(check_result)) + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct QemuSnapshotCheckResult { + nb_page_inconsistencies: u64, +} + +#[derive(Debug, Clone)] +pub enum SnapshotManagerError { + SnapshotIdNotFound(SnapshotId), + MemoryInconsistencies(u64), +} + +#[derive(Debug, Clone)] +pub enum SnapshotManagerCheckError { + SnapshotManagerError(SnapshotManagerError), + SnapshotCheckError(QemuSnapshotCheckResult), +} + +#[derive(Debug, Clone, Copy)] +pub struct NopSnapshotManager; + +impl Default for NopSnapshotManager { + fn default() -> Self { + NopSnapshotManager + } +} + +impl IsSnapshotManager for NopSnapshotManager { + fn save(&mut self, _qemu: Qemu) -> SnapshotId { + SnapshotId { id: 0 } + } + + fn restore( + &mut self, + _qemu: Qemu, + _snapshot_id: &SnapshotId, + ) -> Result<(), SnapshotManagerError> { + Ok(()) + } + + fn do_check( + &self, + _qemu: Qemu, + _reference_snapshot_id: &SnapshotId, + ) -> Result { + Ok(QemuSnapshotCheckResult::default()) + } +} + +#[derive(Debug, PartialEq, Eq, Hash, Clone, Copy)] +pub struct SnapshotId { + id: u64, +} + +/// Represents a QEMU snapshot check result for which no error was detected +impl Default for QemuSnapshotCheckResult { + fn default() -> Self { + Self { + nb_page_inconsistencies: 0, + } + } +} + +impl QemuSnapshotCheckResult { + #[must_use] + pub fn new(nb_page_inconsistencies: u64) -> Self { + Self { + nb_page_inconsistencies, + } + } +} + +impl SnapshotId { + pub fn gen_unique_id() -> SnapshotId { + static UNIQUE_ID: AtomicU64 = AtomicU64::new(0); + + let unique_id = UNIQUE_ID.fetch_add(1, Ordering::SeqCst); + + SnapshotId { id: unique_id } + } + + #[must_use] + pub fn inner(&self) -> u64 { + self.id + } +} diff --git a/libafl_qemu/src/emu/systemmode.rs b/libafl_qemu/src/emu/systemmode.rs index 3dae121411..450144e393 100644 --- a/libafl_qemu/src/emu/systemmode.rs +++ 
b/libafl_qemu/src/emu/systemmode.rs @@ -1,40 +1,25 @@ -use std::{ - fmt::Debug, - sync::atomic::{AtomicU64, Ordering}, -}; +use std::fmt::Debug; use hashbrown::HashMap; -use libafl::state::{HasExecutions, State}; +use libafl::inputs::UsesInput; use libafl_qemu_sys::GuestPhysAddr; use crate::{ - command::CommandManager, emu::IsSnapshotManager, DeviceSnapshotFilter, Emulator, - EmulatorExitHandler, Qemu, QemuHelperTuple, QemuSnapshotCheckResult, SnapshotId, - SnapshotManagerError, + command::CommandManager, + emu::{IsSnapshotManager, QemuSnapshotCheckResult}, + DeviceSnapshotFilter, Emulator, Qemu, SnapshotId, SnapshotManagerError, }; -impl SnapshotId { - fn gen_unique_id() -> SnapshotId { - static UNIQUE_ID: AtomicU64 = AtomicU64::new(0); - - let unique_id = UNIQUE_ID.fetch_add(1, Ordering::SeqCst); - - SnapshotId { id: unique_id } - } - - fn inner(&self) -> u64 { - self.id - } -} - #[derive(Debug, Clone)] pub enum SnapshotManager { Qemu(QemuSnapshotManager), Fast(FastSnapshotManager), } +pub type StdSnapshotManager = FastSnapshotManager; + impl IsSnapshotManager for SnapshotManager { - fn save(&mut self, qemu: &Qemu) -> SnapshotId { + fn save(&mut self, qemu: Qemu) -> SnapshotId { match self { SnapshotManager::Qemu(qemu_sm) => qemu_sm.save(qemu), SnapshotManager::Fast(fast_sm) => fast_sm.save(qemu), @@ -43,23 +28,23 @@ impl IsSnapshotManager for SnapshotManager { fn restore( &mut self, + qemu: Qemu, snapshot_id: &SnapshotId, - qemu: &Qemu, ) -> Result<(), SnapshotManagerError> { match self { - SnapshotManager::Qemu(qemu_sm) => qemu_sm.restore(snapshot_id, qemu), - SnapshotManager::Fast(fast_sm) => fast_sm.restore(snapshot_id, qemu), + SnapshotManager::Qemu(qemu_sm) => qemu_sm.restore(qemu, snapshot_id), + SnapshotManager::Fast(fast_sm) => fast_sm.restore(qemu, snapshot_id), } } fn do_check( &self, + qemu: Qemu, reference_snapshot_id: &SnapshotId, - qemu: &Qemu, ) -> Result { match self { - SnapshotManager::Qemu(qemu_sm) => qemu_sm.do_check(reference_snapshot_id, qemu), - SnapshotManager::Fast(fast_sm) => fast_sm.do_check(reference_snapshot_id, qemu), + SnapshotManager::Qemu(qemu_sm) => qemu_sm.do_check(qemu, reference_snapshot_id), + SnapshotManager::Fast(fast_sm) => fast_sm.do_check(qemu, reference_snapshot_id), } } } @@ -94,6 +79,12 @@ pub struct QemuSnapshotManager { is_sync: bool, } +impl Default for QemuSnapshotManager { + fn default() -> Self { + QemuSnapshotManager::new(true) + } +} + impl QemuSnapshotManager { pub fn new(is_sync: bool) -> Self { Self { is_sync } @@ -105,7 +96,7 @@ impl QemuSnapshotManager { } impl IsSnapshotManager for QemuSnapshotManager { - fn save(&mut self, qemu: &Qemu) -> SnapshotId { + fn save(&mut self, qemu: Qemu) -> SnapshotId { let snapshot_id = SnapshotId::gen_unique_id(); qemu.save_snapshot( self.snapshot_id_to_name(&snapshot_id).as_str(), @@ -116,8 +107,8 @@ impl IsSnapshotManager for QemuSnapshotManager { fn restore( &mut self, + qemu: Qemu, snapshot_id: &SnapshotId, - qemu: &Qemu, ) -> Result<(), SnapshotManagerError> { qemu.load_snapshot(self.snapshot_id_to_name(snapshot_id).as_str(), self.is_sync); Ok(()) @@ -125,8 +116,8 @@ impl IsSnapshotManager for QemuSnapshotManager { fn do_check( &self, + _qemu: Qemu, _reference_snapshot_id: &SnapshotId, - _qemu: &Qemu, ) -> Result { // We consider the qemu implementation to be 'ideal' for now. 
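The new snapshot.rs trait above makes the snapshot backend pluggable: besides the QEMU and fast managers wired up in systemmode, a custom manager only has to provide save/restore/do_check. A hedged sketch of such an implementation follows; CountingSnapshotManager is hypothetical, the import path is assumed to be a crate-root re-export of the items defined in emu/snapshot.rs, and the do_check return type is reconstructed from the default check() implementation.

use libafl_qemu::{
    IsSnapshotManager, Qemu, QemuSnapshotCheckResult, SnapshotId, SnapshotManagerError,
};

/// Hypothetical manager: counts save/restore calls and otherwise behaves like the
/// NopSnapshotManager introduced in this patch.
#[derive(Debug, Clone, Default)]
pub struct CountingSnapshotManager {
    saves: u64,
    restores: u64,
}

impl IsSnapshotManager for CountingSnapshotManager {
    fn save(&mut self, _qemu: Qemu) -> SnapshotId {
        self.saves += 1;
        SnapshotId::gen_unique_id()
    }

    fn restore(
        &mut self,
        _qemu: Qemu,
        _snapshot_id: &SnapshotId,
    ) -> Result<(), SnapshotManagerError> {
        self.restores += 1;
        Ok(())
    }

    fn do_check(
        &self,
        _qemu: Qemu,
        _reference_snapshot_id: &SnapshotId,
    ) -> Result<QemuSnapshotCheckResult, SnapshotManagerError> {
        // No real consistency check; report "no inconsistencies" like the nop manager.
        Ok(QemuSnapshotCheckResult::default())
    }
}

Such a manager can then be passed to Emulator::new (or the builder) in place of StdSnapshotManager.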
Ok(QemuSnapshotCheckResult::default()) @@ -134,7 +125,7 @@ impl IsSnapshotManager for QemuSnapshotManager { } impl IsSnapshotManager for FastSnapshotManager { - fn save(&mut self, qemu: &Qemu) -> SnapshotId { + fn save(&mut self, qemu: Qemu) -> SnapshotId { let snapshot_id = SnapshotId::gen_unique_id(); self.snapshots .insert(snapshot_id, qemu.create_fast_snapshot(true)); @@ -143,8 +134,8 @@ impl IsSnapshotManager for FastSnapshotManager { fn restore( &mut self, + qemu: Qemu, snapshot_id: &SnapshotId, - qemu: &Qemu, ) -> Result<(), SnapshotManagerError> { let fast_snapshot_ptr = *self .snapshots @@ -160,8 +151,8 @@ impl IsSnapshotManager for FastSnapshotManager { fn do_check( &self, + qemu: Qemu, reference_snapshot_id: &SnapshotId, - qemu: &Qemu, ) -> Result { let fast_snapshot_ptr = *self.snapshots.get(reference_snapshot_id).ok_or( SnapshotManagerError::SnapshotIdNotFound(*reference_snapshot_id), @@ -171,12 +162,10 @@ impl IsSnapshotManager for FastSnapshotManager { } } -impl Emulator +impl Emulator where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { /// Write a value to a phsical guest address, including ROM areas. pub unsafe fn write_phys_mem(&self, paddr: GuestPhysAddr, buf: &[u8]) { diff --git a/libafl_qemu/src/emu/usermode.rs b/libafl_qemu/src/emu/usermode.rs index 5295a12948..0866a3d5b0 100644 --- a/libafl_qemu/src/emu/usermode.rs +++ b/libafl_qemu/src/emu/usermode.rs @@ -1,18 +1,14 @@ +use libafl::inputs::UsesInput; use libafl_qemu_sys::{GuestAddr, MmapPerms, VerifyAccess}; -use crate::{ - command::CommandManager, - emu::{HasExecutions, State}, - Emulator, EmulatorExitHandler, GuestMaps, HookData, NewThreadHookId, PostSyscallHookId, - PreSyscallHookId, QemuHelperTuple, SyscallHookResult, -}; +use crate::{command::CommandManager, Emulator, GuestMaps, NopSnapshotManager}; -impl Emulator +pub type StdSnapshotManager = NopSnapshotManager; + +impl Emulator where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { /// This function gets the memory mappings from the emulator. #[must_use] @@ -92,58 +88,4 @@ where pub fn unmap(&self, addr: GuestAddr, size: usize) -> Result<(), String> { self.qemu.unmap(addr, size) } - - #[allow(clippy::type_complexity)] - pub fn add_pre_syscall_hook>( - &self, - data: T, - callback: extern "C" fn( - T, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> SyscallHookResult, - ) -> PreSyscallHookId { - self.qemu.add_pre_syscall_hook(data, callback) - } - - #[allow(clippy::type_complexity)] - pub fn add_post_syscall_hook>( - &self, - data: T, - callback: extern "C" fn( - T, - GuestAddr, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> GuestAddr, - ) -> PostSyscallHookId { - self.qemu.add_post_syscall_hook(data, callback) - } - - pub fn add_new_thread_hook>( - &self, - data: T, - callback: extern "C" fn(T, tid: u32) -> bool, - ) -> NewThreadHookId { - self.qemu.add_new_thread_hook(data, callback) - } - - #[allow(clippy::type_complexity)] - pub fn set_crash_hook(&self, callback: extern "C" fn(i32)) { - self.qemu.set_crash_hook(callback); - } } diff --git a/libafl_qemu/src/executor.rs b/libafl_qemu/src/executor.rs new file mode 100644 index 0000000000..6a56c06c4b --- /dev/null +++ b/libafl_qemu/src/executor.rs @@ -0,0 +1,483 @@ +//! 
A `QEMU`-based executor for binary-only instrumentation in `LibAFL` +use core::{ + ffi::c_void, + fmt::{self, Debug, Formatter}, + time::Duration, +}; +#[cfg(feature = "usermode")] +use std::ptr; +#[cfg(feature = "systemmode")] +use std::sync::atomic::{AtomicBool, Ordering}; + +use libafl::{ + corpus::Corpus, + events::{EventFirer, EventRestarter}, + executors::{ + hooks::inprocess::InProcessExecutorHandlerData, + inprocess::{stateful::StatefulInProcessExecutor, HasInProcessHooks}, + inprocess_fork::stateful::StatefulInProcessForkExecutor, + Executor, ExitKind, HasObservers, + }, + feedbacks::Feedback, + fuzzer::HasObjective, + inputs::UsesInput, + observers::ObserversTuple, + state::{HasCorpus, HasExecutions, HasSolutions, State, UsesState}, + Error, ExecutionProcessor, HasScheduler, +}; +#[cfg(feature = "fork")] +use libafl_bolts::shmem::ShMemProvider; +use libafl_bolts::{ + os::unix_signals::{ucontext_t, Signal}, + tuples::RefIndexable, +}; +#[cfg(feature = "systemmode")] +use libafl_qemu_sys::libafl_exit_request_timeout; +#[cfg(feature = "usermode")] +use libafl_qemu_sys::libafl_qemu_handle_crash; +use libc::siginfo_t; + +#[cfg(feature = "usermode")] +use crate::EmulatorModules; +use crate::{command::CommandManager, modules::EmulatorModuleTuple, Emulator, EmulatorDriver}; + +pub struct QemuExecutor<'a, CM, ED, ET, H, OT, S, SM> +where + CM: CommandManager, + ET: EmulatorModuleTuple, + H: FnMut(&mut Emulator, &mut S, &S::Input) -> ExitKind, + OT: ObserversTuple, + S: State, +{ + inner: StatefulInProcessExecutor<'a, H, OT, S, Emulator>, + first_exec: bool, +} + +/// # Safety +/// +/// This should be used as a crash handler, and nothing else. +#[cfg(feature = "usermode")] +unsafe fn inproc_qemu_crash_handler( + signal: Signal, + info: &mut siginfo_t, + mut context: Option<&mut ucontext_t>, + _data: &mut InProcessExecutorHandlerData, +) where + ET: EmulatorModuleTuple, + S: UsesInput + Unpin, +{ + let puc = match &mut context { + Some(v) => ptr::from_mut::(*v) as *mut c_void, + None => ptr::null_mut(), + }; + + // run modules' crash callback + if let Some(emulator_modules) = EmulatorModules::::emulator_modules_mut() { + emulator_modules.modules_mut().on_crash_all(); + } + + libafl_qemu_handle_crash(signal as i32, info, puc); +} + +#[cfg(feature = "systemmode")] +pub(crate) static BREAK_ON_TMOUT: AtomicBool = AtomicBool::new(false); + +/// # Safety +/// Can call through the `unix_signal_handler::inproc_timeout_handler`. +/// Calling this method multiple times concurrently can lead to race conditions. 
+pub unsafe fn inproc_qemu_timeout_handler( + signal: Signal, + info: &mut siginfo_t, + context: Option<&mut ucontext_t>, + data: &mut InProcessExecutorHandlerData, +) where + E: HasObservers + HasInProcessHooks + Executor, + E::Observers: ObserversTuple, + E::State: HasExecutions + HasSolutions + HasCorpus, + EM: EventFirer + EventRestarter, + ET: EmulatorModuleTuple, + OF: Feedback, + S: State + Unpin, + Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me +{ + #[cfg(feature = "systemmode")] + { + if BREAK_ON_TMOUT.load(Ordering::Acquire) { + libafl_exit_request_timeout(); + } else { + libafl::executors::hooks::unix::unix_signal_handler::inproc_timeout_handler::< + E, + EM, + OF, + Z, + >(signal, info, context, data); + } + } + + #[cfg(feature = "usermode")] + { + // run modules' crash callback + if let Some(emulator_modules) = EmulatorModules::::emulator_modules_mut() { + emulator_modules.modules_mut().on_timeout_all(); + } + + libafl::executors::hooks::unix::unix_signal_handler::inproc_timeout_handler::( + signal, info, context, data, + ); + } +} + +impl Debug for QemuExecutor<'_, CM, ED, ET, H, OT, S, SM> +where + CM: CommandManager, + ET: EmulatorModuleTuple + Debug, + H: FnMut(&mut Emulator, &mut S, &S::Input) -> ExitKind, + OT: ObserversTuple + Debug, + S: State, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("QemuExecutor") + .field("inner", &self.inner) + .finish() + } +} + +impl<'a, CM, ED, ET, H, OT, S, SM> QemuExecutor<'a, CM, ED, ET, H, OT, S, SM> +where + CM: CommandManager, + ET: EmulatorModuleTuple, + H: FnMut(&mut Emulator, &mut S, &S::Input) -> ExitKind, + OT: ObserversTuple, + S: State, +{ + pub fn new( + emulator: Emulator, + harness_fn: &'a mut H, + observers: OT, + fuzzer: &mut Z, + state: &mut S, + event_mgr: &mut EM, + timeout: Duration, + ) -> Result + where + ED: EmulatorDriver, + EM: EventFirer + EventRestarter, + OF: Feedback, + S: Unpin + State + HasExecutions + HasCorpus + HasSolutions, + Z: HasObjective + + HasScheduler + + ExecutionProcessor, + S::Solutions: Corpus, //delete me + ::Input: Clone, //delete me + { + let mut inner = StatefulInProcessExecutor::with_timeout( + harness_fn, emulator, observers, fuzzer, state, event_mgr, timeout, + )?; + + #[cfg(feature = "usermode")] + { + inner.inprocess_hooks_mut().crash_handler = + inproc_qemu_crash_handler:: as *const c_void; + + let handler = |emulator_modules: &mut EmulatorModules, host_sig| { + eprintln!("Crashed with signal {host_sig}"); + unsafe { + libafl::executors::inprocess::generic_inproc_crash_handler::(); + } + if let Some(cpu) = emulator_modules.qemu().current_cpu() { + eprint!("Context:\n{}", cpu.display_context()); + } + }; + + // # Safety + // We assume our crash handlers to be safe/quit after execution. 
+ unsafe { + inner + .exposed_executor_state_mut() + .modules_mut() + .crash_closure(Box::new(handler)); + } + } + + inner.inprocess_hooks_mut().timeout_handler = inproc_qemu_timeout_handler::< + StatefulInProcessExecutor<'a, H, OT, S, Emulator>, + EM, + ET, + OF, + S, + Z, + > as *const c_void; + + Ok(Self { + inner, + first_exec: true, + }) + } + + pub fn inner(&self) -> &StatefulInProcessExecutor<'a, H, OT, S, Emulator> { + &self.inner + } + + #[cfg(feature = "systemmode")] + pub fn break_on_timeout(&mut self) { + BREAK_ON_TMOUT.store(true, Ordering::Release); + } + + pub fn inner_mut( + &mut self, + ) -> &mut StatefulInProcessExecutor<'a, H, OT, S, Emulator> { + &mut self.inner + } +} + +impl Executor for QemuExecutor<'_, CM, ED, ET, H, OT, S, SM> +where + CM: CommandManager, + ED: EmulatorDriver, + EM: UsesState, + ET: EmulatorModuleTuple, + H: FnMut(&mut Emulator, &mut S, &S::Input) -> ExitKind, + OT: ObserversTuple, + S: State + HasExecutions + Unpin, + Z: UsesState, +{ + fn run_target( + &mut self, + fuzzer: &mut Z, + state: &mut Self::State, + mgr: &mut EM, + input: &Self::Input, + ) -> Result { + if self.first_exec { + self.inner.exposed_executor_state_mut().first_exec(state); + self.first_exec = false; + } + + self.inner + .exposed_executor_state_mut() + .pre_exec(state, input); + + let mut exit_kind = self.inner.run_target(fuzzer, state, mgr, input)?; + + self.inner.exposed_executor_state.post_exec( + input, + &mut *self.inner.inner.observers_mut(), + state, + &mut exit_kind, + ); + + Ok(exit_kind) + } +} + +impl UsesState for QemuExecutor<'_, CM, ED, ET, H, OT, S, SM> +where + CM: CommandManager, + ET: EmulatorModuleTuple, + H: FnMut(&mut Emulator, &mut S, &S::Input) -> ExitKind, + OT: ObserversTuple, + S: State, +{ + type State = S; +} + +impl HasObservers for QemuExecutor<'_, CM, ED, ET, H, OT, S, SM> +where + CM: CommandManager, + ET: EmulatorModuleTuple, + H: FnMut(&mut Emulator, &mut S, &S::Input) -> ExitKind, + OT: ObserversTuple, + S: State, +{ + type Observers = OT; + #[inline] + fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { + self.inner.observers() + } + + #[inline] + fn observers_mut(&mut self) -> RefIndexable<&mut Self::Observers, Self::Observers> { + self.inner.observers_mut() + } +} + +pub type QemuInProcessForkExecutor<'a, CM, ED, EM, ET, H, OT, S, SM, SP, Z> = + StatefulInProcessForkExecutor<'a, H, OT, S, SP, Emulator, EM, Z>; + +#[cfg(feature = "fork")] +pub struct QemuForkExecutor<'a, CM, ED, EM, ET, H, OT, S, SM, SP, Z> +where + CM: CommandManager, + ET: EmulatorModuleTuple, + H: FnMut(&mut Emulator, &S::Input) -> ExitKind + ?Sized, + OT: ObserversTuple, + S: UsesInput, + SP: ShMemProvider, + Z: UsesState, +{ + inner: QemuInProcessForkExecutor<'a, CM, ED, EM, ET, H, OT, S, SM, SP, Z>, +} + +#[cfg(feature = "fork")] +impl Debug + for QemuForkExecutor<'_, CM, ED, EM, ET, H, OT, S, SM, SP, Z> +where + CM: CommandManager + Debug, + EM: UsesState, + ED: Debug, + ET: EmulatorModuleTuple + Debug, + H: FnMut(&mut Emulator, &S::Input) -> ExitKind + ?Sized, + OT: ObserversTuple + Debug, + S: UsesInput + Debug, + SM: Debug, + SP: ShMemProvider, + Z: UsesState, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("QemuForkExecutor") + .field("inner", &self.inner) + .field("emulator", &self.inner.exposed_executor_state) + .finish() + } +} + +#[cfg(feature = "fork")] +impl<'a, CM, ED, EM, ET, H, OT, S, SM, SP, Z> + QemuForkExecutor<'a, CM, ED, EM, ET, H, OT, S, SM, SP, Z> +where + CM: CommandManager, + EM: EventFirer + 
EventRestarter, + ET: EmulatorModuleTuple, + H: FnMut(&mut Emulator, &S::Input) -> ExitKind + ?Sized, + OT: ObserversTuple, + S: State + HasSolutions, + SP: ShMemProvider, + Z: HasObjective, + Z::Objective: Feedback, +{ + #[allow(clippy::too_many_arguments)] + pub fn new( + emulator: Emulator, + harness_fn: &'a mut H, + observers: OT, + fuzzer: &mut Z, + state: &mut S, + event_mgr: &mut EM, + shmem_provider: SP, + timeout: Duration, + ) -> Result { + assert!(!ET::HOOKS_DO_SIDE_EFFECTS, "When using QemuForkExecutor, the hooks must not do any side effect as they will happen in the child process and then discarded"); + + Ok(Self { + inner: StatefulInProcessForkExecutor::new( + harness_fn, + emulator, + observers, + fuzzer, + state, + event_mgr, + timeout, + shmem_provider, + )?, + }) + } + + pub fn inner(&self) -> &QemuInProcessForkExecutor<'a, CM, ED, EM, ET, H, OT, S, SM, SP, Z> { + &self.inner + } + + pub fn inner_mut( + &mut self, + ) -> &mut QemuInProcessForkExecutor<'a, CM, ED, EM, ET, H, OT, S, SM, SP, Z> { + &mut self.inner + } + + pub fn emulator(&self) -> &Emulator { + &self.inner.exposed_executor_state + } + + pub fn emulator_mut(&mut self) -> &Emulator { + &mut self.inner.exposed_executor_state + } +} + +#[cfg(feature = "fork")] +impl Executor + for QemuForkExecutor<'_, CM, ED, EM, ET, H, OT, S, SM, SP, Z> +where + CM: CommandManager, + ED: EmulatorDriver, + EM: EventFirer + EventRestarter, + ET: EmulatorModuleTuple, + H: FnMut(&mut Emulator, &S::Input) -> ExitKind, + OF: Feedback, + OT: ObserversTuple + Debug, + S: State + HasExecutions + Unpin, + SP: ShMemProvider, + Z: HasObjective, +{ + fn run_target( + &mut self, + fuzzer: &mut Z, + state: &mut Self::State, + mgr: &mut EM, + input: &Self::Input, + ) -> Result { + self.inner.exposed_executor_state.first_exec(state); + + self.inner.exposed_executor_state.pre_exec(state, input); + + let mut exit_kind = self.inner.run_target(fuzzer, state, mgr, input)?; + + self.inner.exposed_executor_state.post_exec( + input, + &mut *self.inner.inner.observers_mut(), + state, + &mut exit_kind, + ); + + Ok(exit_kind) + } +} + +#[cfg(feature = "fork")] +impl UsesState + for QemuForkExecutor<'_, CM, ED, EM, ET, H, OT, S, SM, SP, Z> +where + CM: CommandManager, + ET: EmulatorModuleTuple, + H: FnMut(&mut Emulator, &S::Input) -> ExitKind + ?Sized, + OT: ObserversTuple, + S: State, + SP: ShMemProvider, + Z: UsesState, +{ + type State = S; +} + +#[cfg(feature = "fork")] +impl HasObservers + for QemuForkExecutor<'_, CM, ED, EM, ET, H, OT, S, SM, SP, Z> +where + CM: CommandManager, + EM: UsesState, + ET: EmulatorModuleTuple, + H: FnMut(&mut Emulator, &S::Input) -> ExitKind + ?Sized, + OT: ObserversTuple, + S: State, + SP: ShMemProvider, + Z: UsesState, +{ + type Observers = OT; + #[inline] + fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { + self.inner.observers() + } + + #[inline] + fn observers_mut(&mut self) -> RefIndexable<&mut Self::Observers, Self::Observers> { + self.inner.observers_mut() + } +} diff --git a/libafl_qemu/src/executor/mod.rs b/libafl_qemu/src/executor/mod.rs deleted file mode 100644 index 2df48fa062..0000000000 --- a/libafl_qemu/src/executor/mod.rs +++ /dev/null @@ -1,543 +0,0 @@ -//! 
A `QEMU`-based executor for binary-only instrumentation in `LibAFL` -#[cfg(emulation_mode = "usermode")] -use core::ptr; -use core::{ - ffi::c_void, - fmt::{self, Debug, Formatter}, - time::Duration, -}; - -#[cfg(feature = "fork")] -use libafl::{ - events::EventManager, executors::InProcessForkExecutor, state::HasLastReportTime, HasMetadata, -}; -use libafl::{ - events::{EventFirer, EventRestarter}, - executors::{ - hooks::inprocess::InProcessExecutorHandlerData, - inprocess::{HasInProcessHooks, InProcessExecutor}, - Executor, ExitKind, HasObservers, - }, - feedbacks::Feedback, - fuzzer::HasObjective, - observers::{ObserversTuple, UsesObservers}, - state::{HasCorpus, HasExecutions, HasSolutions, State, UsesState}, - Error, -}; -#[cfg(feature = "fork")] -use libafl_bolts::shmem::ShMemProvider; -use libafl_bolts::{ - os::unix_signals::{siginfo_t, ucontext_t, Signal}, - tuples::RefIndexable, -}; - -use crate::{helpers::QemuHelperTuple, hooks::QemuHooks, Qemu}; - -/// A version of `QemuExecutor` with a state accessible from the harness. -pub mod stateful; - -pub struct QemuExecutorState<'a, QT, S> -where - QT: QemuHelperTuple, - S: State + HasExecutions, -{ - hooks: &'a mut QemuHooks, - first_exec: bool, -} - -pub struct QemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple, - QT: QemuHelperTuple, -{ - inner: InProcessExecutor<'a, H, OT, S>, - state: QemuExecutorState<'a, QT, S>, -} - -impl<'a, H, OT, QT, S> Debug for QemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple + Debug, - QT: QemuHelperTuple + Debug, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("QemuExecutor") - .field("hooks", &self.state.hooks) - .field("inner", &self.inner) - .finish() - } -} - -#[cfg(emulation_mode = "usermode")] -extern "C" { - // Original QEMU user signal handler - fn libafl_qemu_handle_crash(signal: i32, info: *mut siginfo_t, puc: *mut c_void); -} - -#[cfg(emulation_mode = "usermode")] -pub unsafe fn inproc_qemu_crash_handler<'a, E, EM, OF, Z, QT, S>( - signal: Signal, - info: &'a mut siginfo_t, - mut context: Option<&'a mut ucontext_t>, - _data: &'a mut InProcessExecutorHandlerData, -) where - E: Executor + HasObservers, - EM: EventFirer + EventRestarter, - OF: Feedback, - E::State: HasExecutions + HasSolutions + HasCorpus, - Z: HasObjective, - QT: QemuHelperTuple + Debug + 'a, - S: State + HasExecutions + 'a, -{ - let puc = match &mut context { - Some(v) => ptr::from_mut::(*v) as *mut c_void, - None => ptr::null_mut(), - }; - libafl_qemu_handle_crash(signal as i32, info, puc); -} - -#[cfg(emulation_mode = "systemmode")] -pub(crate) static mut BREAK_ON_TMOUT: bool = false; - -#[cfg(emulation_mode = "systemmode")] -extern "C" { - fn qemu_system_debug_request(); -} - -#[cfg(emulation_mode = "systemmode")] -pub unsafe fn inproc_qemu_timeout_handler<'a, E, EM, OF, Z>( - signal: Signal, - info: &'a mut siginfo_t, - context: Option<&'a mut ucontext_t>, - data: &'a mut InProcessExecutorHandlerData, -) where - E: Executor + HasObservers + HasInProcessHooks, - EM: EventFirer + EventRestarter, - OF: Feedback, - E::State: HasSolutions + HasCorpus + HasExecutions, - Z: HasObjective, -{ - if BREAK_ON_TMOUT { - qemu_system_debug_request(); - } else { - libafl::executors::hooks::unix::unix_signal_handler::inproc_timeout_handler::( - signal, info, context, data, - ); - } -} - -impl<'a, QT, S> QemuExecutorState<'a, QT, S> -where - S: State + HasExecutions, - QT: 
QemuHelperTuple + Debug, -{ - pub fn new(hooks: &'a mut QemuHooks) -> Result - where - E: Executor + HasInProcessHooks + HasObservers, - EM: EventFirer + EventRestarter, - OF: Feedback, - OT: ObserversTuple, - S: State + HasExecutions + HasCorpus + HasSolutions, - Z: HasObjective, - { - #[cfg(emulation_mode = "usermode")] - { - let handler = |hooks: &mut QemuHooks, host_sig| { - eprintln!("Crashed with signal {host_sig}"); - unsafe { - libafl::executors::inprocess::generic_inproc_crash_handler::(); - } - if let Some(cpu) = hooks.qemu().current_cpu() { - eprint!("Context:\n{}", cpu.display_context()); - } - }; - - hooks.crash_closure(Box::new(handler)); - } - Ok(QemuExecutorState { - first_exec: true, - hooks, - }) - } - - #[must_use] - pub fn hooks(&self) -> &QemuHooks { - self.hooks - } - - pub fn hooks_mut(&mut self) -> &mut QemuHooks { - self.hooks - } - - #[must_use] - pub fn qemu(&self) -> &Qemu { - self.hooks.qemu() - } -} - -impl<'a, H, OT, QT, S> QemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple, - QT: QemuHelperTuple + Debug, -{ - pub fn new( - hooks: &'a mut QemuHooks, - harness_fn: &'a mut H, - observers: OT, - fuzzer: &mut Z, - state: &mut S, - event_mgr: &mut EM, - timeout: Duration, - ) -> Result - where - EM: EventFirer + EventRestarter, - OF: Feedback, - S: State + HasExecutions + HasCorpus + HasSolutions, - Z: HasObjective, - { - let mut inner = InProcessExecutor::with_timeout( - harness_fn, observers, fuzzer, state, event_mgr, timeout, - )?; - - #[cfg(emulation_mode = "usermode")] - { - inner.inprocess_hooks_mut().crash_handler = - inproc_qemu_crash_handler::, EM, OF, Z, QT, S> - as *const c_void; - } - - #[cfg(emulation_mode = "systemmode")] - { - inner.inprocess_hooks_mut().timeout_handler = - inproc_qemu_timeout_handler::, EM, OF, Z> - as *const c_void; - } - - let state = - QemuExecutorState::new::, EM, OF, OT, Z>(hooks)?; - - Ok(Self { inner, state }) - } - - pub fn inner(&self) -> &InProcessExecutor<'a, H, OT, S> { - &self.inner - } - - #[cfg(emulation_mode = "systemmode")] - pub fn break_on_timeout(&mut self) { - unsafe { - BREAK_ON_TMOUT = true; - } - } - - pub fn inner_mut(&mut self) -> &mut InProcessExecutor<'a, H, OT, S> { - &mut self.inner - } - - pub fn hooks(&self) -> &QemuHooks { - self.state.hooks() - } - - pub fn hooks_mut(&mut self) -> &mut QemuHooks { - self.state.hooks_mut() - } - - pub fn emulator(&self) -> &Qemu { - self.state.qemu() - } -} - -impl<'a, QT, S> QemuExecutorState<'a, QT, S> -where - S: State + HasExecutions + HasCorpus + HasSolutions, - QT: QemuHelperTuple + Debug, -{ - fn pre_exec(&mut self, input: &E::Input, qemu: Qemu) - where - E: Executor, - EM: EventFirer + EventRestarter, - OF: Feedback, - Z: HasObjective, - { - if self.first_exec { - self.hooks.helpers().first_exec_all(self.hooks); - self.first_exec = false; - } - self.hooks.helpers_mut().pre_exec_all(qemu, input); - } - - fn post_exec( - &mut self, - input: &E::Input, - qemu: Qemu, - observers: &mut OT, - exit_kind: &mut ExitKind, - ) where - E: Executor + HasObservers, - EM: EventFirer + EventRestarter, - OT: ObserversTuple, - OF: Feedback, - Z: HasObjective, - { - self.hooks - .helpers_mut() - .post_exec_all(qemu, input, observers, exit_kind); - } -} - -impl<'a, EM, H, OT, OF, QT, S, Z> Executor for QemuExecutor<'a, H, OT, QT, S> -where - EM: EventFirer + EventRestarter, - H: FnMut(&S::Input) -> ExitKind, - S: State + HasExecutions + HasCorpus + HasSolutions, - OT: ObserversTuple, - OF: Feedback, - QT: 
QemuHelperTuple + Debug, - Z: HasObjective, -{ - fn run_target( - &mut self, - fuzzer: &mut Z, - state: &mut Self::State, - mgr: &mut EM, - input: &Self::Input, - ) -> Result { - let qemu = Qemu::get().unwrap(); - self.state.pre_exec::(input, qemu); - let mut exit_kind = self.inner.run_target(fuzzer, state, mgr, input)?; - self.state.post_exec::( - input, - qemu, - &mut *self.inner.observers_mut(), - &mut exit_kind, - ); - Ok(exit_kind) - } -} - -impl<'a, H, OT, QT, S> UsesState for QemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input) -> ExitKind, - OT: ObserversTuple, - QT: QemuHelperTuple, - S: State + HasExecutions, -{ - type State = S; -} - -impl<'a, H, OT, QT, S> UsesObservers for QemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input) -> ExitKind, - OT: ObserversTuple, - QT: QemuHelperTuple, - S: State + HasExecutions, -{ - type Observers = OT; -} - -impl<'a, H, OT, QT, S> HasObservers for QemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple, - QT: QemuHelperTuple, -{ - #[inline] - fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { - self.inner.observers() - } - - #[inline] - fn observers_mut(&mut self) -> RefIndexable<&mut Self::Observers, Self::Observers> { - self.inner.observers_mut() - } -} - -#[cfg(feature = "fork")] -pub struct QemuForkExecutor<'a, H, OT, QT, S, SP, EM, Z> -where - H: FnMut(&S::Input) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple, - QT: QemuHelperTuple, - SP: ShMemProvider, - EM: UsesState, - Z: UsesState, -{ - inner: InProcessForkExecutor<'a, H, OT, S, SP, EM, Z>, - state: QemuExecutorState<'a, QT, S>, -} - -#[cfg(feature = "fork")] -impl<'a, H, OT, QT, S, SP, EM, Z> Debug for QemuForkExecutor<'a, H, OT, QT, S, SP, EM, Z> -where - H: FnMut(&S::Input) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple + Debug, - QT: QemuHelperTuple + Debug, - SP: ShMemProvider, - EM: UsesState, - Z: UsesState, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("QemuForkExecutor") - .field("hooks", &self.state.hooks) - .field("inner", &self.inner) - .finish() - } -} - -#[cfg(feature = "fork")] -impl<'a, H, OT, QT, S, SP, EM, Z, OF> QemuForkExecutor<'a, H, OT, QT, S, SP, EM, Z> -where - H: FnMut(&S::Input) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple, - QT: QemuHelperTuple, - SP: ShMemProvider, - EM: EventFirer + EventRestarter, - OF: Feedback, - S: HasSolutions, - Z: HasObjective, -{ - pub fn new( - hooks: &'a mut QemuHooks, - harness_fn: &'a mut H, - observers: OT, - fuzzer: &mut Z, - state: &mut S, - event_mgr: &mut EM, - shmem_provider: SP, - timeout: core::time::Duration, - ) -> Result { - assert!(!QT::HOOKS_DO_SIDE_EFFECTS, "When using QemuForkExecutor, the hooks must not do any side effect as they will happen in the child process and then discarded"); - - Ok(Self { - inner: InProcessForkExecutor::new( - harness_fn, - observers, - fuzzer, - state, - event_mgr, - timeout, - shmem_provider, - )?, - state: QemuExecutorState { - first_exec: true, - hooks, - }, - }) - } - - pub fn inner(&self) -> &InProcessForkExecutor<'a, H, OT, S, SP, EM, Z> { - &self.inner - } - - pub fn inner_mut(&mut self) -> &mut InProcessForkExecutor<'a, H, OT, S, SP, EM, Z> { - &mut self.inner - } - - pub fn hooks(&self) -> &QemuHooks { - self.state.hooks - } - - pub fn hooks_mut(&mut self) -> &mut QemuHooks { - self.state.hooks - } - - pub fn qemu(&self) -> &Qemu { - self.state.hooks.qemu() - } -} - -#[cfg(feature = "fork")] -impl<'a, 
EM, H, OT, QT, S, Z, SP, OF> Executor - for QemuForkExecutor<'a, H, OT, QT, S, SP, EM, Z> -where - EM: EventManager, Z, State = S>, - H: FnMut(&S::Input) -> ExitKind, - S: State + HasMetadata + HasExecutions + HasLastReportTime + HasCorpus + HasSolutions, - OT: ObserversTuple + Debug, - QT: QemuHelperTuple, - SP: ShMemProvider, - OF: Feedback, - Z: HasObjective, -{ - fn run_target( - &mut self, - fuzzer: &mut Z, - state: &mut Self::State, - mgr: &mut EM, - input: &Self::Input, - ) -> Result { - let qemu = *self.state.hooks.qemu(); - if self.state.first_exec { - self.state.hooks.helpers().first_exec_all(self.state.hooks); - self.state.first_exec = false; - } - self.state.hooks.helpers_mut().pre_exec_all(qemu, input); - let mut exit_kind = self.inner.run_target(fuzzer, state, mgr, input)?; - self.state.hooks.helpers_mut().post_exec_all( - qemu, - input, - &mut *self.inner.observers_mut(), - &mut exit_kind, - ); - Ok(exit_kind) - } -} - -#[cfg(feature = "fork")] -impl<'a, H, OT, QT, S, SP, EM, Z> UsesObservers for QemuForkExecutor<'a, H, OT, QT, S, SP, EM, Z> -where - H: FnMut(&S::Input) -> ExitKind, - OT: ObserversTuple, - QT: QemuHelperTuple, - S: State + HasExecutions, - SP: ShMemProvider, - EM: UsesState, - Z: UsesState, -{ - type Observers = OT; -} - -#[cfg(feature = "fork")] -impl<'a, H, OT, QT, S, SP, EM, Z> UsesState for QemuForkExecutor<'a, H, OT, QT, S, SP, EM, Z> -where - H: FnMut(&S::Input) -> ExitKind, - OT: ObserversTuple, - QT: QemuHelperTuple, - S: State + HasExecutions, - SP: ShMemProvider, - EM: UsesState, - Z: UsesState, -{ - type State = S; -} - -#[cfg(feature = "fork")] -impl<'a, H, OT, QT, S, SP, EM, Z> HasObservers for QemuForkExecutor<'a, H, OT, QT, S, SP, EM, Z> -where - H: FnMut(&S::Input) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple, - QT: QemuHelperTuple, - SP: ShMemProvider, - EM: UsesState, - Z: UsesState, -{ - #[inline] - fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { - self.inner.observers() - } - - #[inline] - fn observers_mut(&mut self) -> RefIndexable<&mut Self::Observers, Self::Observers> { - self.inner.observers_mut() - } -} diff --git a/libafl_qemu/src/executor/stateful.rs b/libafl_qemu/src/executor/stateful.rs deleted file mode 100644 index 145bb38576..0000000000 --- a/libafl_qemu/src/executor/stateful.rs +++ /dev/null @@ -1,211 +0,0 @@ -//! 
A `QEMU`-based executor for binary-only instrumentation in `LibAFL` -use core::{ - ffi::c_void, - fmt::{self, Debug, Formatter}, - time::Duration, -}; - -use libafl::{ - events::{EventFirer, EventRestarter}, - executors::{ - inprocess::{stateful::StatefulInProcessExecutor, HasInProcessHooks}, - Executor, ExitKind, HasObservers, - }, - feedbacks::Feedback, - fuzzer::HasObjective, - observers::{ObserversTuple, UsesObservers}, - state::{HasCorpus, HasExecutions, HasSolutions, State, UsesState}, - Error, -}; -use libafl_bolts::tuples::RefIndexable; - -#[cfg(emulation_mode = "usermode")] -use crate::executor::inproc_qemu_crash_handler; -#[cfg(emulation_mode = "systemmode")] -use crate::executor::{inproc_qemu_timeout_handler, BREAK_ON_TMOUT}; -use crate::{executor::QemuExecutorState, helpers::QemuHelperTuple, hooks::QemuHooks, Qemu}; - -pub struct StatefulQemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input, &mut QemuExecutorState<'a, QT, S>) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple, - QT: QemuHelperTuple, -{ - inner: StatefulInProcessExecutor<'a, H, OT, S, QemuExecutorState<'a, QT, S>>, -} - -impl<'a, H, OT, QT, S> Debug for StatefulQemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input, &mut QemuExecutorState<'a, QT, S>) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple + Debug, - QT: QemuHelperTuple + Debug, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("QemuExecutor") - .field("inner", &self.inner) - .finish() - } -} - -impl<'a, H, OT, QT, S> StatefulQemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input, &mut QemuExecutorState<'a, QT, S>) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple, - QT: QemuHelperTuple + Debug, -{ - pub fn new( - hooks: &'a mut QemuHooks, - harness_fn: &'a mut H, - observers: OT, - fuzzer: &mut Z, - state: &mut S, - event_mgr: &mut EM, - timeout: Duration, - ) -> Result - where - EM: EventFirer + EventRestarter, - OF: Feedback, - S: State + HasExecutions + HasCorpus + HasSolutions, - Z: HasObjective, - { - let qemu_state = QemuExecutorState::new::< - StatefulInProcessExecutor<'a, H, OT, S, QemuExecutorState<'a, QT, S>>, - EM, - OF, - OT, - Z, - >(hooks)?; - - let mut inner = StatefulInProcessExecutor::with_timeout( - harness_fn, qemu_state, observers, fuzzer, state, event_mgr, timeout, - )?; - - #[cfg(emulation_mode = "usermode")] - { - inner.inprocess_hooks_mut().crash_handler = inproc_qemu_crash_handler::< - StatefulInProcessExecutor<'a, H, OT, S, QemuExecutorState<'a, QT, S>>, - EM, - OF, - Z, - QT, - S, - > as *const c_void; - } - - #[cfg(emulation_mode = "systemmode")] - { - inner.inprocess_hooks_mut().timeout_handler = inproc_qemu_timeout_handler::< - StatefulInProcessExecutor<'a, H, OT, S, QemuExecutorState<'a, QT, S>>, - EM, - OF, - Z, - > as *const c_void; - } - - Ok(Self { inner }) - } - - pub fn inner(&self) -> &StatefulInProcessExecutor<'a, H, OT, S, QemuExecutorState<'a, QT, S>> { - &self.inner - } - - #[cfg(emulation_mode = "systemmode")] - pub fn break_on_timeout(&mut self) { - unsafe { - BREAK_ON_TMOUT = true; - } - } - - pub fn inner_mut( - &mut self, - ) -> &mut StatefulInProcessExecutor<'a, H, OT, S, QemuExecutorState<'a, QT, S>> { - &mut self.inner - } - - pub fn hooks(&self) -> &QemuHooks { - self.inner.exposed_executor_state().hooks() - } - - pub fn hooks_mut(&mut self) -> &mut QemuHooks { - self.inner.exposed_executor_state_mut().hooks_mut() - } - - pub fn emulator(&self) -> &Qemu { - self.inner.exposed_executor_state().qemu() - } -} - -impl<'a, EM, H, OT, 
OF, QT, S, Z> Executor for StatefulQemuExecutor<'a, H, OT, QT, S> -where - EM: EventFirer + EventRestarter, - H: FnMut(&S::Input, &mut QemuExecutorState<'a, QT, S>) -> ExitKind, - S: State + HasExecutions + HasCorpus + HasSolutions, - OT: ObserversTuple, - OF: Feedback, - QT: QemuHelperTuple + Debug, - Z: HasObjective, -{ - fn run_target( - &mut self, - fuzzer: &mut Z, - state: &mut Self::State, - mgr: &mut EM, - input: &Self::Input, - ) -> Result { - let qemu = Qemu::get().unwrap(); - self.inner - .exposed_executor_state_mut() - .pre_exec::(input, qemu); - let mut exit_kind = self.inner.run_target(fuzzer, state, mgr, input)?; - self.inner - .exposed_executor_state - .post_exec::( - input, - qemu, - &mut *self.inner.inner.observers_mut(), - &mut exit_kind, - ); - Ok(exit_kind) - } -} - -impl<'a, H, OT, QT, S> UsesState for StatefulQemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input, &mut QemuExecutorState<'a, QT, S>) -> ExitKind, - OT: ObserversTuple, - QT: QemuHelperTuple, - S: State + HasExecutions, -{ - type State = S; -} - -impl<'a, H, OT, QT, S> UsesObservers for StatefulQemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input, &mut QemuExecutorState<'a, QT, S>) -> ExitKind, - OT: ObserversTuple, - QT: QemuHelperTuple, - S: State + HasExecutions, -{ - type Observers = OT; -} - -impl<'a, H, OT, QT, S> HasObservers for StatefulQemuExecutor<'a, H, OT, QT, S> -where - H: FnMut(&S::Input, &mut QemuExecutorState<'a, QT, S>) -> ExitKind, - S: State + HasExecutions, - OT: ObserversTuple, - QT: QemuHelperTuple, -{ - #[inline] - fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { - self.inner.observers() - } - - #[inline] - fn observers_mut(&mut self) -> RefIndexable<&mut Self::Observers, Self::Observers> { - self.inner.observers_mut() - } -} diff --git a/libafl_qemu/src/helpers/drcov.rs b/libafl_qemu/src/helpers/drcov.rs deleted file mode 100644 index 46d26c77ec..0000000000 --- a/libafl_qemu/src/helpers/drcov.rs +++ /dev/null @@ -1,286 +0,0 @@ -use std::{path::PathBuf, sync::Mutex}; - -use hashbrown::{hash_map::Entry, HashMap}; -use libafl::{executors::ExitKind, inputs::UsesInput, observers::ObserversTuple, HasMetadata}; -use libafl_qemu_sys::{GuestAddr, GuestUsize}; -use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter}; -use rangemap::RangeMap; -use serde::{Deserialize, Serialize}; - -use crate::{ - helpers::{ - HasInstrumentationFilter, IsFilter, QemuHelper, QemuHelperTuple, - QemuInstrumentationAddressRangeFilter, - }, - hooks::{Hook, QemuHooks}, - Qemu, -}; - -static DRCOV_IDS: Mutex>> = Mutex::new(None); -static DRCOV_MAP: Mutex>> = Mutex::new(None); -static DRCOV_LENGTHS: Mutex>> = Mutex::new(None); - -#[cfg_attr( - any(not(feature = "serdeany_autoreg"), miri), - allow(clippy::unsafe_derive_deserialize) -)] // for SerdeAny -#[derive(Debug, Default, Serialize, Deserialize)] -pub struct QemuDrCovMetadata { - pub current_id: u64, -} - -impl QemuDrCovMetadata { - #[must_use] - pub fn new() -> Self { - Self { current_id: 0 } - } -} - -libafl_bolts::impl_serdeany!(QemuDrCovMetadata); - -#[derive(Debug)] -pub struct QemuDrCovHelper { - filter: QemuInstrumentationAddressRangeFilter, - module_mapping: RangeMap, - filename: PathBuf, - full_trace: bool, - drcov_len: usize, -} - -impl QemuDrCovHelper { - #[must_use] - #[allow(clippy::let_underscore_untyped)] - pub fn new( - filter: QemuInstrumentationAddressRangeFilter, - module_mapping: RangeMap, - filename: PathBuf, - full_trace: bool, - ) -> Self { - if full_trace { - let _ = 
DRCOV_IDS.lock().unwrap().insert(vec![]); - } - let _ = DRCOV_MAP.lock().unwrap().insert(HashMap::new()); - let _ = DRCOV_LENGTHS.lock().unwrap().insert(HashMap::new()); - Self { - filter, - module_mapping, - filename, - full_trace, - drcov_len: 0, - } - } - - #[must_use] - pub fn must_instrument(&self, addr: GuestAddr) -> bool { - self.filter.allowed(addr) - } -} - -impl HasInstrumentationFilter for QemuDrCovHelper { - fn filter(&self) -> &QemuInstrumentationAddressRangeFilter { - &self.filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationAddressRangeFilter { - &mut self.filter - } -} - -impl QemuHelper for QemuDrCovHelper -where - S: UsesInput + HasMetadata, -{ - fn init_hooks(&self, hooks: &QemuHooks) - where - QT: QemuHelperTuple, - { - hooks.blocks( - Hook::Function(gen_unique_block_ids::), - Hook::Function(gen_block_lengths::), - Hook::Function(exec_trace_block::), - ); - } - - fn pre_exec(&mut self, _qemu: Qemu, _input: &S::Input) {} - - fn post_exec( - &mut self, - _qemu: Qemu, - _input: &S::Input, - _observers: &mut OT, - _exit_kind: &mut ExitKind, - ) where - OT: ObserversTuple, - { - let lengths_opt = DRCOV_LENGTHS.lock().unwrap(); - let lengths = lengths_opt.as_ref().unwrap(); - if self.full_trace { - if DRCOV_IDS.lock().unwrap().as_ref().unwrap().len() > self.drcov_len { - let mut drcov_vec = Vec::::new(); - for id in DRCOV_IDS.lock().unwrap().as_ref().unwrap() { - 'pcs_full: for (pc, idm) in DRCOV_MAP.lock().unwrap().as_ref().unwrap() { - let mut module_found = false; - for module in self.module_mapping.iter() { - let (range, (_, _)) = module; - if *pc >= range.start.try_into().unwrap() - && *pc <= range.end.try_into().unwrap() - { - module_found = true; - break; - } - } - if !module_found { - continue 'pcs_full; - } - if *idm == *id { - match lengths.get(pc) { - Some(block_length) => { - drcov_vec.push(DrCovBasicBlock::new( - *pc as usize, - *pc as usize + *block_length as usize, - )); - } - None => { - log::info!("Failed to find block length for: {pc:}"); - } - } - } - } - } - - DrCovWriter::new(&self.module_mapping) - .write(&self.filename, &drcov_vec) - .expect("Failed to write coverage file"); - } - self.drcov_len = DRCOV_IDS.lock().unwrap().as_ref().unwrap().len(); - } else { - if DRCOV_MAP.lock().unwrap().as_ref().unwrap().len() > self.drcov_len { - let mut drcov_vec = Vec::::new(); - 'pcs: for (pc, _) in DRCOV_MAP.lock().unwrap().as_ref().unwrap() { - let mut module_found = false; - for module in self.module_mapping.iter() { - let (range, (_, _)) = module; - if *pc >= range.start.try_into().unwrap() - && *pc <= range.end.try_into().unwrap() - { - module_found = true; - break; - } - } - if !module_found { - continue 'pcs; - } - match lengths.get(pc) { - Some(block_length) => { - drcov_vec.push(DrCovBasicBlock::new( - *pc as usize, - *pc as usize + *block_length as usize, - )); - } - None => { - log::info!("Failed to find block length for: {pc:}"); - } - } - } - - DrCovWriter::new(&self.module_mapping) - .write(&self.filename, &drcov_vec) - .expect("Failed to write coverage file"); - } - self.drcov_len = DRCOV_MAP.lock().unwrap().as_ref().unwrap().len(); - } - } -} - -pub fn gen_unique_block_ids( - hooks: &mut QemuHooks, - state: Option<&mut S>, - pc: GuestAddr, -) -> Option -where - S: UsesInput + HasMetadata, - QT: QemuHelperTuple, -{ - let drcov_helper = hooks - .helpers() - .match_first_type::() - .unwrap(); - if !drcov_helper.must_instrument(pc) { - return None; - } - - let state = state.expect("The gen_unique_block_ids hook works only for in-process 
fuzzing"); - if state - .metadata_map_mut() - .get_mut::() - .is_none() - { - state.add_metadata(QemuDrCovMetadata::new()); - } - let meta = state - .metadata_map_mut() - .get_mut::() - .unwrap(); - - match DRCOV_MAP.lock().unwrap().as_mut().unwrap().entry(pc) { - Entry::Occupied(e) => { - let id = *e.get(); - if drcov_helper.full_trace { - Some(id) - } else { - None - } - } - Entry::Vacant(e) => { - let id = meta.current_id; - e.insert(id); - meta.current_id = id + 1; - if drcov_helper.full_trace { - // GuestAddress is u32 for 32 bit guests - #[allow(clippy::unnecessary_cast)] - Some(id as u64) - } else { - None - } - } - } -} - -pub fn gen_block_lengths( - hooks: &mut QemuHooks, - _state: Option<&mut S>, - pc: GuestAddr, - block_length: GuestUsize, -) where - S: UsesInput + HasMetadata, - QT: QemuHelperTuple, -{ - let drcov_helper = hooks - .helpers() - .match_first_type::() - .unwrap(); - if !drcov_helper.must_instrument(pc) { - return; - } - DRCOV_LENGTHS - .lock() - .unwrap() - .as_mut() - .unwrap() - .insert(pc, block_length); -} - -pub fn exec_trace_block(hooks: &mut QemuHooks, _state: Option<&mut S>, id: u64) -where - QT: QemuHelperTuple, - S: UsesInput + HasMetadata, -{ - if hooks - .helpers() - .match_first_type::() - .unwrap() - .full_trace - { - DRCOV_IDS.lock().unwrap().as_mut().unwrap().push(id); - } -} diff --git a/libafl_qemu/src/helpers/edges.rs b/libafl_qemu/src/helpers/edges.rs deleted file mode 100644 index e3547609fa..0000000000 --- a/libafl_qemu/src/helpers/edges.rs +++ /dev/null @@ -1,701 +0,0 @@ -use std::{cell::UnsafeCell, cmp::max}; - -use hashbrown::{hash_map::Entry, HashMap}; -use libafl::{inputs::UsesInput, HasMetadata}; -use libafl_qemu_sys::GuestAddr; -#[cfg(emulation_mode = "systemmode")] -use libafl_qemu_sys::GuestPhysAddr; -pub use libafl_targets::{ - edges_map_mut_ptr, EDGES_MAP, EDGES_MAP_PTR, EDGES_MAP_SIZE_IN_USE, EDGES_MAP_SIZE_MAX, - MAX_EDGES_FOUND, -}; -use serde::{Deserialize, Serialize}; - -#[cfg(emulation_mode = "systemmode")] -use crate::helpers::QemuInstrumentationPagingFilter; -use crate::{ - helpers::{ - hash_me, HasInstrumentationFilter, QemuHelper, QemuHelperTuple, - QemuInstrumentationAddressRangeFilter, - }, - hooks::{Hook, QemuHooks}, - IsFilter, -}; - -#[cfg_attr( - any(not(feature = "serdeany_autoreg"), miri), - allow(clippy::unsafe_derive_deserialize) -)] // for SerdeAny -#[derive(Debug, Default, Serialize, Deserialize)] -pub struct QemuEdgesMapMetadata { - pub map: HashMap<(GuestAddr, GuestAddr), u64>, - pub current_id: u64, -} - -impl QemuEdgesMapMetadata { - #[must_use] - pub fn new() -> Self { - Self { - map: HashMap::new(), - current_id: 0, - } - } -} - -libafl_bolts::impl_serdeany!(QemuEdgesMapMetadata); - -#[cfg(emulation_mode = "usermode")] -#[derive(Debug)] -pub struct QemuEdgeCoverageHelper { - address_filter: QemuInstrumentationAddressRangeFilter, - use_hitcounts: bool, -} - -#[cfg(emulation_mode = "systemmode")] -#[derive(Debug)] -pub struct QemuEdgeCoverageHelper { - address_filter: QemuInstrumentationAddressRangeFilter, - paging_filter: QemuInstrumentationPagingFilter, - use_hitcounts: bool, -} - -#[cfg(emulation_mode = "usermode")] -impl QemuEdgeCoverageHelper { - #[must_use] - pub fn new(address_filter: QemuInstrumentationAddressRangeFilter) -> Self { - Self { - address_filter, - use_hitcounts: true, - } - } - - #[must_use] - pub fn without_hitcounts(address_filter: QemuInstrumentationAddressRangeFilter) -> Self { - Self { - address_filter, - use_hitcounts: false, - } - } - - #[must_use] - pub fn 
must_instrument(&self, addr: GuestAddr) -> bool { - self.address_filter.allowed(addr) - } -} - -#[cfg(emulation_mode = "systemmode")] -impl QemuEdgeCoverageHelper { - #[must_use] - pub fn new( - address_filter: QemuInstrumentationAddressRangeFilter, - paging_filter: QemuInstrumentationPagingFilter, - ) -> Self { - Self { - address_filter, - paging_filter, - use_hitcounts: true, - } - } - - #[must_use] - pub fn without_hitcounts( - address_filter: QemuInstrumentationAddressRangeFilter, - paging_filter: QemuInstrumentationPagingFilter, - ) -> Self { - Self { - address_filter, - paging_filter, - use_hitcounts: false, - } - } - - #[must_use] - pub fn must_instrument(&self, addr: GuestAddr, paging_id: Option) -> bool { - self.address_filter.allowed(addr) && self.paging_filter.allowed(paging_id) - } -} - -#[cfg(emulation_mode = "usermode")] -impl Default for QemuEdgeCoverageHelper { - fn default() -> Self { - Self::new(QemuInstrumentationAddressRangeFilter::None) - } -} - -#[cfg(emulation_mode = "systemmode")] -impl Default for QemuEdgeCoverageHelper { - fn default() -> Self { - Self::new( - QemuInstrumentationAddressRangeFilter::None, - QemuInstrumentationPagingFilter::None, - ) - } -} - -impl HasInstrumentationFilter for QemuEdgeCoverageHelper { - fn filter(&self) -> &QemuInstrumentationAddressRangeFilter { - &self.address_filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationAddressRangeFilter { - &mut self.address_filter - } -} - -#[cfg(emulation_mode = "systemmode")] -impl HasInstrumentationFilter for QemuEdgeCoverageHelper { - fn filter(&self) -> &QemuInstrumentationPagingFilter { - &self.paging_filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationPagingFilter { - &mut self.paging_filter - } -} - -impl QemuHelper for QemuEdgeCoverageHelper -where - S: UsesInput + HasMetadata, -{ - fn first_exec(&self, hooks: &QemuHooks) - where - QT: QemuHelperTuple, - { - if self.use_hitcounts { - // hooks.edges( - // Hook::Function(gen_unique_edge_ids::), - // Hook::Raw(trace_edge_hitcount), - // ); - let hook_id = hooks.edges(Hook::Function(gen_unique_edge_ids::), Hook::Empty); - unsafe { - libafl_qemu_sys::libafl_qemu_edge_hook_set_jit( - hook_id.0, - Some(libafl_qemu_sys::libafl_jit_trace_edge_hitcount), - ); - } - } else { - // hooks.edges( - // Hook::Function(gen_unique_edge_ids::), - // Hook::Raw(trace_edge_single), - // ); - let hook_id = hooks.edges(Hook::Function(gen_unique_edge_ids::), Hook::Empty); - unsafe { - libafl_qemu_sys::libafl_qemu_edge_hook_set_jit( - hook_id.0, - Some(libafl_qemu_sys::libafl_jit_trace_edge_single), - ); - } - } - } -} - -pub type QemuCollidingEdgeCoverageHelper = QemuEdgeCoverageChildHelper; - -#[cfg(emulation_mode = "usermode")] -#[derive(Debug)] -pub struct QemuEdgeCoverageChildHelper { - address_filter: QemuInstrumentationAddressRangeFilter, - use_hitcounts: bool, -} - -#[cfg(emulation_mode = "systemmode")] -#[derive(Debug)] -pub struct QemuEdgeCoverageChildHelper { - address_filter: QemuInstrumentationAddressRangeFilter, - paging_filter: QemuInstrumentationPagingFilter, - use_hitcounts: bool, -} - -#[cfg(emulation_mode = "usermode")] -impl QemuEdgeCoverageChildHelper { - #[must_use] - pub fn new(address_filter: QemuInstrumentationAddressRangeFilter) -> Self { - Self { - address_filter, - use_hitcounts: true, - } - } - - #[must_use] - pub fn without_hitcounts(address_filter: QemuInstrumentationAddressRangeFilter) -> Self { - Self { - address_filter, - use_hitcounts: false, - } - } - - #[must_use] - pub fn must_instrument(&self, addr: 
GuestAddr) -> bool { - self.address_filter.allowed(addr) - } -} - -#[cfg(emulation_mode = "systemmode")] -impl QemuEdgeCoverageChildHelper { - #[must_use] - pub fn new( - address_filter: QemuInstrumentationAddressRangeFilter, - paging_filter: QemuInstrumentationPagingFilter, - ) -> Self { - Self { - address_filter, - paging_filter, - use_hitcounts: true, - } - } - - #[must_use] - pub fn without_hitcounts( - address_filter: QemuInstrumentationAddressRangeFilter, - paging_filter: QemuInstrumentationPagingFilter, - ) -> Self { - Self { - address_filter, - paging_filter, - use_hitcounts: false, - } - } - - #[must_use] - pub fn must_instrument(&self, addr: GuestAddr, paging_id: Option) -> bool { - self.address_filter.allowed(addr) && self.paging_filter.allowed(paging_id) - } -} - -#[cfg(emulation_mode = "usermode")] -impl Default for QemuEdgeCoverageChildHelper { - fn default() -> Self { - Self::new(QemuInstrumentationAddressRangeFilter::None) - } -} - -#[cfg(emulation_mode = "systemmode")] -impl Default for QemuEdgeCoverageChildHelper { - fn default() -> Self { - Self::new( - QemuInstrumentationAddressRangeFilter::None, - QemuInstrumentationPagingFilter::None, - ) - } -} - -impl HasInstrumentationFilter - for QemuEdgeCoverageChildHelper -{ - fn filter(&self) -> &QemuInstrumentationAddressRangeFilter { - &self.address_filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationAddressRangeFilter { - &mut self.address_filter - } -} - -#[cfg(emulation_mode = "systemmode")] -impl HasInstrumentationFilter for QemuEdgeCoverageChildHelper { - fn filter(&self) -> &QemuInstrumentationPagingFilter { - &self.paging_filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationPagingFilter { - &mut self.paging_filter - } -} - -impl QemuHelper for QemuEdgeCoverageChildHelper -where - S: UsesInput + HasMetadata, -{ - const HOOKS_DO_SIDE_EFFECTS: bool = false; - - fn first_exec(&self, hooks: &QemuHooks) - where - QT: QemuHelperTuple, - { - if self.use_hitcounts { - hooks.edges( - Hook::Function(gen_hashed_edge_ids::), - Hook::Raw(trace_edge_hitcount_ptr), - ); - } else { - hooks.edges( - Hook::Function(gen_hashed_edge_ids::), - Hook::Raw(trace_edge_single_ptr), - ); - } - } -} - -#[cfg(emulation_mode = "usermode")] -#[derive(Debug)] -pub struct QemuEdgeCoverageClassicHelper { - address_filter: QemuInstrumentationAddressRangeFilter, - use_hitcounts: bool, - use_jit: bool, -} - -#[cfg(emulation_mode = "systemmode")] -#[derive(Debug)] -pub struct QemuEdgeCoverageClassicHelper { - address_filter: QemuInstrumentationAddressRangeFilter, - paging_filter: QemuInstrumentationPagingFilter, - use_hitcounts: bool, - use_jit: bool, -} - -#[cfg(emulation_mode = "usermode")] -impl QemuEdgeCoverageClassicHelper { - #[must_use] - pub fn new(address_filter: QemuInstrumentationAddressRangeFilter, use_jit: bool) -> Self { - Self { - address_filter, - use_hitcounts: true, - use_jit, - } - } - - #[must_use] - pub fn without_hitcounts( - address_filter: QemuInstrumentationAddressRangeFilter, - use_jit: bool, - ) -> Self { - Self { - address_filter, - use_hitcounts: false, - use_jit, - } - } - - #[must_use] - pub fn must_instrument(&self, addr: GuestAddr) -> bool { - self.address_filter.allowed(addr) - } -} - -#[cfg(emulation_mode = "systemmode")] -impl QemuEdgeCoverageClassicHelper { - #[must_use] - pub fn new( - address_filter: QemuInstrumentationAddressRangeFilter, - paging_filter: QemuInstrumentationPagingFilter, - use_jit: bool, - ) -> Self { - Self { - address_filter, - paging_filter, - use_hitcounts: true, - 
use_jit, - } - } - - #[must_use] - pub fn without_hitcounts( - address_filter: QemuInstrumentationAddressRangeFilter, - paging_filter: QemuInstrumentationPagingFilter, - use_jit: bool, - ) -> Self { - Self { - address_filter, - paging_filter, - use_hitcounts: false, - use_jit, - } - } - - #[must_use] - pub fn must_instrument(&self, addr: GuestAddr, paging_id: Option) -> bool { - self.address_filter.allowed(addr) && self.paging_filter.allowed(paging_id) - } -} - -#[cfg(emulation_mode = "usermode")] -impl Default for QemuEdgeCoverageClassicHelper { - fn default() -> Self { - Self::new(QemuInstrumentationAddressRangeFilter::None, false) - } -} - -#[cfg(emulation_mode = "systemmode")] -impl Default for QemuEdgeCoverageClassicHelper { - fn default() -> Self { - Self::new( - QemuInstrumentationAddressRangeFilter::None, - QemuInstrumentationPagingFilter::None, - false, - ) - } -} - -impl HasInstrumentationFilter - for QemuEdgeCoverageClassicHelper -{ - fn filter(&self) -> &QemuInstrumentationAddressRangeFilter { - &self.address_filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationAddressRangeFilter { - &mut self.address_filter - } -} - -#[cfg(emulation_mode = "systemmode")] -impl HasInstrumentationFilter for QemuEdgeCoverageClassicHelper { - fn filter(&self) -> &QemuInstrumentationPagingFilter { - &self.paging_filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationPagingFilter { - &mut self.paging_filter - } -} - -#[allow(clippy::collapsible_else_if)] -impl QemuHelper for QemuEdgeCoverageClassicHelper -where - S: UsesInput + HasMetadata, -{ - const HOOKS_DO_SIDE_EFFECTS: bool = false; - - fn first_exec(&self, hooks: &QemuHooks) - where - QT: QemuHelperTuple, - { - if self.use_hitcounts { - if self.use_jit { - let hook_id = hooks.blocks( - Hook::Function(gen_hashed_block_ids::), - Hook::Empty, - Hook::Empty, - ); - - unsafe { - libafl_qemu_sys::libafl_qemu_block_hook_set_jit( - hook_id.0, - Some(libafl_qemu_sys::libafl_jit_trace_block_hitcount), - ); - } - } else { - hooks.blocks( - Hook::Function(gen_hashed_block_ids::), - Hook::Empty, - Hook::Raw(trace_block_transition_hitcount), - ); - } - } else { - if self.use_jit { - let hook_id = hooks.blocks( - Hook::Function(gen_hashed_block_ids::), - Hook::Empty, - Hook::Empty, - ); - - unsafe { - libafl_qemu_sys::libafl_qemu_block_hook_set_jit( - hook_id.0, - Some(libafl_qemu_sys::libafl_jit_trace_block_single), - ); - } - } else { - hooks.blocks( - Hook::Function(gen_hashed_block_ids::), - Hook::Empty, - Hook::Raw(trace_block_transition_single), - ); - } - } - } -} - -thread_local!(static PREV_LOC : UnsafeCell = const { UnsafeCell::new(0) }); - -pub fn gen_unique_edge_ids( - hooks: &mut QemuHooks, - state: Option<&mut S>, - src: GuestAddr, - dest: GuestAddr, -) -> Option -where - S: UsesInput + HasMetadata, - QT: QemuHelperTuple, -{ - if let Some(h) = hooks.helpers().match_first_type::() { - #[cfg(emulation_mode = "usermode")] - { - if !h.must_instrument(src) && !h.must_instrument(dest) { - return None; - } - } - - #[cfg(emulation_mode = "systemmode")] - { - let paging_id = hooks - .qemu() - .current_cpu() - .and_then(|cpu| cpu.current_paging_id()); - - if !h.must_instrument(src, paging_id) && !h.must_instrument(dest, paging_id) { - return None; - } - } - } - let state = state.expect("The gen_unique_edge_ids hook works only for in-process fuzzing"); - let meta = state.metadata_or_insert_with(QemuEdgesMapMetadata::new); - - match meta.map.entry((src, dest)) { - Entry::Occupied(e) => { - let id = *e.get(); - let nxt = (id as 
usize + 1) & (EDGES_MAP_SIZE_MAX - 1); - unsafe { - MAX_EDGES_FOUND = max(MAX_EDGES_FOUND, nxt); - } - Some(id) - } - Entry::Vacant(e) => { - let id = meta.current_id; - e.insert(id); - meta.current_id = (id + 1) & (EDGES_MAP_SIZE_MAX as u64 - 1); - unsafe { - MAX_EDGES_FOUND = meta.current_id as usize; - } - // GuestAddress is u32 for 32 bit guests - #[allow(clippy::unnecessary_cast)] - Some(id as u64) - } - } -} - -pub extern "C" fn trace_edge_hitcount(_: *const (), id: u64) { - unsafe { - EDGES_MAP[id as usize] = EDGES_MAP[id as usize].wrapping_add(1); - } -} - -pub extern "C" fn trace_edge_single(_: *const (), id: u64) { - unsafe { - EDGES_MAP[id as usize] = 1; - } -} - -pub fn gen_hashed_edge_ids( - hooks: &mut QemuHooks, - _state: Option<&mut S>, - src: GuestAddr, - dest: GuestAddr, -) -> Option -where - S: UsesInput, - QT: QemuHelperTuple, -{ - if let Some(h) = hooks - .helpers() - .match_first_type::() - { - #[cfg(emulation_mode = "usermode")] - if !h.must_instrument(src) && !h.must_instrument(dest) { - return None; - } - - #[cfg(emulation_mode = "systemmode")] - { - let paging_id = hooks - .qemu() - .current_cpu() - .and_then(|cpu| cpu.current_paging_id()); - - if !h.must_instrument(src, paging_id) && !h.must_instrument(dest, paging_id) { - return None; - } - } - } - // GuestAddress is u32 for 32 bit guests - #[allow(clippy::unnecessary_cast)] - Some((hash_me(src as u64) ^ hash_me(dest as u64)) & (EDGES_MAP_SIZE_MAX as u64 - 1)) -} - -pub extern "C" fn trace_edge_hitcount_ptr(_: *const (), id: u64) { - unsafe { - let ptr = EDGES_MAP_PTR.add(id as usize); - *ptr = (*ptr).wrapping_add(1); - } -} - -pub extern "C" fn trace_edge_single_ptr(_: *const (), id: u64) { - unsafe { - let ptr = EDGES_MAP_PTR.add(id as usize); - *ptr = 1; - } -} - -/* -pub fn gen_addr_block_ids( - _hooks: &mut QemuHooks, - _state: Option<&mut S>, - pc: GuestAddr, -) -> Option -where - S: UsesInput, - QT: QemuHelperTuple, -{ - // GuestAddress is u32 for 32 bit guests - #[allow(clippy::unnecessary_cast)] - Some(pc as u64) -} -*/ - -pub fn gen_hashed_block_ids( - hooks: &mut QemuHooks, - _state: Option<&mut S>, - pc: GuestAddr, -) -> Option -where - S: UsesInput, - QT: QemuHelperTuple, -{ - if let Some(h) = hooks - .helpers() - .match_first_type::() - { - #[cfg(emulation_mode = "usermode")] - { - if !h.must_instrument(pc) { - return None; - } - } - #[cfg(emulation_mode = "systemmode")] - { - let paging_id = hooks - .qemu() - .current_cpu() - .and_then(|cpu| cpu.current_paging_id()); - - if !h.must_instrument(pc, paging_id) { - return None; - } - } - } - // GuestAddress is u32 for 32 bit guests - #[allow(clippy::unnecessary_cast)] - Some(hash_me(pc as u64)) -} - -pub extern "C" fn trace_block_transition_hitcount(_: *const (), id: u64) { - unsafe { - PREV_LOC.with(|prev_loc| { - let x = ((*prev_loc.get() ^ id) as usize) & (EDGES_MAP_SIZE_MAX - 1); - let entry = EDGES_MAP_PTR.add(x); - *entry = (*entry).wrapping_add(1); - *prev_loc.get() = id.overflowing_shr(1).0; - }); - } -} - -pub extern "C" fn trace_block_transition_single(_: *const (), id: u64) { - unsafe { - PREV_LOC.with(|prev_loc| { - let x = ((*prev_loc.get() ^ id) as usize) & (EDGES_MAP_SIZE_MAX - 1); - let entry = EDGES_MAP_PTR.add(x); - *entry = 1; - *prev_loc.get() = id.overflowing_shr(1).0; - }); - } -} diff --git a/libafl_qemu/src/helpers/mod.rs b/libafl_qemu/src/helpers/mod.rs deleted file mode 100644 index e375ecdc5d..0000000000 --- a/libafl_qemu/src/helpers/mod.rs +++ /dev/null @@ -1,360 +0,0 @@ -use core::{fmt::Debug, ops::Range}; -use 
std::{cell::UnsafeCell, collections::HashSet, hash::BuildHasher}; - -use libafl::{executors::ExitKind, inputs::UsesInput, observers::ObserversTuple}; -use libafl_bolts::tuples::{MatchFirstType, SplitBorrowExtractFirstType}; -use libafl_qemu_sys::{GuestAddr, GuestPhysAddr}; - -use crate::{hooks::QemuHooks, Qemu}; - -pub mod edges; -pub use edges::QemuEdgeCoverageHelper; - -#[cfg(not(cpu_target = "hexagon"))] -pub mod calls; -#[cfg(not(cpu_target = "hexagon"))] -pub use calls::QemuCallTracerHelper; - -#[cfg(not(cpu_target = "hexagon"))] -pub mod drcov; -#[cfg(not(cpu_target = "hexagon"))] -pub use drcov::QemuDrCovHelper; - -#[cfg(not(any(cpu_target = "mips", cpu_target = "hexagon")))] -pub mod cmplog; -#[cfg(not(any(cpu_target = "mips", cpu_target = "hexagon")))] -pub use cmplog::QemuCmpLogHelper; - -#[cfg(all(emulation_mode = "usermode", feature = "injections"))] -pub mod injections; -#[cfg(all(emulation_mode = "usermode", feature = "injections"))] -pub use injections::QemuInjectionHelper; - -#[cfg(all(emulation_mode = "usermode", not(cpu_target = "hexagon")))] -pub mod snapshot; -#[cfg(all(emulation_mode = "usermode", not(cpu_target = "hexagon")))] -pub use snapshot::IntervalSnapshotFilter; -#[cfg(all(emulation_mode = "usermode", not(cpu_target = "hexagon")))] -pub use snapshot::QemuSnapshotHelper; - -#[cfg(all(emulation_mode = "usermode", not(cpu_target = "hexagon")))] -pub mod asan; -#[cfg(all(emulation_mode = "usermode", not(cpu_target = "hexagon")))] -pub use asan::{init_qemu_with_asan, QemuAsanHelper}; - -#[cfg(all(emulation_mode = "usermode", not(cpu_target = "hexagon")))] -pub mod asan_guest; -#[cfg(all(emulation_mode = "usermode", not(cpu_target = "hexagon")))] -pub use asan_guest::{init_qemu_with_asan_guest, QemuAsanGuestHelper}; - -/// A helper for `libafl_qemu`. 
-// TODO remove 'static when specialization will be stable -pub trait QemuHelper: 'static + Debug -where - S: UsesInput, -{ - const HOOKS_DO_SIDE_EFFECTS: bool = true; - - fn init_hooks(&self, _hooks: &QemuHooks) - where - QT: QemuHelperTuple, - { - } - - fn first_exec(&self, _hooks: &QemuHooks) - where - QT: QemuHelperTuple, - { - } - - fn pre_exec(&mut self, _qemu: Qemu, _input: &S::Input) {} - - fn post_exec( - &mut self, - _qemu: Qemu, - _input: &S::Input, - _observers: &mut OT, - _exit_kind: &mut ExitKind, - ) where - OT: ObserversTuple, - { - } -} - -pub trait QemuHelperTuple: MatchFirstType + for<'a> SplitBorrowExtractFirstType<'a> -where - S: UsesInput, -{ - const HOOKS_DO_SIDE_EFFECTS: bool; - - fn init_hooks_all(&self, hooks: &QemuHooks) - where - QT: QemuHelperTuple; - - fn first_exec_all(&self, hooks: &QemuHooks) - where - QT: QemuHelperTuple; - - fn pre_exec_all(&mut self, _qemu: Qemu, input: &S::Input); - - fn post_exec_all( - &mut self, - _qemu: Qemu, - input: &S::Input, - _observers: &mut OT, - _exit_kind: &mut ExitKind, - ) where - OT: ObserversTuple; -} - -impl QemuHelperTuple for () -where - S: UsesInput, -{ - const HOOKS_DO_SIDE_EFFECTS: bool = false; - - fn init_hooks_all(&self, _hooks: &QemuHooks) - where - QT: QemuHelperTuple, - { - } - - fn first_exec_all(&self, _hooks: &QemuHooks) - where - QT: QemuHelperTuple, - { - } - - fn pre_exec_all(&mut self, _qemu: Qemu, _input: &S::Input) {} - - fn post_exec_all( - &mut self, - _qemu: Qemu, - _input: &S::Input, - _observers: &mut OT, - _exit_kind: &mut ExitKind, - ) where - OT: ObserversTuple, - { - } -} - -impl HasInstrumentationFilter<()> for () { - fn filter(&self) -> &() { - self - } - - fn filter_mut(&mut self) -> &mut () { - self - } -} - -impl HasInstrumentationFilter for (Head, ()) -where - Head: HasInstrumentationFilter, - F: IsFilter, -{ - fn filter(&self) -> &F { - self.0.filter() - } - - fn filter_mut(&mut self) -> &mut F { - self.0.filter_mut() - } -} - -impl QemuHelperTuple for (Head, Tail) -where - Head: QemuHelper, - Tail: QemuHelperTuple, - S: UsesInput, -{ - const HOOKS_DO_SIDE_EFFECTS: bool = Head::HOOKS_DO_SIDE_EFFECTS || Tail::HOOKS_DO_SIDE_EFFECTS; - - fn init_hooks_all(&self, hooks: &QemuHooks) - where - QT: QemuHelperTuple, - { - self.0.init_hooks(hooks); - self.1.init_hooks_all(hooks); - } - - fn first_exec_all(&self, hooks: &QemuHooks) - where - QT: QemuHelperTuple, - { - self.0.first_exec(hooks); - self.1.first_exec_all(hooks); - } - - fn pre_exec_all(&mut self, qemu: Qemu, input: &S::Input) { - self.0.pre_exec(qemu, input); - self.1.pre_exec_all(qemu, input); - } - - fn post_exec_all( - &mut self, - qemu: Qemu, - input: &S::Input, - observers: &mut OT, - exit_kind: &mut ExitKind, - ) where - OT: ObserversTuple, - { - self.0.post_exec(qemu, input, observers, exit_kind); - self.1.post_exec_all(qemu, input, observers, exit_kind); - } -} - -#[derive(Debug, Clone)] -pub enum QemuFilterList { - AllowList(T), - DenyList(T), - None, -} - -impl IsFilter for QemuFilterList -where - T: IsFilter + Clone, -{ - type FilterParameter = T::FilterParameter; - - fn allowed(&self, filter_parameter: Self::FilterParameter) -> bool { - match self { - QemuFilterList::AllowList(allow_list) => allow_list.allowed(filter_parameter), - QemuFilterList::DenyList(deny_list) => !deny_list.allowed(filter_parameter), - QemuFilterList::None => true, - } - } -} - -pub type QemuInstrumentationPagingFilter = QemuFilterList>; - -impl IsFilter for HashSet -where - H: BuildHasher, -{ - type FilterParameter = Option; - - fn 
allowed(&self, paging_id: Self::FilterParameter) -> bool { - paging_id.is_some_and(|pid| self.contains(&pid)) - } -} - -pub type QemuInstrumentationAddressRangeFilter = QemuFilterList>>; - -impl IsFilter for Vec> { - type FilterParameter = GuestAddr; - - fn allowed(&self, addr: Self::FilterParameter) -> bool { - for rng in self { - if rng.contains(&addr) { - return true; - } - } - false - } -} - -pub trait HasInstrumentationFilter -where - F: IsFilter, -{ - fn filter(&self) -> &F; - - fn filter_mut(&mut self) -> &mut F; - - fn update_filter(&mut self, filter: F, emu: &Qemu) { - *self.filter_mut() = filter; - emu.flush_jit(); - } -} - -#[cfg(emulation_mode = "usermode")] -pub trait StdInstrumentationFilter: - HasInstrumentationFilter -{ -} - -#[cfg(emulation_mode = "systemmode")] -pub trait StdInstrumentationFilter: - HasInstrumentationFilter - + HasInstrumentationFilter -{ -} - -static mut EMPTY_ADDRESS_FILTER: UnsafeCell = - UnsafeCell::new(QemuFilterList::None); -static mut EMPTY_PAGING_FILTER: UnsafeCell = - UnsafeCell::new(QemuFilterList::None); - -impl HasInstrumentationFilter for () { - fn filter(&self) -> &QemuInstrumentationAddressRangeFilter { - &QemuFilterList::None - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationAddressRangeFilter { - unsafe { EMPTY_ADDRESS_FILTER.get_mut() } - } -} - -impl HasInstrumentationFilter for () { - fn filter(&self) -> &QemuInstrumentationPagingFilter { - &QemuFilterList::None - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationPagingFilter { - unsafe { EMPTY_PAGING_FILTER.get_mut() } - } -} - -#[cfg(emulation_mode = "systemmode")] -impl StdInstrumentationFilter for (Head, ()) where - Head: HasInstrumentationFilter - + HasInstrumentationFilter -{ -} - -#[cfg(emulation_mode = "usermode")] -impl StdInstrumentationFilter for (Head, ()) where - Head: HasInstrumentationFilter -{ -} - -#[cfg(emulation_mode = "systemmode")] -impl StdInstrumentationFilter for () {} - -#[cfg(emulation_mode = "usermode")] -impl StdInstrumentationFilter for () {} - -pub trait IsFilter: Debug { - type FilterParameter; - - fn allowed(&self, filter_parameter: Self::FilterParameter) -> bool; -} - -impl IsFilter for () { - type FilterParameter = (); - - fn allowed(&self, _filter_parameter: Self::FilterParameter) -> bool { - true - } -} - -pub trait IsAddressFilter: IsFilter {} - -#[cfg(emulation_mode = "systemmode")] -pub trait IsPagingFilter: IsFilter> {} - -#[cfg(emulation_mode = "systemmode")] -impl IsPagingFilter for QemuInstrumentationPagingFilter {} - -impl IsAddressFilter for QemuInstrumentationAddressRangeFilter {} - -#[must_use] -pub fn hash_me(mut x: u64) -> u64 { - x = (x.overflowing_shr(16).0 ^ x).overflowing_mul(0x45d9f3b).0; - x = (x.overflowing_shr(16).0 ^ x).overflowing_mul(0x45d9f3b).0; - x = (x.overflowing_shr(16).0 ^ x) ^ x; - x -} diff --git a/libafl_qemu/src/hooks.rs b/libafl_qemu/src/hooks.rs deleted file mode 100644 index 45dc32dda0..0000000000 --- a/libafl_qemu/src/hooks.rs +++ /dev/null @@ -1,1446 +0,0 @@ -//! 
The high-level hooks -#![allow(clippy::type_complexity, clippy::missing_transmute_annotations)] - -#[cfg(emulation_mode = "usermode")] -use core::ptr::addr_of_mut; -use core::{ - ffi::c_void, - fmt::{self, Debug, Formatter}, - marker::PhantomData, - mem::transmute, - pin::Pin, - ptr::{self, addr_of}, -}; - -use libafl::{ - executors::{hooks::inprocess::inprocess_get_state, ExitKind}, - inputs::UsesInput, - state::NopState, -}; -use libafl_qemu_sys::{CPUArchStatePtr, FatPtr, GuestAddr, GuestUsize}; - -pub use crate::qemu::SyscallHookResult; -use crate::{ - helpers::QemuHelperTuple, - qemu::{MemAccessInfo, Qemu, SKIP_EXEC_HOOK}, - sys::TCGTemp, - BackdoorHookId, BlockHookId, CmpHookId, EdgeHookId, HookId, InstructionHookId, ReadHookId, JmpHookId, - WriteHookId, -}; -#[cfg(emulation_mode = "usermode")] -use crate::{NewThreadHookId, PostSyscallHookId, PreSyscallHookId}; - -/* -// all kinds of hooks -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub(crate) enum Hook { - Function(*const c_void), - Closure(FatPtr), - #[cfg(emulation_mode = "usermode")] - Once(Box), - Empty, -} -*/ - -// all kinds of hooks -#[derive(Clone, PartialEq, Eq, Debug)] -pub(crate) enum HookRepr { - Function(*const c_void), - Closure(FatPtr), - Empty, -} - -pub struct HookState { - id: H, - gen: HookRepr, - post_gen: HookRepr, - execs: [HookRepr; N], -} - -pub enum Hook { - Function(F), - Closure(C), - Raw(R), - Empty, -} - -impl Hook { - pub fn is_empty(&self) -> bool { - matches!(self, Hook::Empty) - } -} - -macro_rules! get_raw_hook { - ($h:expr, $replacement:expr, $fntype:ty) => { - match $h { - Hook::Function(_) | Hook::Closure(_) => Some($replacement as $fntype), - Hook::Raw(r) => { - let v: $fntype = transmute(r); - Some(v) - } - Hook::Empty => None, - } - }; -} - -macro_rules! hook_to_repr { - ($h:expr) => { - match $h { - Hook::Function(f) => HookRepr::Function(f as *const libc::c_void), - Hook::Closure(c) => HookRepr::Closure(transmute(c)), - Hook::Raw(_) => HookRepr::Empty, // managed by emu - Hook::Empty => HookRepr::Empty, - } - }; -} - -static mut QEMU_HOOKS_PTR: *const c_void = ptr::null(); - -#[must_use] -pub unsafe fn get_qemu_hooks<'a, QT, S>() -> &'a mut QemuHooks -where - S: UsesInput, - QT: QemuHelperTuple, -{ - (QEMU_HOOKS_PTR as *mut QemuHooks) - .as_mut() - .expect("A high-level hook is installed but QemuHooks is not initialized") -} - -macro_rules! create_wrapper { - ($name:ident, ($($param:ident : $param_type:ty),*)) => { - paste::paste! { - extern "C" fn [](hook: &mut c_void, $($param: $param_type),*) - where - S: UsesInput, - QT: QemuHelperTuple, - { - unsafe { - let hooks = get_qemu_hooks::(); - let func: fn(&mut QemuHooks, Option<&mut S>, $($param_type),*) = transmute(ptr::from_mut::(hook)); - func(hooks, inprocess_get_state::(), $($param),*); - } - } - - extern "C" fn [](hook: &mut FatPtr, $($param: $param_type),*) - where - S: UsesInput, - QT: QemuHelperTuple, - { - unsafe { - let hooks = get_qemu_hooks::(); - let func: &mut Box, Option<&mut S>, $($param_type),*)> = transmute(hook); - func(hooks, inprocess_get_state::(), $($param),*); - } - } - } - }; - ($name:ident, ($($param:ident : $param_type:ty),*), $ret_type:ty) => { - paste::paste! 
{ - extern "C" fn [](hook: &mut c_void, $($param: $param_type),*) -> $ret_type - where - S: UsesInput, - QT: QemuHelperTuple, - { - unsafe { - let hooks = get_qemu_hooks::(); - let func: fn(&mut QemuHooks, Option<&mut S>, $($param_type),*) -> $ret_type= transmute(ptr::from_mut::(hook)); - func(hooks, inprocess_get_state::(), $($param),*) - } - } - - extern "C" fn [](hook: &mut FatPtr, $($param: $param_type),*) -> $ret_type - where - S: UsesInput, - QT: QemuHelperTuple, - { - unsafe { - let hooks = get_qemu_hooks::(); - let func: &mut Box, Option<&mut S>, $($param_type),*) -> $ret_type> = transmute(hook); - func(hooks, inprocess_get_state::(), $($param),*) - } - } - } - }; -} - -macro_rules! create_gen_wrapper { - ($name:ident, ($($param:ident : $param_type:ty),*), $ret_type:ty, $execs:literal, $hook_id:ident) => { - paste::paste! { - extern "C" fn [<$name _gen_hook_wrapper>](hook: &mut HookState<{ $execs }, $hook_id>, $($param: $param_type),*) -> $ret_type - where - S: UsesInput, - QT: QemuHelperTuple, - { - unsafe { - let hooks = get_qemu_hooks::(); - match &mut hook.gen { - HookRepr::Function(ptr) => { - let func: fn(&mut QemuHooks, Option<&mut S>, $($param_type),*) -> Option<$ret_type> = - transmute(*ptr); - func(hooks, inprocess_get_state::(), $($param),*).map_or(SKIP_EXEC_HOOK, |id| id) - } - HookRepr::Closure(ptr) => { - let func: &mut Box< - dyn FnMut(&mut QemuHooks, Option<&mut S>, $($param_type),*) -> Option<$ret_type>, - > = transmute(ptr); - func(hooks, inprocess_get_state::(), $($param),*).map_or(SKIP_EXEC_HOOK, |id| id) - } - _ => 0, - } - } - } - } - } -} - -macro_rules! create_post_gen_wrapper { - ($name:ident, ($($param:ident : $param_type:ty),*), $execs:literal, $hook_id:ident) => { - paste::paste! { - extern "C" fn [<$name _post_gen_hook_wrapper>](hook: &mut HookState<{ $execs }, $hook_id>, $($param: $param_type),*) - where - S: UsesInput, - QT: QemuHelperTuple, - { - unsafe { - let hooks = get_qemu_hooks::(); - match &mut hook.post_gen { - HookRepr::Function(ptr) => { - let func: fn(&mut QemuHooks, Option<&mut S>, $($param_type),*) = - transmute(*ptr); - func(hooks, inprocess_get_state::(), $($param),*); - } - HookRepr::Closure(ptr) => { - let func: &mut Box< - dyn FnMut(&mut QemuHooks, Option<&mut S>, $($param_type),*), - > = transmute(ptr); - func(hooks, inprocess_get_state::(), $($param),*); - } - _ => (), - } - } - } - } - } -} - -macro_rules! create_exec_wrapper { - ($name:ident, ($($param:ident : $param_type:ty),*), $execidx:literal, $execs:literal, $hook_id:ident) => { - paste::paste! 
{ - extern "C" fn [<$name _ $execidx _exec_hook_wrapper>](hook: &mut HookState<{ $execs }, $hook_id>, $($param: $param_type),*) - where - S: UsesInput, - QT: QemuHelperTuple, - { - unsafe { - let hooks = get_qemu_hooks::(); - match &mut hook.execs[$execidx] { - HookRepr::Function(ptr) => { - let func: fn(&mut QemuHooks, Option<&mut S>, $($param_type),*) = transmute(*ptr); - func(hooks, inprocess_get_state::(), $($param),*); - } - HookRepr::Closure(ptr) => { - let func: &mut Box, Option<&mut S>, $($param_type),*)> = - transmute(ptr); - func(hooks, inprocess_get_state::(), $($param),*); - } - _ => (), - } - } - } - } - } -} - -static mut GENERIC_HOOKS: Vec>> = vec![]; -create_wrapper!(generic, (pc: GuestAddr)); -static mut BACKDOOR_HOOKS: Vec>> = vec![]; -create_wrapper!(backdoor, (cpu: CPUArchStatePtr, pc: GuestAddr)); - -#[cfg(emulation_mode = "usermode")] -static mut PRE_SYSCALL_HOOKS: Vec>> = vec![]; -#[cfg(emulation_mode = "usermode")] -create_wrapper!( - pre_syscall, - ( - sys_num: i32, - a0: GuestAddr, - a1: GuestAddr, - a2: GuestAddr, - a3: GuestAddr, - a4: GuestAddr, - a5: GuestAddr, - a6: GuestAddr, - a7: GuestAddr - ), - SyscallHookResult -); -#[cfg(emulation_mode = "usermode")] -static mut POST_SYSCALL_HOOKS: Vec>> = vec![]; -#[cfg(emulation_mode = "usermode")] -create_wrapper!( - post_syscall, - ( - res: GuestAddr, - sys_num: i32, - a0: GuestAddr, - a1: GuestAddr, - a2: GuestAddr, - a3: GuestAddr, - a4: GuestAddr, - a5: GuestAddr, - a6: GuestAddr, - a7: GuestAddr - ), - GuestAddr -); -#[cfg(emulation_mode = "usermode")] -static mut NEW_THREAD_HOOKS: Vec>> = vec![]; -#[cfg(emulation_mode = "usermode")] -create_wrapper!(new_thread, (tid: u32), bool); - -static mut EDGE_HOOKS: Vec>>> = vec![]; -create_gen_wrapper!(edge, (src: GuestAddr, dest: GuestAddr), u64, 1, EdgeHookId); -create_exec_wrapper!(edge, (id: u64), 0, 1, EdgeHookId); - -static mut BLOCK_HOOKS: Vec>>> = vec![]; -create_gen_wrapper!(block, (addr: GuestAddr), u64, 1, BlockHookId); -create_post_gen_wrapper!(block, (addr: GuestAddr, len: GuestUsize), 1, BlockHookId); -create_exec_wrapper!(block, (id: u64), 0, 1, BlockHookId); - -static mut READ_HOOKS: Vec>>> = vec![]; -create_gen_wrapper!(read, (pc: GuestAddr, addr: *mut TCGTemp, info: MemAccessInfo), u64, 5, ReadHookId); -create_exec_wrapper!(read, (id: u64, addr: GuestAddr), 0, 5, ReadHookId); -create_exec_wrapper!(read, (id: u64, addr: GuestAddr), 1, 5, ReadHookId); -create_exec_wrapper!(read, (id: u64, addr: GuestAddr), 2, 5, ReadHookId); -create_exec_wrapper!(read, (id: u64, addr: GuestAddr), 3, 5, ReadHookId); -create_exec_wrapper!( - read, - (id: u64, addr: GuestAddr, size: usize), - 4, - 5, - ReadHookId -); - -static mut WRITE_HOOKS: Vec>>> = vec![]; -create_gen_wrapper!(write, (pc: GuestAddr, addr: *mut TCGTemp, info: MemAccessInfo), u64, 5, WriteHookId); -create_exec_wrapper!(write, (id: u64, addr: GuestAddr), 0, 5, WriteHookId); -create_exec_wrapper!(write, (id: u64, addr: GuestAddr), 1, 5, WriteHookId); -create_exec_wrapper!(write, (id: u64, addr: GuestAddr), 2, 5, WriteHookId); -create_exec_wrapper!(write, (id: u64, addr: GuestAddr), 3, 5, WriteHookId); -create_exec_wrapper!( - write, - (id: u64, addr: GuestAddr, size: usize), - 4, - 5, - WriteHookId -); - -static mut CMP_HOOKS: Vec>>> = vec![]; -create_gen_wrapper!(cmp, (pc: GuestAddr, size: usize), u64, 4, CmpHookId); -create_exec_wrapper!(cmp, (id: u64, v0: u8, v1: u8), 0, 4, CmpHookId); -create_exec_wrapper!(cmp, (id: u64, v0: u16, v1: u16), 1, 4, CmpHookId); -create_exec_wrapper!(cmp, (id: u64, v0: u32, 
v1: u32), 2, 4, CmpHookId); -create_exec_wrapper!(cmp, (id: u64, v0: u64, v1: u64), 3, 4, CmpHookId); - -#[cfg(emulation_mode = "usermode")] -static mut CRASH_HOOKS: Vec = vec![]; - -#[cfg(emulation_mode = "usermode")] -extern "C" fn crash_hook_wrapper(target_sig: i32) -where - S: UsesInput, - QT: QemuHelperTuple, -{ - unsafe { - let hooks = get_qemu_hooks::(); - for hook in &mut (*addr_of_mut!(CRASH_HOOKS)) { - match hook { - HookRepr::Function(ptr) => { - let func: fn(&mut QemuHooks, i32) = transmute(*ptr); - func(hooks, target_sig); - } - HookRepr::Closure(ptr) => { - let func: &mut Box, i32)> = transmute(ptr); - func(hooks, target_sig); - } - HookRepr::Empty => (), - } - } - } -} - -static mut JMP_HOOKS: Vec>>> = vec![]; -create_gen_wrapper!(jmp, (src: GuestAddr, dest: GuestAddr), u64, 1, JmpHookId); -create_exec_wrapper!(jmp, (src: GuestAddr, dst: GuestAddr, id: u64), 0, 1, JmpHookId); - - - -static mut HOOKS_IS_INITIALIZED: bool = false; -static mut FIRST_EXEC: bool = true; - -pub struct QemuHooks -where - QT: QemuHelperTuple, - S: UsesInput, -{ - helpers: QT, - qemu: Qemu, - phantom: PhantomData, -} - -impl Debug for QemuHooks -where - S: UsesInput, - QT: QemuHelperTuple + Debug, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - f.debug_struct("QemuHooks") - .field("helpers", &self.helpers) - .field("emulator", &self.qemu) - .finish() - } -} - -impl QemuHooks> -where - QT: QemuHelperTuple>, - NopState: UsesInput, -{ - pub fn reproducer(qemu: Qemu, helpers: QT) -> Box { - Self::new(qemu, helpers) - } - - pub fn repro_run(&mut self, harness: &mut H, input: &I) -> ExitKind - where - H: FnMut(&I) -> ExitKind, - { - unsafe { - if FIRST_EXEC { - self.helpers.first_exec_all(self); - FIRST_EXEC = false; - } - } - self.helpers.pre_exec_all(self.qemu, input); - - let mut exit_kind = harness(input); - - self.helpers - .post_exec_all(self.qemu, input, &mut (), &mut exit_kind); - - exit_kind - } -} - -impl QemuHooks -where - QT: QemuHelperTuple, - S: UsesInput, -{ - pub fn new(qemu: Qemu, helpers: QT) -> Box { - unsafe { - assert!( - !HOOKS_IS_INITIALIZED, - "Only an instance of QemuHooks is permitted" - ); - HOOKS_IS_INITIALIZED = true; - } - // re-translate blocks with hooks - qemu.flush_jit(); - let slf = Box::new(Self { - qemu, - helpers, - phantom: PhantomData, - }); - slf.helpers.init_hooks_all(&slf); - unsafe { - QEMU_HOOKS_PTR = addr_of!(*slf) as *const c_void; - } - slf - } - - #[must_use] - pub fn match_helper(&self) -> Option<&T> - where - T: 'static, - { - self.helpers.match_first_type::() - } - - #[must_use] - pub fn match_helper_mut(&mut self) -> Option<&mut T> - where - T: 'static, - { - self.helpers.match_first_type_mut::() - } - - pub fn qemu(&self) -> &Qemu { - &self.qemu - } - - pub fn helpers(&self) -> &QT { - &self.helpers - } - - pub fn helpers_mut(&mut self) -> &mut QT { - &mut self.helpers - } - - pub fn instruction( - &self, - addr: GuestAddr, - hook: Hook< - fn(&mut Self, Option<&mut S>, GuestAddr), - Box FnMut(&'a mut Self, Option<&'a mut S>, GuestAddr)>, - extern "C" fn(*const (), pc: GuestAddr), - >, - invalidate_block: bool, - ) -> InstructionHookId { - match hook { - Hook::Function(f) => self.instruction_function(addr, f, invalidate_block), - Hook::Closure(c) => self.instruction_closure(addr, c, invalidate_block), - Hook::Raw(r) => { - let z: *const () = ptr::null::<()>(); - self.qemu.set_hook(z, addr, r, invalidate_block) - } - Hook::Empty => InstructionHookId(0), // TODO error type - } - } - - pub fn instruction_function( - &self, - addr: GuestAddr, - 
hook: fn(&mut Self, Option<&mut S>, GuestAddr), - invalidate_block: bool, - ) -> InstructionHookId { - unsafe { - self.qemu.set_hook( - transmute(hook), - addr, - func_generic_hook_wrapper::, - invalidate_block, - ) - } - } - - pub fn instruction_closure( - &self, - addr: GuestAddr, - hook: Box FnMut(&'a mut Self, Option<&'a mut S>, GuestAddr)>, - invalidate_block: bool, - ) -> InstructionHookId { - unsafe { - let fat: FatPtr = transmute(hook); - GENERIC_HOOKS.push(Box::pin((InstructionHookId(0), fat))); - let id = self.qemu.set_hook( - &mut GENERIC_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .1, - addr, - closure_generic_hook_wrapper::, - invalidate_block, - ); - GENERIC_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .0 = id; - id - } - } - - pub fn edges( - &self, - generation_hook: Hook< - fn(&mut Self, Option<&mut S>, src: GuestAddr, dest: GuestAddr) -> Option, - Box< - dyn for<'a> FnMut( - &'a mut Self, - Option<&'a mut S>, - GuestAddr, - GuestAddr, - ) -> Option, - >, - extern "C" fn(*const (), src: GuestAddr, dest: GuestAddr) -> u64, - >, - execution_hook: Hook< - fn(&mut Self, Option<&mut S>, id: u64), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64)>, - extern "C" fn(*const (), id: u64), - >, - ) -> EdgeHookId { - unsafe { - let gen = get_raw_hook!( - generation_hook, - edge_gen_hook_wrapper::, - unsafe extern "C" fn( - &mut HookState<1, EdgeHookId>, - src: GuestAddr, - dest: GuestAddr, - ) -> u64 - ); - let exec = get_raw_hook!( - execution_hook, - edge_0_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<1, EdgeHookId>, id: u64) - ); - EDGE_HOOKS.push(Box::pin(HookState { - id: EdgeHookId(0), - gen: hook_to_repr!(generation_hook), - post_gen: HookRepr::Empty, - execs: [hook_to_repr!(execution_hook)], - })); - let id = self.qemu.add_edge_hooks( - EDGE_HOOKS.last_mut().unwrap().as_mut().get_unchecked_mut(), - gen, - exec, - ); - EDGE_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .id = id; - id - } - } - - pub fn blocks( - &self, - generation_hook: Hook< - fn(&mut Self, Option<&mut S>, pc: GuestAddr) -> Option, - Box FnMut(&'a mut Self, Option<&'a mut S>, GuestAddr) -> Option>, - unsafe extern "C" fn(*const (), pc: GuestAddr) -> u64, - >, - post_generation_hook: Hook< - fn(&mut Self, Option<&mut S>, pc: GuestAddr, block_length: GuestUsize), - Box FnMut(&'a mut Self, Option<&mut S>, GuestAddr, GuestUsize)>, - unsafe extern "C" fn(*const (), pc: GuestAddr, block_length: GuestUsize), - >, - execution_hook: Hook< - fn(&mut Self, Option<&mut S>, id: u64), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64)>, - unsafe extern "C" fn(*const (), id: u64), - >, - ) -> BlockHookId { - unsafe { - let gen = get_raw_hook!( - generation_hook, - block_gen_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<1, BlockHookId>, pc: GuestAddr) -> u64 - ); - let postgen = get_raw_hook!( - post_generation_hook, - block_post_gen_hook_wrapper::, - unsafe extern "C" fn( - &mut HookState<1, BlockHookId>, - pc: GuestAddr, - block_length: GuestUsize, - ) - ); - let exec = get_raw_hook!( - execution_hook, - block_0_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<1, BlockHookId>, id: u64) - ); - BLOCK_HOOKS.push(Box::pin(HookState { - id: BlockHookId(0), - gen: hook_to_repr!(generation_hook), - post_gen: hook_to_repr!(post_generation_hook), - execs: [hook_to_repr!(execution_hook)], - })); - let id = self.qemu.add_block_hooks( - BLOCK_HOOKS.last_mut().unwrap().as_mut().get_unchecked_mut(), - gen, - postgen, - exec, - ); - 
BLOCK_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .id = id; - id - } - } - - #[allow(clippy::similar_names)] - pub fn reads( - &self, - generation_hook: Hook< - fn( - &mut Self, - Option<&mut S>, - pc: GuestAddr, - addr: *mut TCGTemp, - info: MemAccessInfo, - ) -> Option, - Box< - dyn for<'a> FnMut( - &'a mut Self, - Option<&'a mut S>, - GuestAddr, - *mut TCGTemp, - MemAccessInfo, - ) -> Option, - >, - unsafe extern "C" fn( - *const (), - pc: GuestAddr, - addr: *mut TCGTemp, - info: MemAccessInfo, - ) -> u64, - >, - execution_hook_1: Hook< - fn(&mut Self, Option<&mut S>, id: u64, addr: GuestAddr), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, GuestAddr)>, - unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr), - >, - execution_hook_2: Hook< - fn(&mut Self, Option<&mut S>, id: u64, addr: GuestAddr), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, GuestAddr)>, - unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr), - >, - execution_hook_4: Hook< - fn(&mut Self, Option<&mut S>, id: u64, addr: GuestAddr), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, GuestAddr)>, - unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr), - >, - execution_hook_8: Hook< - fn(&mut Self, Option<&mut S>, id: u64, addr: GuestAddr), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, GuestAddr)>, - unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr), - >, - execution_hook_n: Hook< - fn(&mut Self, Option<&mut S>, id: u64, addr: GuestAddr, size: usize), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, GuestAddr, usize)>, - unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr, size: usize), - >, - ) -> ReadHookId { - unsafe { - let gen = get_raw_hook!( - generation_hook, - read_gen_hook_wrapper::, - unsafe extern "C" fn( - &mut HookState<5, ReadHookId>, - pc: GuestAddr, - addr: *mut TCGTemp, - info: MemAccessInfo, - ) -> u64 - ); - let exec1 = get_raw_hook!( - execution_hook_1, - read_0_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<5, ReadHookId>, id: u64, addr: GuestAddr) - ); - let exec2 = get_raw_hook!( - execution_hook_2, - read_1_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<5, ReadHookId>, id: u64, addr: GuestAddr) - ); - let exec4 = get_raw_hook!( - execution_hook_4, - read_2_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<5, ReadHookId>, id: u64, addr: GuestAddr) - ); - let exec8 = get_raw_hook!( - execution_hook_8, - read_3_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<5, ReadHookId>, id: u64, addr: GuestAddr) - ); - let execn = get_raw_hook!( - execution_hook_n, - read_4_exec_hook_wrapper::, - unsafe extern "C" fn( - &mut HookState<5, ReadHookId>, - id: u64, - addr: GuestAddr, - size: usize, - ) - ); - READ_HOOKS.push(Box::pin(HookState { - id: ReadHookId(0), - gen: hook_to_repr!(generation_hook), - post_gen: HookRepr::Empty, - execs: [ - hook_to_repr!(execution_hook_1), - hook_to_repr!(execution_hook_2), - hook_to_repr!(execution_hook_4), - hook_to_repr!(execution_hook_8), - hook_to_repr!(execution_hook_n), - ], - })); - let id = self.qemu.add_read_hooks( - READ_HOOKS.last_mut().unwrap().as_mut().get_unchecked_mut(), - gen, - exec1, - exec2, - exec4, - exec8, - execn, - ); - READ_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .id = id; - id - } - } - - #[allow(clippy::similar_names)] - pub fn writes( - &self, - generation_hook: Hook< - fn( - &mut Self, - Option<&mut S>, - pc: GuestAddr, - addr: *mut TCGTemp, - info: MemAccessInfo, - ) -> Option, - Box< - dyn for<'a> FnMut( - &'a mut 
Self, - Option<&'a mut S>, - GuestAddr, - *mut TCGTemp, - MemAccessInfo, - ) -> Option, - >, - unsafe extern "C" fn( - *const (), - pc: GuestAddr, - addr: *mut TCGTemp, - info: MemAccessInfo, - ) -> u64, - >, - execution_hook_1: Hook< - fn(&mut Self, Option<&mut S>, id: u64, addr: GuestAddr), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, GuestAddr)>, - unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr), - >, - execution_hook_2: Hook< - fn(&mut Self, Option<&mut S>, id: u64, addr: GuestAddr), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, GuestAddr)>, - unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr), - >, - execution_hook_4: Hook< - fn(&mut Self, Option<&mut S>, id: u64, addr: GuestAddr), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, GuestAddr)>, - unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr), - >, - execution_hook_8: Hook< - fn(&mut Self, Option<&mut S>, id: u64, addr: GuestAddr), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, GuestAddr)>, - unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr), - >, - execution_hook_n: Hook< - fn(&mut Self, Option<&mut S>, id: u64, addr: GuestAddr, size: usize), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, GuestAddr, usize)>, - unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr, size: usize), - >, - ) -> WriteHookId { - unsafe { - let gen = get_raw_hook!( - generation_hook, - write_gen_hook_wrapper::, - unsafe extern "C" fn( - &mut HookState<5, WriteHookId>, - pc: GuestAddr, - addr: *mut TCGTemp, - info: MemAccessInfo, - ) -> u64 - ); - let exec1 = get_raw_hook!( - execution_hook_1, - write_0_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<5, WriteHookId>, id: u64, addr: GuestAddr) - ); - let exec2 = get_raw_hook!( - execution_hook_2, - write_1_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<5, WriteHookId>, id: u64, addr: GuestAddr) - ); - let exec4 = get_raw_hook!( - execution_hook_4, - write_2_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<5, WriteHookId>, id: u64, addr: GuestAddr) - ); - let exec8 = get_raw_hook!( - execution_hook_8, - write_3_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<5, WriteHookId>, id: u64, addr: GuestAddr) - ); - let execn = get_raw_hook!( - execution_hook_n, - write_4_exec_hook_wrapper::, - unsafe extern "C" fn( - &mut HookState<5, WriteHookId>, - id: u64, - addr: GuestAddr, - size: usize, - ) - ); - WRITE_HOOKS.push(Box::pin(HookState { - id: WriteHookId(0), - gen: hook_to_repr!(generation_hook), - post_gen: HookRepr::Empty, - execs: [ - hook_to_repr!(execution_hook_1), - hook_to_repr!(execution_hook_2), - hook_to_repr!(execution_hook_4), - hook_to_repr!(execution_hook_8), - hook_to_repr!(execution_hook_n), - ], - })); - let id = self.qemu.add_write_hooks( - WRITE_HOOKS.last_mut().unwrap().as_mut().get_unchecked_mut(), - gen, - exec1, - exec2, - exec4, - exec8, - execn, - ); - WRITE_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .id = id; - id - } - } - - pub fn cmps( - &self, - generation_hook: Hook< - fn(&mut Self, Option<&mut S>, pc: GuestAddr, size: usize) -> Option, - Box< - dyn for<'a> FnMut(&'a mut Self, Option<&'a mut S>, GuestAddr, usize) -> Option, - >, - unsafe extern "C" fn(*const (), pc: GuestAddr, size: usize) -> u64, - >, - execution_hook_1: Hook< - fn(&mut Self, Option<&mut S>, id: u64, v0: u8, v1: u8), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, u8, u8)>, - unsafe extern "C" fn(*const (), id: u64, v0: u8, v1: u8), - >, - execution_hook_2: Hook< - fn(&mut Self, Option<&mut S>, 
id: u64, v0: u16, v1: u16), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, u16, u16)>, - unsafe extern "C" fn(*const (), id: u64, v0: u16, v1: u16), - >, - execution_hook_4: Hook< - fn(&mut Self, Option<&mut S>, id: u64, v0: u32, v1: u32), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, u32, u32)>, - unsafe extern "C" fn(*const (), id: u64, v0: u32, v1: u32), - >, - execution_hook_8: Hook< - fn(&mut Self, Option<&mut S>, id: u64, v0: u64, v1: u64), - Box FnMut(&'a mut Self, Option<&'a mut S>, u64, u64, u64)>, - unsafe extern "C" fn(*const (), id: u64, v0: u64, v1: u64), - >, - ) -> CmpHookId { - unsafe { - let gen = get_raw_hook!( - generation_hook, - cmp_gen_hook_wrapper::, - unsafe extern "C" fn( - &mut HookState<4, CmpHookId>, - pc: GuestAddr, - size: usize, - ) -> u64 - ); - let exec1 = get_raw_hook!( - execution_hook_1, - cmp_0_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<4, CmpHookId>, id: u64, v0: u8, v1: u8) - ); - let exec2 = get_raw_hook!( - execution_hook_2, - cmp_1_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<4, CmpHookId>, id: u64, v0: u16, v1: u16) - ); - let exec4 = get_raw_hook!( - execution_hook_4, - cmp_2_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<4, CmpHookId>, id: u64, v0: u32, v1: u32) - ); - let exec8 = get_raw_hook!( - execution_hook_8, - cmp_3_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<4, CmpHookId>, id: u64, v0: u64, v1: u64) - ); - CMP_HOOKS.push(Box::pin(HookState { - id: CmpHookId(0), - gen: hook_to_repr!(generation_hook), - post_gen: HookRepr::Empty, - execs: [ - hook_to_repr!(execution_hook_1), - hook_to_repr!(execution_hook_2), - hook_to_repr!(execution_hook_4), - hook_to_repr!(execution_hook_8), - ], - })); - let id = self.qemu.add_cmp_hooks( - CMP_HOOKS.last_mut().unwrap().as_mut().get_unchecked_mut(), - gen, - exec1, - exec2, - exec4, - exec8, - ); - CMP_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .id = id; - id - } - } - - pub fn backdoor( - &self, - hook: Hook< - fn(&mut Self, Option<&mut S>, cpu: CPUArchStatePtr, GuestAddr), - Box FnMut(&'a mut Self, Option<&'a mut S>, GuestAddr)>, - extern "C" fn(*const (), cpu: CPUArchStatePtr, pc: GuestAddr), - >, - ) -> BackdoorHookId { - match hook { - Hook::Function(f) => self.backdoor_function(f), - Hook::Closure(c) => self.backdoor_closure(c), - Hook::Raw(r) => { - let z: *const () = ptr::null::<()>(); - self.qemu.add_backdoor_hook(z, r) - } - Hook::Empty => BackdoorHookId(0), // TODO error type - } - } - - pub fn backdoor_function( - &self, - hook: fn(&mut Self, Option<&mut S>, cpu: CPUArchStatePtr, pc: GuestAddr), - ) -> BackdoorHookId { - unsafe { - self.qemu - .add_backdoor_hook(transmute(hook), func_backdoor_hook_wrapper::) - } - } - - pub fn backdoor_closure( - &self, - hook: Box FnMut(&'a mut Self, Option<&'a mut S>, GuestAddr)>, - ) -> BackdoorHookId { - unsafe { - let fat: FatPtr = transmute(hook); - BACKDOOR_HOOKS.push(Box::pin((BackdoorHookId(0), fat))); - let id = self.qemu.add_backdoor_hook( - &mut BACKDOOR_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .1, - closure_backdoor_hook_wrapper::, - ); - BACKDOOR_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .0 = id; - id - } - } - - #[cfg(emulation_mode = "usermode")] - #[allow(clippy::type_complexity)] - pub fn syscalls( - &self, - hook: Hook< - fn( - &mut Self, - Option<&mut S>, - sys_num: i32, - a0: GuestAddr, - a1: GuestAddr, - a2: GuestAddr, - a3: GuestAddr, - a4: GuestAddr, - a5: GuestAddr, - a6: GuestAddr, - a7: 
GuestAddr, - ) -> SyscallHookResult, - Box< - dyn for<'a> FnMut( - &'a mut Self, - Option<&'a mut S>, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> SyscallHookResult, - >, - extern "C" fn( - *const (), - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> SyscallHookResult, - >, - ) -> PreSyscallHookId { - match hook { - Hook::Function(f) => self.syscalls_function(f), - Hook::Closure(c) => self.syscalls_closure(c), - Hook::Raw(r) => { - let z: *const () = ptr::null::<()>(); - self.qemu.add_pre_syscall_hook(z, r) - } - Hook::Empty => PreSyscallHookId(0), // TODO error type - } - } - - #[cfg(emulation_mode = "usermode")] - #[allow(clippy::type_complexity)] - pub fn syscalls_function( - &self, - hook: fn( - &mut Self, - Option<&mut S>, - sys_num: i32, - a0: GuestAddr, - a1: GuestAddr, - a2: GuestAddr, - a3: GuestAddr, - a4: GuestAddr, - a5: GuestAddr, - a6: GuestAddr, - a7: GuestAddr, - ) -> SyscallHookResult, - ) -> PreSyscallHookId { - unsafe { - self.qemu - .add_pre_syscall_hook(transmute(hook), func_pre_syscall_hook_wrapper::) - } - } - - #[cfg(emulation_mode = "usermode")] - #[allow(clippy::type_complexity)] - pub fn syscalls_closure( - &self, - hook: Box< - dyn for<'a> FnMut( - &'a mut Self, - Option<&'a mut S>, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> SyscallHookResult, - >, - ) -> PreSyscallHookId { - unsafe { - let fat: FatPtr = transmute(hook); - PRE_SYSCALL_HOOKS.push(Box::pin((PreSyscallHookId(0), fat))); - let id = self.qemu.add_pre_syscall_hook( - &mut PRE_SYSCALL_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .1, - closure_pre_syscall_hook_wrapper::, - ); - PRE_SYSCALL_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .0 = id; - id - } - } - - #[cfg(emulation_mode = "usermode")] - #[allow(clippy::type_complexity)] - pub fn after_syscalls( - &self, - hook: Hook< - fn( - &mut Self, - Option<&mut S>, - res: GuestAddr, - sys_num: i32, - a0: GuestAddr, - a1: GuestAddr, - a2: GuestAddr, - a3: GuestAddr, - a4: GuestAddr, - a5: GuestAddr, - a6: GuestAddr, - a7: GuestAddr, - ) -> GuestAddr, - Box< - dyn for<'a> FnMut( - &'a mut Self, - Option<&mut S>, - GuestAddr, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> GuestAddr, - >, - extern "C" fn( - *const (), - GuestAddr, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> GuestAddr, - >, - ) -> PostSyscallHookId { - match hook { - Hook::Function(f) => self.after_syscalls_function(f), - Hook::Closure(c) => self.after_syscalls_closure(c), - Hook::Raw(r) => { - let z: *const () = ptr::null::<()>(); - self.qemu.add_post_syscall_hook(z, r) - } - Hook::Empty => PostSyscallHookId(0), // TODO error type - } - } - - #[cfg(emulation_mode = "usermode")] - #[allow(clippy::type_complexity)] - pub fn after_syscalls_function( - &self, - hook: fn( - &mut Self, - Option<&mut S>, - res: GuestAddr, - sys_num: i32, - a0: GuestAddr, - a1: GuestAddr, - a2: GuestAddr, - a3: GuestAddr, - a4: GuestAddr, - a5: GuestAddr, - a6: GuestAddr, - a7: GuestAddr, - ) -> GuestAddr, - ) -> PostSyscallHookId { - unsafe { - self.qemu - .add_post_syscall_hook(transmute(hook), func_post_syscall_hook_wrapper::) - } - } - - #[cfg(emulation_mode = "usermode")] - #[allow(clippy::type_complexity)] - 
pub fn after_syscalls_closure( - &self, - hook: Box< - dyn for<'a> FnMut( - &'a mut Self, - Option<&mut S>, - GuestAddr, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> GuestAddr, - >, - ) -> PostSyscallHookId { - unsafe { - let fat: FatPtr = transmute(hook); - POST_SYSCALL_HOOKS.push(Box::pin((PostSyscallHookId(0), fat))); - let id = self.qemu.add_post_syscall_hook( - &mut POST_SYSCALL_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .1, - closure_post_syscall_hook_wrapper::, - ); - POST_SYSCALL_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .0 = id; - id - } - } - - #[cfg(emulation_mode = "usermode")] - pub fn thread_creation( - &self, - hook: Hook< - fn(&mut Self, Option<&mut S>, tid: u32) -> bool, - Box FnMut(&'a mut Self, Option<&'a mut S>, u32) -> bool>, - extern "C" fn(*const (), tid: u32) -> bool, - >, - ) -> NewThreadHookId { - match hook { - Hook::Function(f) => self.thread_creation_function(f), - Hook::Closure(c) => self.thread_creation_closure(c), - Hook::Raw(r) => { - let z: *const () = ptr::null::<()>(); - self.qemu.add_new_thread_hook(z, r) - } - Hook::Empty => NewThreadHookId(0), // TODO error type - } - } - - #[cfg(emulation_mode = "usermode")] - pub fn thread_creation_function( - &self, - hook: fn(&mut Self, Option<&mut S>, tid: u32) -> bool, - ) -> NewThreadHookId { - unsafe { - self.qemu - .add_new_thread_hook(transmute(hook), func_new_thread_hook_wrapper::) - } - } - - #[cfg(emulation_mode = "usermode")] - pub fn thread_creation_closure( - &self, - hook: Box FnMut(&'a mut Self, Option<&'a mut S>, u32) -> bool>, - ) -> NewThreadHookId { - unsafe { - let fat: FatPtr = transmute(hook); - NEW_THREAD_HOOKS.push(Box::pin((NewThreadHookId(0), fat))); - let id = self.qemu.add_new_thread_hook( - &mut NEW_THREAD_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .1, - closure_new_thread_hook_wrapper::, - ); - NEW_THREAD_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .0 = id; - id - } - } - - #[cfg(emulation_mode = "usermode")] - pub fn crash_function(&self, hook: fn(&mut Self, target_signal: i32)) { - unsafe { - self.qemu.set_crash_hook(crash_hook_wrapper::); - CRASH_HOOKS.push(HookRepr::Function(hook as *const libc::c_void)); - } - } - - #[cfg(emulation_mode = "usermode")] - pub fn crash_closure(&self, hook: Box) { - unsafe { - self.qemu.set_crash_hook(crash_hook_wrapper::); - CRASH_HOOKS.push(HookRepr::Closure(transmute(hook))); - } - } - - pub fn jmps( - &self, - generation_hook: Hook< - fn(&mut Self, Option<&mut S>, src: GuestAddr, dest: GuestAddr) -> Option, - Box< - dyn for<'a> FnMut( - &'a mut Self, - Option<&'a mut S>, - GuestAddr, - GuestAddr, - ) -> Option, - >, - extern "C" fn(*const (), src: GuestAddr, dest: GuestAddr) -> u64, - >, - execution_hook: Hook< - fn(&mut Self, Option<&mut S>, src: GuestAddr, dest: GuestAddr, id: u64), - Box FnMut(&'a mut Self, Option<&'a mut S>, GuestAddr, GuestAddr, u64)>, - extern "C" fn(*const (), src: GuestAddr, dest: GuestAddr, id: u64), - >, - ) -> JmpHookId { - unsafe { - let gen = get_raw_hook!( - generation_hook, - jmp_gen_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<1, JmpHookId>, src: GuestAddr, dest: GuestAddr) -> u64 - ); - let exec = get_raw_hook!( - execution_hook, - jmp_0_exec_hook_wrapper::, - unsafe extern "C" fn(&mut HookState<1, JmpHookId>, src: GuestAddr, dest: GuestAddr, id: u64) - ); - JMP_HOOKS.push(Box::pin(HookState { - id: JmpHookId(0), - gen: 
hook_to_repr!(generation_hook), - post_gen: HookRepr::Empty, - execs: [hook_to_repr!(execution_hook)], - })); - let id = self - .qemu - .add_jmp_hooks(JMP_HOOKS.last_mut().unwrap().as_mut().get_unchecked_mut(), - gen, - exec - ); - JMP_HOOKS - .last_mut() - .unwrap() - .as_mut() - .get_unchecked_mut() - .id = id; - id - } - } -} diff --git a/libafl_qemu/src/lib.rs b/libafl_qemu/src/lib.rs index ad21721b2d..6f27560e56 100644 --- a/libafl_qemu/src/lib.rs +++ b/libafl_qemu/src/lib.rs @@ -1,10 +1,9 @@ //! Welcome to `LibAFL` QEMU //! //! __Warning__: The documentation is built by default for `x86_64` in `usermode`. To access the documentation of other architectures or `systemmode`, the documentation must be rebuilt with the right features. -#![doc = include_str!("../../README.md")] /*! */ +#![doc = include_str!("../README.md")] #![cfg_attr(feature = "document-features", doc = document_features::document_features!())] -#![forbid(unexpected_cfgs)] // libafl_qemu only supports Linux currently #![cfg(target_os = "linux")] // This lint triggers too often on the current GuestAddr type when emulating 64-bit targets because @@ -13,16 +12,6 @@ any(cpu_target = "x86_64", cpu_target = "aarch64"), allow(clippy::useless_conversion) )] -#![allow(clippy::needless_pass_by_value)] -#![allow(clippy::needless_pass_by_ref_mut)] -#![allow(clippy::transmute_ptr_to_ptr)] -#![allow(clippy::ptr_cast_constness)] -#![allow(clippy::too_many_arguments)] -// Till they fix this buggy lint in clippy -#![allow(clippy::borrow_as_ptr)] -#![allow(clippy::borrow_deref_ref)] -// Allow only ATM, it will be evetually removed -#![allow(clippy::missing_safety_doc)] // libafl_qemu_sys export types with empty struct markers (e.g. struct {} start_init_save) // This causes bindgen to generate empty Rust struct that are generally not FFI-safe due to C++ having empty structs with size 1 // As the QEMU codebase is C, it is FFI-safe and we just ignore the warning @@ -38,11 +27,7 @@ pub use arch::*; pub mod elf; -pub mod helpers; -pub use helpers::*; - -pub mod hooks; -pub use hooks::*; +pub mod modules; pub mod executor; pub use executor::QemuExecutor; @@ -59,6 +44,10 @@ pub mod breakpoint; pub mod command; pub mod sync_exit; +pub use libafl_qemu_sys::{GuestAddr, MmapPerms}; +#[cfg(feature = "systemmode")] +pub use libafl_qemu_sys::{GuestPhysAddr, GuestVirtAddr}; + #[must_use] pub fn filter_qemu_args() -> Vec { let mut args = vec![env::args().next().unwrap()]; @@ -83,28 +72,31 @@ use pyo3::prelude::*; #[pymodule] #[pyo3(name = "libafl_qemu")] #[allow(clippy::items_after_statements, clippy::too_many_lines)] -pub fn python_module(py: Python, m: &PyModule) -> PyResult<()> { - let regsm = PyModule::new(py, "regs")?; +pub fn python_module(m: &Bound<'_, PyModule>) -> PyResult<()> { + use pyo3::types::PyString; + + let regsm = PyModule::new(m.py(), "regs")?; for r in Regs::iter() { let v: i32 = r.into(); - regsm.add(&format!("{r:?}"), v)?; + regsm.add(PyString::new(m.py(), &format!("{r:?}")), v)?; } - m.add_submodule(regsm)?; + m.add_submodule(®sm)?; - let mmapm = PyModule::new(py, "mmap")?; - for r in sys::MmapPerms::iter() { + let mmapm = PyModule::new(m.py(), "mmap")?; + for r in MmapPerms::iter() { let v: i32 = r.into(); - mmapm.add(&format!("{r:?}"), v)?; + mmapm.add(PyString::new(m.py(), &format!("{r:?}")), v)?; } - m.add_submodule(mmapm)?; + m.add_submodule(&mmapm)?; + #[cfg(feature = "usermode")] m.add_class::()?; - #[cfg(emulation_mode = "usermode")] - m.add_class::()?; + #[cfg(feature = "usermode")] + m.add_class::()?; - m.add_class::()?; 
- m.add_class::()?; + m.add_class::()?; + m.add_class::()?; Ok(()) } diff --git a/libafl_qemu/src/helpers/calls.rs b/libafl_qemu/src/modules/calls.rs similarity index 51% rename from libafl_qemu/src/helpers/calls.rs rename to libafl_qemu/src/modules/calls.rs index c777599297..1e280a0720 100644 --- a/libafl_qemu/src/helpers/calls.rs +++ b/libafl_qemu/src/modules/calls.rs @@ -10,39 +10,39 @@ use libafl_bolts::tuples::{Handle, Handled, MatchFirstType, MatchNameRef}; use libafl_qemu_sys::GuestAddr; use thread_local::ThreadLocal; +#[cfg(feature = "systemmode")] +use crate::modules::{NopPageFilter, NOP_PAGE_FILTER}; use crate::{ capstone, - helpers::{ - HasInstrumentationFilter, IsFilter, QemuHelper, QemuHelperTuple, - QemuInstrumentationAddressRangeFilter, + modules::{ + AddressFilter, EmulatorModule, EmulatorModuleTuple, EmulatorModules, StdAddressFilter, }, - hooks::{Hook, QemuHooks}, - qemu::ArchExtras, + qemu::{ArchExtras, Hook}, Qemu, }; pub trait CallTraceCollector: 'static { - fn on_call( + fn on_call( &mut self, - hooks: &mut QemuHooks, + emulator_modules: &mut EmulatorModules, state: Option<&mut S>, pc: GuestAddr, call_len: usize, ) where - S: UsesInput, - QT: QemuHelperTuple; + S: Unpin + UsesInput, + ET: EmulatorModuleTuple; - fn on_ret( + fn on_ret( &mut self, - hooks: &mut QemuHooks, + emulator_modules: &mut EmulatorModules, state: Option<&mut S>, pc: GuestAddr, ret_addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple; + S: Unpin + UsesInput, + ET: EmulatorModuleTuple; - // Frowarded from the `QemuCallTracerHelper` + // Frowarded from the `CallTracerModule` fn pre_exec(&mut self, _qemu: Qemu, _input: &I) where I: Input, @@ -56,32 +56,32 @@ pub trait CallTraceCollector: 'static { _observers: &mut OT, _exit_kind: &mut ExitKind, ) where - OT: ObserversTuple, - S: UsesInput, + OT: ObserversTuple, + S: Unpin + UsesInput, { } } pub trait CallTraceCollectorTuple: 'static + MatchFirstType { - fn on_call_all( + fn on_call_all( &mut self, - hooks: &mut QemuHooks, + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, pc: GuestAddr, call_len: usize, ) where - S: UsesInput, - QT: QemuHelperTuple; + S: Unpin + UsesInput, + ET: EmulatorModuleTuple; - fn on_ret_all( + fn on_ret_all( &mut self, - hooks: &mut QemuHooks, + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, _pc: GuestAddr, ret_addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple; + S: Unpin + UsesInput, + ET: EmulatorModuleTuple; fn pre_exec_all(&mut self, _qemu: Qemu, input: &I) where @@ -94,32 +94,32 @@ pub trait CallTraceCollectorTuple: 'static + MatchFirstType { _observers: &mut OT, _exit_kind: &mut ExitKind, ) where - OT: ObserversTuple, - S: UsesInput; + OT: ObserversTuple, + S: Unpin + UsesInput; } impl CallTraceCollectorTuple for () { - fn on_call_all( + fn on_call_all( &mut self, - _hooks: &mut QemuHooks, + _emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, _pc: GuestAddr, _call_len: usize, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { } - fn on_ret_all( + fn on_ret_all( &mut self, - _hooks: &mut QemuHooks, + _emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, _pc: GuestAddr, _ret_addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { } @@ -136,8 +136,8 @@ impl CallTraceCollectorTuple for () { _observers: &mut OT, _exit_kind: &mut ExitKind, ) where - OT: ObserversTuple, - S: UsesInput, + OT: ObserversTuple, + S: Unpin + UsesInput, { } } @@ 
-147,18 +147,18 @@ where Head: CallTraceCollector, Tail: CallTraceCollectorTuple, { - fn on_call_all( + fn on_call_all( &mut self, - hooks: &mut QemuHooks, + emulator_modules: &mut EmulatorModules, mut state: Option<&mut S>, pc: GuestAddr, call_len: usize, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { self.0.on_call( - hooks, + emulator_modules, match state.as_mut() { Some(s) => Some(*s), None => None, @@ -166,21 +166,21 @@ where pc, call_len, ); - self.1.on_call_all(hooks, state, pc, call_len); + self.1.on_call_all(emulator_modules, state, pc, call_len); } - fn on_ret_all( + fn on_ret_all( &mut self, - hooks: &mut QemuHooks, + emulator_modules: &mut EmulatorModules, mut state: Option<&mut S>, pc: GuestAddr, ret_addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { self.0.on_ret( - hooks, + emulator_modules, match state.as_mut() { Some(s) => Some(*s), None => None, @@ -188,7 +188,7 @@ where pc, ret_addr, ); - self.1.on_ret_all(hooks, state, pc, ret_addr); + self.1.on_ret_all(emulator_modules, state, pc, ret_addr); } fn pre_exec_all(&mut self, qemu: Qemu, input: &I) @@ -206,8 +206,8 @@ where observers: &mut OT, exit_kind: &mut ExitKind, ) where - OT: ObserversTuple, - S: UsesInput, + OT: ObserversTuple, + S: Unpin + UsesInput, { self.0.post_exec(qemu, input, observers, exit_kind); self.1.post_exec_all(qemu, input, observers, exit_kind); @@ -215,21 +215,21 @@ where } #[derive(Debug)] -pub struct QemuCallTracerHelper +pub struct CallTracerModule where T: CallTraceCollectorTuple, { - filter: QemuInstrumentationAddressRangeFilter, + filter: StdAddressFilter, cs: Capstone, collectors: Option, } -impl QemuCallTracerHelper +impl CallTracerModule where - T: CallTraceCollectorTuple, + T: CallTraceCollectorTuple + Debug, { #[must_use] - pub fn new(filter: QemuInstrumentationAddressRangeFilter, collectors: T) -> Self { + pub fn new(filter: StdAddressFilter, collectors: T) -> Self { Self { filter, cs: capstone().detail(true).build().unwrap(), @@ -239,19 +239,22 @@ where #[must_use] pub fn must_instrument(&self, addr: GuestAddr) -> bool { - self.filter.allowed(addr) + self.filter.allowed(&addr) } - fn on_ret(hooks: &mut QemuHooks, state: Option<&mut S>, pc: GuestAddr) - where - S: UsesInput, - QT: QemuHelperTuple, + fn on_ret( + emulator_modules: &mut EmulatorModules, + state: Option<&mut S>, + pc: GuestAddr, + ) where + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let ret_addr: GuestAddr = hooks.qemu().read_return_address().unwrap(); + let ret_addr: GuestAddr = emulator_modules.qemu().read_return_address().unwrap(); // log::info!("RET @ 0x{:#x}", ret_addr); - let mut collectors = if let Some(h) = hooks.helpers_mut().match_first_type_mut::() { + let mut collectors = if let Some(h) = emulator_modules.get_mut::() { h.collectors.take() } else { return; @@ -262,24 +265,20 @@ where collectors .as_mut() .unwrap() - .on_ret_all(hooks, state, pc, ret_addr); - hooks - .helpers_mut() - .match_first_type_mut::() - .unwrap() - .collectors = collectors; + .on_ret_all(emulator_modules, state, pc, ret_addr); + emulator_modules.get_mut::().unwrap().collectors = collectors; } - fn gen_blocks_calls( - hooks: &mut QemuHooks, + fn gen_blocks_calls( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, pc: GuestAddr, ) -> Option where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - if let Some(h) = hooks.helpers_mut().match_first_type_mut::() { 
+ if let Some(h) = emulator_modules.get_mut::() { if !h.must_instrument(pc) { return None; } @@ -293,22 +292,27 @@ where .unwrap(); } - let emu = hooks.qemu(); + let qemu = emulator_modules.qemu(); - if let Some(h) = hooks.helpers().match_first_type::() { + let mut call_addrs: Vec<(GuestAddr, usize)> = Vec::new(); + let mut ret_addrs: Vec = Vec::new(); + + if let Some(h) = emulator_modules.modules().match_first_type::() { #[allow(unused_mut)] let mut code = { - #[cfg(emulation_mode = "usermode")] + #[cfg(feature = "usermode")] unsafe { - std::slice::from_raw_parts(emu.g2h(pc), 512) + std::slice::from_raw_parts(qemu.g2h(pc), 512) } - #[cfg(emulation_mode = "systemmode")] + #[cfg(feature = "systemmode")] &mut [0; 512] }; - #[cfg(emulation_mode = "systemmode")] - unsafe { - emu.read_mem(pc, code) - }; // TODO handle faults + #[cfg(feature = "systemmode")] + if let Err(err) = qemu.read_mem(pc, code) { + // TODO handle faults + log::error!("gen_block_calls: Failed to read mem at pc {pc:#x}: {err:?}"); + return None; + } let mut iaddr = pc; @@ -322,39 +326,10 @@ where match u32::from(detail.0) { capstone::InsnGroupType::CS_GRP_CALL => { let call_len = insn.bytes().len(); - // TODO do not use a closure, find a more efficient way to pass call_len - let call_cb = Box::new( - move |hooks: &mut QemuHooks, state: Option<&mut S>, pc| { - // eprintln!("CALL @ 0x{:#x}", pc + call_len); - let mut collectors = if let Some(h) = - hooks.helpers_mut().match_first_type_mut::() - { - h.collectors.take() - } else { - return; - }; - if collectors.is_none() { - return; // TODO fix this, it can be None on races ret - } - collectors - .as_mut() - .unwrap() - .on_call_all(hooks, state, pc, call_len); - hooks - .helpers_mut() - .match_first_type_mut::() - .unwrap() - .collectors = collectors; - }, - ); - hooks.instruction_closure(insn.address() as GuestAddr, call_cb, false); + call_addrs.push((insn.address() as GuestAddr, call_len)); } capstone::InsnGroupType::CS_GRP_RET => { - hooks.instruction_function( - insn.address() as GuestAddr, - Self::on_ret, - false, - ); + ret_addrs.push(insn.address() as GuestAddr); break 'disasm; } capstone::InsnGroupType::CS_GRP_INVALID @@ -369,67 +344,121 @@ where iaddr += insn.bytes().len() as GuestAddr; - #[cfg(emulation_mode = "usermode")] + #[cfg(feature = "usermode")] unsafe { - code = std::slice::from_raw_parts(emu.g2h(iaddr), 512); + code = std::slice::from_raw_parts(qemu.g2h(iaddr), 512); + } + #[cfg(feature = "systemmode")] + if let Err(err) = qemu.read_mem(pc, code) { + // TODO handle faults + log::error!( + "gen_block_calls error 2: Failed to read mem at pc {pc:#x}: {err:?}" + ); + return None; } - #[cfg(emulation_mode = "systemmode")] - unsafe { - emu.read_mem(pc, code); - } // TODO handle faults } } + for (call_addr, call_len) in call_addrs { + // TODO do not use a closure, find a more efficient way to pass call_len + let call_cb = Box::new( + move |emulator_modules: &mut EmulatorModules, state: Option<&mut S>, pc| { + // eprintln!("CALL @ 0x{:#x}", pc + call_len); + let mut collectors = if let Some(h) = emulator_modules.get_mut::() { + h.collectors.take() + } else { + return; + }; + if collectors.is_none() { + return; // TODO fix this, it can be None on races ret + } + collectors + .as_mut() + .unwrap() + .on_call_all(emulator_modules, state, pc, call_len); + emulator_modules.get_mut::().unwrap().collectors = collectors; + }, + ); + emulator_modules.instruction_closure(call_addr, call_cb, false); + } + + for ret_addr in ret_addrs { + 
emulator_modules.instruction_function(ret_addr, Self::on_ret, false); + } + None } } -impl HasInstrumentationFilter for QemuCallTracerHelper +impl EmulatorModule for CallTracerModule where - T: CallTraceCollectorTuple, -{ - fn filter(&self) -> &QemuInstrumentationAddressRangeFilter { - &self.filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationAddressRangeFilter { - &mut self.filter - } -} - -impl QemuHelper for QemuCallTracerHelper -where - S: UsesInput, + S: Unpin + UsesInput, T: CallTraceCollectorTuple + Debug, { - fn init_hooks(&self, hooks: &QemuHooks) + type ModuleAddressFilter = StdAddressFilter; + #[cfg(feature = "systemmode")] + type ModulePageFilter = NopPageFilter; + + fn post_qemu_init(&self, emulator_modules: &mut EmulatorModules) where - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, { - hooks.blocks( - Hook::Function(Self::gen_blocks_calls::), + emulator_modules.blocks( + Hook::Function(Self::gen_blocks_calls::), Hook::Empty, Hook::Empty, ); } - fn pre_exec(&mut self, qemu: Qemu, input: &S::Input) { - self.collectors.as_mut().unwrap().pre_exec_all(qemu, input); - } - - fn post_exec( + fn pre_exec( &mut self, - qemu: Qemu, + emulator_modules: &mut EmulatorModules, + _state: &mut S, input: &S::Input, - observers: &mut OT, - exit_kind: &mut ExitKind, ) where - OT: ObserversTuple, + ET: EmulatorModuleTuple, { self.collectors .as_mut() .unwrap() - .post_exec_all(qemu, input, observers, exit_kind); + .pre_exec_all(emulator_modules.qemu(), input); + } + + fn post_exec( + &mut self, + emulator_modules: &mut EmulatorModules, + _state: &mut S, + input: &S::Input, + observers: &mut OT, + exit_kind: &mut ExitKind, + ) where + OT: ObserversTuple, + ET: EmulatorModuleTuple, + { + self.collectors.as_mut().unwrap().post_exec_all( + emulator_modules.qemu(), + input, + observers, + exit_kind, + ); + } + + fn address_filter(&self) -> &Self::ModuleAddressFilter { + &self.filter + } + + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter { + &mut self.filter + } + + #[cfg(feature = "systemmode")] + fn page_filter(&self) -> &Self::ModulePageFilter { + &NopPageFilter + } + + #[cfg(feature = "systemmode")] + fn page_filter_mut(&mut self) -> &mut Self::ModulePageFilter { + unsafe { (&raw mut NOP_PAGE_FILTER).as_mut().unwrap().get_mut() } } } @@ -464,29 +493,29 @@ where 'a: 'static, { #[allow(clippy::unnecessary_cast)] - fn on_call( + fn on_call( &mut self, - _hooks: &mut QemuHooks, + _emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, pc: GuestAddr, call_len: usize, ) where - S: UsesInput, - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, { self.callstack_hash ^= pc as u64 + call_len as u64; } #[allow(clippy::unnecessary_cast)] - fn on_ret( + fn on_ret( &mut self, - _hooks: &mut QemuHooks, + _emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, _pc: GuestAddr, ret_addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, { self.callstack_hash ^= ret_addr as u64; } @@ -505,8 +534,8 @@ where observers: &mut OT, exit_kind: &mut ExitKind, ) where - OT: ObserversTuple, - S: UsesInput, + OT: ObserversTuple, + S: Unpin + UsesInput, { let observer = observers .get_mut(&self.observer_handle) @@ -520,29 +549,35 @@ static mut CALLSTACKS: Option>>> = None; #[derive(Debug)] pub struct FullBacktraceCollector {} -impl Default for FullBacktraceCollector { - fn default() -> Self { - Self::new() - } -} - impl FullBacktraceCollector { - pub fn new() -> Self { - unsafe { CALLSTACKS = 
Some(ThreadLocal::new()) }; + /// # Safety + /// This accesses the global [`CALLSTACKS`] variable and may not be called concurrently. + pub unsafe fn new() -> Self { + let callstacks_ptr = &raw mut CALLSTACKS; + unsafe { (*callstacks_ptr) = Some(ThreadLocal::new()) }; Self {} } pub fn reset(&mut self) { + // # Safety + // This accesses the global [`CALLSTACKS`] variable. + // While it is racey, it might be fine if multiple clear the vecs concurrently. + // TODO: This should probably be rewritten in a safer way. + let callstacks_ptr = &raw mut CALLSTACKS; unsafe { - for tls in CALLSTACKS.as_mut().unwrap().iter_mut() { + for tls in (*callstacks_ptr).as_mut().unwrap().iter_mut() { (*tls.get()).clear(); } } } pub fn backtrace() -> Option<&'static [GuestAddr]> { + // # Safety + // This accesses the global [`CALLSTACKS`] variable. + // However, the actual variable access is behind a `ThreadLocal` class. + let callstacks_ptr = &raw mut CALLSTACKS; unsafe { - if let Some(c) = CALLSTACKS.as_mut() { + if let Some(c) = (*callstacks_ptr).as_mut() { Some(&*c.get_or_default().get()) } else { None @@ -553,35 +588,38 @@ impl FullBacktraceCollector { impl CallTraceCollector for FullBacktraceCollector { #[allow(clippy::unnecessary_cast)] - fn on_call( + fn on_call( &mut self, - _hooks: &mut QemuHooks, + _emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, pc: GuestAddr, call_len: usize, ) where - S: UsesInput, - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, { + let callstacks_ptr = &raw mut CALLSTACKS; // TODO handle Thumb unsafe { - (*CALLSTACKS.as_mut().unwrap().get_or_default().get()).push(pc + call_len as GuestAddr); + (*(*callstacks_ptr).as_mut().unwrap().get_or_default().get()) + .push(pc + call_len as GuestAddr); } } #[allow(clippy::unnecessary_cast)] - fn on_ret( + fn on_ret( &mut self, - _hooks: &mut QemuHooks, + _emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, _pc: GuestAddr, ret_addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, { + let callstacks_ptr = &raw mut CALLSTACKS; unsafe { - let v = &mut *CALLSTACKS.as_mut().unwrap().get_or_default().get(); + let v = &mut *(*callstacks_ptr).as_mut().unwrap().get_or_default().get(); if !v.is_empty() { // if *v.last().unwrap() == ret_addr { // v.pop(); diff --git a/libafl_qemu/src/helpers/cmplog.rs b/libafl_qemu/src/modules/cmplog.rs similarity index 55% rename from libafl_qemu/src/helpers/cmplog.rs rename to libafl_qemu/src/modules/cmplog.rs index e637cfe6a1..b595971374 100644 --- a/libafl_qemu/src/helpers/cmplog.rs +++ b/libafl_qemu/src/modules/cmplog.rs @@ -1,4 +1,4 @@ -#[cfg(emulation_mode = "usermode")] +#[cfg(feature = "usermode")] use capstone::{arch::BuildsCapstone, Capstone, InsnDetail}; use hashbrown::HashMap; use libafl::{inputs::UsesInput, HasMetadata}; @@ -11,14 +11,14 @@ pub use libafl_targets::{ }; use serde::{Deserialize, Serialize}; -#[cfg(emulation_mode = "usermode")] +#[cfg(feature = "systemmode")] +use crate::modules::{NopPageFilter, NOP_PAGE_FILTER}; +#[cfg(feature = "usermode")] use crate::{capstone, qemu::ArchExtras, CallingConvention, Qemu}; use crate::{ - helpers::{ - hash_me, HasInstrumentationFilter, IsFilter, QemuHelper, QemuHelperTuple, - QemuInstrumentationAddressRangeFilter, - }, - hooks::{Hook, QemuHooks}, + emu::EmulatorModules, + modules::{hash_me, AddressFilter, EmulatorModule, EmulatorModuleTuple, StdAddressFilter}, + qemu::Hook, }; #[cfg_attr( @@ -44,112 +44,144 @@ impl QemuCmpsMapMetadata { 
libafl_bolts::impl_serdeany!(QemuCmpsMapMetadata); #[derive(Debug)] -pub struct QemuCmpLogHelper { - filter: QemuInstrumentationAddressRangeFilter, +pub struct CmpLogModule { + address_filter: StdAddressFilter, } -impl QemuCmpLogHelper { +impl CmpLogModule { #[must_use] - pub fn new(filter: QemuInstrumentationAddressRangeFilter) -> Self { - Self { filter } + pub fn new(address_filter: StdAddressFilter) -> Self { + Self { address_filter } } #[must_use] pub fn must_instrument(&self, addr: GuestAddr) -> bool { - self.filter.allowed(addr) + self.address_filter.allowed(&addr) } } -impl Default for QemuCmpLogHelper { +impl Default for CmpLogModule { fn default() -> Self { - Self::new(QemuInstrumentationAddressRangeFilter::None) + Self::new(StdAddressFilter::default()) } } -impl HasInstrumentationFilter for QemuCmpLogHelper { - fn filter(&self) -> &QemuInstrumentationAddressRangeFilter { - &self.filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationAddressRangeFilter { - &mut self.filter - } -} - -impl QemuHelper for QemuCmpLogHelper +impl EmulatorModule for CmpLogModule where - S: UsesInput + HasMetadata, + S: Unpin + UsesInput + HasMetadata, { - fn first_exec(&self, hooks: &QemuHooks) + type ModuleAddressFilter = StdAddressFilter; + #[cfg(feature = "systemmode")] + type ModulePageFilter = NopPageFilter; + + fn first_exec(&mut self, emulator_modules: &mut EmulatorModules, _state: &mut S) where - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, { - hooks.cmps( - Hook::Function(gen_unique_cmp_ids::), + emulator_modules.cmps( + Hook::Function(gen_unique_cmp_ids::), Hook::Raw(trace_cmp1_cmplog), Hook::Raw(trace_cmp2_cmplog), Hook::Raw(trace_cmp4_cmplog), Hook::Raw(trace_cmp8_cmplog), ); } + + fn address_filter(&self) -> &Self::ModuleAddressFilter { + &self.address_filter + } + + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter { + &mut self.address_filter + } + + #[cfg(feature = "systemmode")] + fn page_filter(&self) -> &Self::ModulePageFilter { + &NopPageFilter + } + + #[cfg(feature = "systemmode")] + fn page_filter_mut(&mut self) -> &mut Self::ModulePageFilter { + unsafe { (&raw mut NOP_PAGE_FILTER).as_mut().unwrap().get_mut() } + } } #[derive(Debug)] -pub struct QemuCmpLogChildHelper { - filter: QemuInstrumentationAddressRangeFilter, +pub struct CmpLogChildModule { + address_filter: StdAddressFilter, } -impl QemuCmpLogChildHelper { +impl CmpLogChildModule { #[must_use] - pub fn new(filter: QemuInstrumentationAddressRangeFilter) -> Self { - Self { filter } + pub fn new(address_filter: StdAddressFilter) -> Self { + Self { address_filter } } #[must_use] pub fn must_instrument(&self, addr: GuestAddr) -> bool { - self.filter.allowed(addr) + self.address_filter.allowed(&addr) } } -impl Default for QemuCmpLogChildHelper { +impl Default for CmpLogChildModule { fn default() -> Self { - Self::new(QemuInstrumentationAddressRangeFilter::None) + Self::new(StdAddressFilter::default()) } } -impl QemuHelper for QemuCmpLogChildHelper +impl EmulatorModule for CmpLogChildModule where - S: UsesInput, - S: HasMetadata, + S: Unpin + UsesInput + HasMetadata, { + type ModuleAddressFilter = StdAddressFilter; + #[cfg(feature = "systemmode")] + type ModulePageFilter = NopPageFilter; + const HOOKS_DO_SIDE_EFFECTS: bool = false; - fn first_exec(&self, hooks: &QemuHooks) + fn first_exec(&mut self, emulator_modules: &mut EmulatorModules, _state: &mut S) where - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, { - hooks.cmps( - Hook::Function(gen_hashed_cmp_ids::), + emulator_modules.cmps( + 
Hook::Function(gen_hashed_cmp_ids::), Hook::Raw(trace_cmp1_cmplog), Hook::Raw(trace_cmp2_cmplog), Hook::Raw(trace_cmp4_cmplog), Hook::Raw(trace_cmp8_cmplog), ); } + + fn address_filter(&self) -> &Self::ModuleAddressFilter { + &self.address_filter + } + + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter { + &mut self.address_filter + } + + #[cfg(feature = "systemmode")] + fn page_filter(&self) -> &Self::ModulePageFilter { + &NopPageFilter + } + + #[cfg(feature = "systemmode")] + fn page_filter_mut(&mut self) -> &mut Self::ModulePageFilter { + unsafe { (&raw mut NOP_PAGE_FILTER).as_mut().unwrap().get_mut() } + } } -pub fn gen_unique_cmp_ids( - hooks: &mut QemuHooks, +pub fn gen_unique_cmp_ids( + emulator_modules: &mut EmulatorModules, state: Option<&mut S>, pc: GuestAddr, _size: usize, ) -> Option where - S: HasMetadata, - S: UsesInput, - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput + HasMetadata, { - if let Some(h) = hooks.match_helper_mut::() { + if let Some(h) = emulator_modules.get::() { if !h.must_instrument(pc) { return None; } @@ -170,18 +202,17 @@ where })) } -pub fn gen_hashed_cmp_ids( - hooks: &mut QemuHooks, +pub fn gen_hashed_cmp_ids( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, pc: GuestAddr, _size: usize, ) -> Option where - S: HasMetadata, - S: UsesInput, - QT: QemuHelperTuple, + S: HasMetadata + Unpin + UsesInput, + ET: EmulatorModuleTuple, { - if let Some(h) = hooks.match_helper_mut::() { + if let Some(h) = emulator_modules.get::() { if !h.must_instrument(pc) { return None; } @@ -213,29 +244,31 @@ pub extern "C" fn trace_cmp8_cmplog(_: *const (), id: u64, v0: u64, v1: u64) { } } -#[cfg(emulation_mode = "usermode")] +#[cfg(feature = "usermode")] #[derive(Debug)] -pub struct QemuCmpLogRoutinesHelper { - filter: QemuInstrumentationAddressRangeFilter, +pub struct CmpLogRoutinesModule { + address_filter: StdAddressFilter, cs: Capstone, } -#[cfg(emulation_mode = "usermode")] -impl QemuCmpLogRoutinesHelper { +#[cfg(feature = "usermode")] +impl CmpLogRoutinesModule { #[must_use] - pub fn new(filter: QemuInstrumentationAddressRangeFilter) -> Self { + pub fn new(address_filter: StdAddressFilter) -> Self { Self { - filter, + address_filter, cs: capstone().detail(true).build().unwrap(), } } #[must_use] pub fn must_instrument(&self, addr: GuestAddr) -> bool { - self.filter.allowed(addr) + self.address_filter.allowed(&addr) } - extern "C" fn on_call(k: u64, _pc: GuestAddr) { + /// # Safety + /// Dereferences k as pointer eventually. 
+ unsafe extern "C" fn on_call(k: u64, _pc: GuestAddr) { unsafe { if CMPLOG_ENABLED == 0 { return; @@ -262,16 +295,16 @@ impl QemuCmpLogRoutinesHelper { } } - fn gen_blocks_calls( - hooks: &mut QemuHooks, + fn gen_blocks_calls( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, pc: GuestAddr, ) -> Option where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - if let Some(h) = hooks.helpers_mut().match_first_type_mut::() { + if let Some(h) = emulator_modules.get_mut::() { if !h.must_instrument(pc) { return None; } @@ -285,19 +318,19 @@ impl QemuCmpLogRoutinesHelper { .unwrap(); } - let qemu = hooks.qemu(); + let qemu = emulator_modules.qemu(); - if let Some(h) = hooks.helpers().match_first_type::() { + if let Some(h) = emulator_modules.get::() { #[allow(unused_mut)] let mut code = { - #[cfg(emulation_mode = "usermode")] + #[cfg(feature = "usermode")] unsafe { std::slice::from_raw_parts(qemu.g2h(pc), 512) } - #[cfg(emulation_mode = "systemmode")] + #[cfg(feature = "systemmode")] &mut [0; 512] }; - #[cfg(emulation_mode = "systemmode")] + #[cfg(feature = "systemmode")] unsafe { qemu.read_mem(pc, code) }; // TODO handle faults @@ -314,7 +347,12 @@ impl QemuCmpLogRoutinesHelper { match u32::from(detail.0) { capstone::InsnGroupType::CS_GRP_CALL => { let k = (hash_me(pc.into())) & (CMPLOG_MAP_W as u64 - 1); - qemu.set_hook(k, insn.address() as GuestAddr, Self::on_call, false); + qemu.hooks().add_instruction_hooks( + k, + insn.address() as GuestAddr, + Self::on_call, + false, + ); } capstone::InsnGroupType::CS_GRP_RET | capstone::InsnGroupType::CS_GRP_INVALID @@ -329,11 +367,11 @@ impl QemuCmpLogRoutinesHelper { iaddr += insn.bytes().len() as GuestAddr; - #[cfg(emulation_mode = "usermode")] + #[cfg(feature = "usermode")] unsafe { code = std::slice::from_raw_parts(qemu.g2h(iaddr), 512); } - #[cfg(emulation_mode = "systemmode")] + #[cfg(feature = "systemmode")] unsafe { qemu.read_mem(pc, code); } // TODO handle faults @@ -344,30 +382,41 @@ impl QemuCmpLogRoutinesHelper { } } -#[cfg(emulation_mode = "usermode")] -impl HasInstrumentationFilter for QemuCmpLogRoutinesHelper { - fn filter(&self) -> &QemuInstrumentationAddressRangeFilter { - &self.filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationAddressRangeFilter { - &mut self.filter - } -} - -#[cfg(emulation_mode = "usermode")] -impl QemuHelper for QemuCmpLogRoutinesHelper +#[cfg(feature = "usermode")] +impl EmulatorModule for CmpLogRoutinesModule where - S: UsesInput, + S: Unpin + UsesInput, { - fn first_exec(&self, hooks: &QemuHooks) + type ModuleAddressFilter = StdAddressFilter; + #[cfg(feature = "systemmode")] + type ModulePageFilter = NopPageFilter; + + fn first_exec(&mut self, emulator_modules: &mut EmulatorModules, _state: &mut S) where - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, { - hooks.blocks( - Hook::Function(Self::gen_blocks_calls::), + emulator_modules.blocks( + Hook::Function(Self::gen_blocks_calls::), Hook::Empty, Hook::Empty, ); } + + fn address_filter(&self) -> &Self::ModuleAddressFilter { + &self.address_filter + } + + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter { + &mut self.address_filter + } + + #[cfg(feature = "systemmode")] + fn page_filter(&self) -> &Self::ModulePageFilter { + &NopPageFilter + } + + #[cfg(feature = "systemmode")] + fn page_filter_mut(&mut self) -> &mut Self::ModulePageFilter { + &mut NopPageFilter + } } diff --git a/libafl_qemu/src/modules/drcov.rs b/libafl_qemu/src/modules/drcov.rs new file mode 100644 index 
0000000000..e40d3dda0e --- /dev/null +++ b/libafl_qemu/src/modules/drcov.rs @@ -0,0 +1,444 @@ +use std::{path::PathBuf, sync::Mutex}; + +use hashbrown::{hash_map::Entry, HashMap}; +use libafl::{executors::ExitKind, inputs::UsesInput, observers::ObserversTuple, HasMetadata}; +use libafl_qemu_sys::{GuestAddr, GuestUsize}; +use libafl_targets::drcov::{DrCovBasicBlock, DrCovWriter}; +use rangemap::RangeMap; +use serde::{Deserialize, Serialize}; + +#[cfg(feature = "systemmode")] +use crate::modules::{NopPageFilter, NOP_PAGE_FILTER}; +use crate::{ + emu::EmulatorModules, + modules::{AddressFilter, EmulatorModule, EmulatorModuleTuple, NopAddressFilter}, + qemu::Hook, +}; + +static DRCOV_IDS: Mutex>> = Mutex::new(None); +static DRCOV_MAP: Mutex>> = Mutex::new(None); +static DRCOV_LENGTHS: Mutex>> = Mutex::new(None); + +#[cfg_attr( + any(not(feature = "serdeany_autoreg"), miri), + allow(clippy::unsafe_derive_deserialize) +)] // for SerdeAny +#[derive(Debug, Default, Serialize, Deserialize)] +pub struct DrCovMetadata { + pub current_id: u64, +} + +impl DrCovMetadata { + #[must_use] + pub fn new() -> Self { + Self { current_id: 0 } + } +} + +libafl_bolts::impl_serdeany!(DrCovMetadata); + +#[derive(Debug)] +pub struct DrCovModuleBuilder { + filter: Option, + module_mapping: Option>, + filename: Option, + full_trace: Option, +} + +impl DrCovModuleBuilder +where + F: AddressFilter, +{ + pub fn build(self) -> DrCovModule { + DrCovModule::new( + self.filter.unwrap(), + self.filename.unwrap(), + self.module_mapping, + self.full_trace.unwrap(), + ) + } + + pub fn filter(self, filter: F2) -> DrCovModuleBuilder { + DrCovModuleBuilder { + filter: Some(filter), + module_mapping: self.module_mapping, + filename: self.filename, + full_trace: self.full_trace, + } + } + + #[must_use] + pub fn module_mapping(self, module_mapping: RangeMap) -> Self { + Self { + filter: self.filter, + module_mapping: Some(module_mapping), + filename: self.filename, + full_trace: self.full_trace, + } + } + + #[must_use] + pub fn filename(self, filename: PathBuf) -> Self { + Self { + filter: self.filter, + module_mapping: self.module_mapping, + filename: Some(filename), + full_trace: self.full_trace, + } + } + + #[must_use] + pub fn full_trace(self, full_trace: bool) -> Self { + Self { + filter: self.filter, + module_mapping: self.module_mapping, + filename: self.filename, + full_trace: Some(full_trace), + } + } +} + +#[derive(Debug)] +pub struct DrCovModule { + filter: F, + module_mapping: Option>, + filename: PathBuf, + full_trace: bool, + drcov_len: usize, +} + +impl DrCovModule { + #[must_use] + pub fn builder() -> DrCovModuleBuilder { + DrCovModuleBuilder { + filter: Some(NopAddressFilter), + module_mapping: None, + full_trace: None, + filename: None, + } + } +} +impl DrCovModule { + #[must_use] + #[allow(clippy::let_underscore_untyped)] + pub fn new( + filter: F, + filename: PathBuf, + module_mapping: Option>, + full_trace: bool, + ) -> Self { + if full_trace { + let _ = DRCOV_IDS.lock().unwrap().insert(vec![]); + } + let _ = DRCOV_MAP.lock().unwrap().insert(HashMap::new()); + let _ = DRCOV_LENGTHS.lock().unwrap().insert(HashMap::new()); + Self { + filter, + module_mapping, + filename, + full_trace, + drcov_len: 0, + } + } + + pub fn write(&mut self) { + let lengths_opt = DRCOV_LENGTHS.lock().unwrap(); + let lengths = lengths_opt.as_ref().unwrap(); + if self.full_trace { + if DRCOV_IDS.lock().unwrap().as_ref().unwrap().len() > self.drcov_len { + let mut drcov_vec = Vec::::new(); + for id in 
DRCOV_IDS.lock().unwrap().as_ref().unwrap() { + 'pcs_full: for (pc, idm) in DRCOV_MAP.lock().unwrap().as_ref().unwrap() { + let mut module_found = false; + // # Safety + // + // Module mapping is already set. It's checked or filled when the module is first run. + unsafe { + for module in self.module_mapping.as_ref().unwrap_unchecked().iter() { + let (range, (_, _)) = module; + if *pc >= range.start.try_into().unwrap() + && *pc <= range.end.try_into().unwrap() + { + module_found = true; + break; + } + } + } + if !module_found { + continue 'pcs_full; + } + if *idm == *id { + #[allow(clippy::unnecessary_cast)] // for GuestAddr -> u64 + match lengths.get(pc) { + Some(block_length) => { + drcov_vec.push(DrCovBasicBlock::new( + *pc as u64, + *pc as u64 + *block_length as u64, + )); + } + None => { + log::info!("Failed to find block length for: {pc:}"); + } + } + } + } + } + + // # Safety + // + // Module mapping is already set. It's checked or filled when the module is first run. + unsafe { + DrCovWriter::new(self.module_mapping.as_ref().unwrap_unchecked()) + .write(&self.filename, &drcov_vec) + .expect("Failed to write coverage file"); + } + } + self.drcov_len = DRCOV_IDS.lock().unwrap().as_ref().unwrap().len(); + } else { + if DRCOV_MAP.lock().unwrap().as_ref().unwrap().len() > self.drcov_len { + let mut drcov_vec = Vec::::new(); + 'pcs: for (pc, _) in DRCOV_MAP.lock().unwrap().as_ref().unwrap() { + let mut module_found = false; + // # Safety + // + // Module mapping is already set. It's checked or filled when the module is first run. + unsafe { + for module in self.module_mapping.as_ref().unwrap_unchecked().iter() { + let (range, (_, _)) = module; + if *pc >= range.start.try_into().unwrap() + && *pc <= range.end.try_into().unwrap() + { + module_found = true; + break; + } + } + } + if !module_found { + continue 'pcs; + } + + #[allow(clippy::unnecessary_cast)] // for GuestAddr -> u64 + match lengths.get(pc) { + Some(block_length) => { + drcov_vec.push(DrCovBasicBlock::new( + *pc as u64, + *pc as u64 + *block_length as u64, + )); + } + None => { + log::info!("Failed to find block length for: {pc:}"); + } + } + } + + // # Safety + // + // Module mapping is already set. It's checked or filled when the module is first run. 
+ unsafe { + DrCovWriter::new(self.module_mapping.as_ref().unwrap_unchecked()) + .write(&self.filename, &drcov_vec) + .expect("Failed to write coverage file"); + } + } + self.drcov_len = DRCOV_MAP.lock().unwrap().as_ref().unwrap().len(); + } + } +} + +impl DrCovModule +where + F: AddressFilter, +{ + #[must_use] + pub fn must_instrument(&self, addr: GuestAddr) -> bool { + self.filter.allowed(&addr) + } +} + +impl EmulatorModule for DrCovModule +where + F: AddressFilter, + S: Unpin + UsesInput + HasMetadata, +{ + type ModuleAddressFilter = F; + #[cfg(feature = "systemmode")] + type ModulePageFilter = NopPageFilter; + + fn post_qemu_init(&self, emulator_modules: &mut EmulatorModules) + where + ET: EmulatorModuleTuple, + { + emulator_modules.blocks( + Hook::Function(gen_unique_block_ids::), + Hook::Function(gen_block_lengths::), + Hook::Function(exec_trace_block::), + ); + } + + #[cfg(feature = "usermode")] + fn first_exec(&mut self, emulator_modules: &mut EmulatorModules, _state: &mut S) + where + ET: EmulatorModuleTuple, + { + if self.module_mapping.is_none() { + log::info!("Auto-filling module mapping for DrCov module from QEMU mapping."); + + let qemu = emulator_modules.qemu(); + + let mut module_mapping: RangeMap = RangeMap::new(); + + #[allow(clippy::unnecessary_cast)] // for GuestAddr -> u64 + for (i, (r, p)) in qemu + .mappings() + .filter_map(|m| { + m.path() + .map(|p| ((m.start() as u64)..(m.end() as u64), p.to_string())) + .filter(|(_, p)| !p.is_empty()) + }) + .enumerate() + { + module_mapping.insert(r, (i as u16, p)); + } + + self.module_mapping = Some(module_mapping); + } else { + log::info!("Using user-provided module mapping for DrCov module."); + } + } + + #[cfg(feature = "systemmode")] + fn first_exec(&mut self, _emulator_modules: &mut EmulatorModules, _state: &mut S) + where + ET: EmulatorModuleTuple, + { + assert!( + self.module_mapping.is_some(), + "DrCov should have a module mapping already set." 
+ ); + } + + fn post_exec( + &mut self, + _emulator_modules: &mut EmulatorModules, + _state: &mut S, + _input: &S::Input, + _observers: &mut OT, + _exit_kind: &mut ExitKind, + ) where + OT: ObserversTuple, + ET: EmulatorModuleTuple, + { + self.write(); + } + + unsafe fn on_crash(&mut self) { + self.write(); + } + + unsafe fn on_timeout(&mut self) { + self.write(); + } + + fn address_filter(&self) -> &Self::ModuleAddressFilter { + &self.filter + } + + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter { + &mut self.filter + } + + #[cfg(feature = "systemmode")] + fn page_filter(&self) -> &Self::ModulePageFilter { + &NopPageFilter + } + + #[cfg(feature = "systemmode")] + fn page_filter_mut(&mut self) -> &mut Self::ModulePageFilter { + unsafe { (&raw mut NOP_PAGE_FILTER).as_mut().unwrap().get_mut() } + } +} + +pub fn gen_unique_block_ids( + emulator_modules: &mut EmulatorModules, + state: Option<&mut S>, + pc: GuestAddr, +) -> Option +where + F: AddressFilter, + S: Unpin + UsesInput + HasMetadata, + ET: EmulatorModuleTuple, +{ + let drcov_module = emulator_modules.get::>().unwrap(); + if !drcov_module.must_instrument(pc) { + return None; + } + + let state = state.expect("The gen_unique_block_ids hook works only for in-process fuzzing"); + if state + .metadata_map_mut() + .get_mut::() + .is_none() + { + state.add_metadata(DrCovMetadata::new()); + } + let meta = state.metadata_map_mut().get_mut::().unwrap(); + + match DRCOV_MAP.lock().unwrap().as_mut().unwrap().entry(pc) { + Entry::Occupied(e) => { + let id = *e.get(); + if drcov_module.full_trace { + Some(id) + } else { + None + } + } + Entry::Vacant(e) => { + let id = meta.current_id; + e.insert(id); + meta.current_id = id + 1; + if drcov_module.full_trace { + // GuestAddress is u32 for 32 bit guests + #[allow(clippy::unnecessary_cast)] + Some(id as u64) + } else { + None + } + } + } +} + +pub fn gen_block_lengths( + emulator_modules: &mut EmulatorModules, + _state: Option<&mut S>, + pc: GuestAddr, + block_length: GuestUsize, +) where + F: AddressFilter, + S: Unpin + UsesInput + HasMetadata, + ET: EmulatorModuleTuple, +{ + let drcov_module = emulator_modules.get::>().unwrap(); + if !drcov_module.must_instrument(pc) { + return; + } + DRCOV_LENGTHS + .lock() + .unwrap() + .as_mut() + .unwrap() + .insert(pc, block_length); +} + +pub fn exec_trace_block( + emulator_modules: &mut EmulatorModules, + _state: Option<&mut S>, + id: u64, +) where + F: AddressFilter, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput + HasMetadata, +{ + if emulator_modules.get::>().unwrap().full_trace { + DRCOV_IDS.lock().unwrap().as_mut().unwrap().push(id); + } +} diff --git a/libafl_qemu/src/modules/edges/child.rs b/libafl_qemu/src/modules/edges/child.rs new file mode 100644 index 0000000000..b823621940 --- /dev/null +++ b/libafl_qemu/src/modules/edges/child.rs @@ -0,0 +1,77 @@ +use libafl::{inputs::UsesInput, HasMetadata}; + +use super::{ + helpers::{gen_hashed_edge_ids, trace_edge_hitcount_ptr, trace_edge_single_ptr}, + EdgeCoverageVariant, +}; +use crate::{ + modules::{ + AddressFilter, EdgeCoverageModule, EdgeCoverageModuleBuilder, EmulatorModuleTuple, + PageFilter, StdAddressFilter, StdPageFilter, + }, + EmulatorModules, Hook, +}; + +#[derive(Debug)] +pub struct EdgeCoverageChildVariant; +pub type StdEdgeCoverageChildModule = + EdgeCoverageModule; +pub type StdEdgeCoverageChildModuleBuilder = EdgeCoverageModuleBuilder< + StdAddressFilter, + StdPageFilter, + EdgeCoverageChildVariant, + false, + false, + 0, +>; + +impl + EdgeCoverageVariant for 
EdgeCoverageChildVariant +{ + const DO_SIDE_EFFECTS: bool = false; + + fn fn_hitcount(&mut self, emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + emulator_modules.edges( + Hook::Function(gen_hashed_edge_ids::), + Hook::Raw(trace_edge_hitcount_ptr), + ); + } + + fn fn_no_hitcount(&mut self, emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + emulator_modules.edges( + Hook::Function(gen_hashed_edge_ids::), + Hook::Raw(trace_edge_single_ptr), + ); + } +} + +impl Default for StdEdgeCoverageChildModuleBuilder { + fn default() -> Self { + Self { + variant: EdgeCoverageChildVariant, + address_filter: StdAddressFilter::default(), + page_filter: StdPageFilter::default(), + use_hitcounts: true, + use_jit: true, + } + } +} + +impl StdEdgeCoverageChildModule { + #[must_use] + pub fn builder() -> StdEdgeCoverageChildModuleBuilder { + EdgeCoverageModuleBuilder::default().jit(false) + } +} diff --git a/libafl_qemu/src/modules/edges/classic.rs b/libafl_qemu/src/modules/edges/classic.rs new file mode 100644 index 0000000000..ed16c124ef --- /dev/null +++ b/libafl_qemu/src/modules/edges/classic.rs @@ -0,0 +1,124 @@ +use libafl::{inputs::UsesInput, HasMetadata}; + +use super::{ + helpers::{ + gen_hashed_block_ids, trace_block_transition_hitcount, trace_block_transition_single, + }, + EdgeCoverageVariant, +}; +use crate::{ + modules::{ + AddressFilter, EdgeCoverageModule, EdgeCoverageModuleBuilder, EmulatorModuleTuple, + PageFilter, StdAddressFilter, StdPageFilter, + }, + EmulatorModules, Hook, +}; + +#[derive(Debug)] +pub struct EdgeCoverageClassicVariant; + +pub type StdEdgeCoverageClassicModule = + EdgeCoverageModule; +pub type StdEdgeCoverageClassicModuleBuilder = EdgeCoverageModuleBuilder< + StdAddressFilter, + StdPageFilter, + EdgeCoverageClassicVariant, + false, + false, + 0, +>; + +impl + EdgeCoverageVariant for EdgeCoverageClassicVariant +{ + const DO_SIDE_EFFECTS: bool = false; + + fn jit_hitcount(&mut self, emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + let hook_id = emulator_modules.blocks( + Hook::Function(gen_hashed_block_ids::), + Hook::Empty, + Hook::Empty, + ); + + unsafe { + libafl_qemu_sys::libafl_qemu_block_hook_set_jit( + hook_id.0, + Some(libafl_qemu_sys::libafl_jit_trace_block_hitcount), + ); + } + } + + fn jit_no_hitcount(&mut self, emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + let hook_id = emulator_modules.blocks( + Hook::Function(gen_hashed_block_ids::), + Hook::Empty, + Hook::Empty, + ); + + unsafe { + libafl_qemu_sys::libafl_qemu_block_hook_set_jit( + hook_id.0, + Some(libafl_qemu_sys::libafl_jit_trace_block_single), + ); + } + } + + fn fn_hitcount(&mut self, emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + emulator_modules.blocks( + Hook::Function(gen_hashed_block_ids::), + Hook::Empty, + Hook::Raw(trace_block_transition_hitcount), + ); + } + + fn fn_no_hitcount(&mut self, emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + 
emulator_modules.blocks( + Hook::Function(gen_hashed_block_ids::), + Hook::Empty, + Hook::Raw(trace_block_transition_single), + ); + } +} + +impl Default for StdEdgeCoverageClassicModuleBuilder { + fn default() -> Self { + Self { + variant: EdgeCoverageClassicVariant, + address_filter: StdAddressFilter::default(), + page_filter: StdPageFilter::default(), + use_hitcounts: true, + use_jit: true, + } + } +} + +impl StdEdgeCoverageClassicModule { + #[must_use] + pub fn builder() -> StdEdgeCoverageClassicModuleBuilder { + EdgeCoverageModuleBuilder::default() + } +} diff --git a/libafl_qemu/src/modules/edges/full.rs b/libafl_qemu/src/modules/edges/full.rs new file mode 100644 index 0000000000..43de4a45d8 --- /dev/null +++ b/libafl_qemu/src/modules/edges/full.rs @@ -0,0 +1,114 @@ +use libafl::{inputs::UsesInput, HasMetadata}; + +use super::{ + helpers::{gen_unique_edge_ids, trace_edge_hitcount, trace_edge_single}, + EdgeCoverageVariant, +}; +use crate::{ + modules::{ + AddressFilter, EdgeCoverageModule, EdgeCoverageModuleBuilder, EmulatorModuleTuple, + PageFilter, StdAddressFilter, StdPageFilter, + }, + EmulatorModules, Hook, +}; + +#[derive(Debug)] +pub struct EdgeCoverageFullVariant; + +pub type StdEdgeCoverageFullModule = + EdgeCoverageModule; +pub type StdEdgeCoverageFullModuleBuilder = EdgeCoverageModuleBuilder< + StdAddressFilter, + StdPageFilter, + EdgeCoverageFullVariant, + false, + false, + 0, +>; + +impl + EdgeCoverageVariant for EdgeCoverageFullVariant +{ + fn jit_hitcount(&mut self, emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + let hook_id = emulator_modules.edges( + Hook::Function(gen_unique_edge_ids::), + Hook::Empty, + ); + unsafe { + libafl_qemu_sys::libafl_qemu_edge_hook_set_jit( + hook_id.0, + Some(libafl_qemu_sys::libafl_jit_trace_edge_hitcount), + ); + } + } + + fn jit_no_hitcount(&mut self, emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + let hook_id = emulator_modules.edges( + Hook::Function(gen_unique_edge_ids::), + Hook::Empty, + ); + unsafe { + libafl_qemu_sys::libafl_qemu_edge_hook_set_jit( + hook_id.0, + Some(libafl_qemu_sys::libafl_jit_trace_edge_single), + ); + } + } + + fn fn_hitcount(&mut self, emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + emulator_modules.edges( + Hook::Function(gen_unique_edge_ids::), + Hook::Raw(trace_edge_hitcount), + ); + } + + fn fn_no_hitcount(&mut self, emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + emulator_modules.edges( + Hook::Function(gen_unique_edge_ids::), + Hook::Raw(trace_edge_single), + ); + } +} + +impl Default for StdEdgeCoverageFullModuleBuilder { + fn default() -> Self { + Self { + variant: EdgeCoverageFullVariant, + address_filter: StdAddressFilter::default(), + page_filter: StdPageFilter::default(), + use_hitcounts: true, + use_jit: true, + } + } +} + +impl StdEdgeCoverageFullModule { + #[must_use] + pub fn builder() -> StdEdgeCoverageFullModuleBuilder { + EdgeCoverageModuleBuilder::default() + } +} diff --git a/libafl_qemu/src/modules/edges/helpers.rs b/libafl_qemu/src/modules/edges/helpers.rs new file mode 100644 index 0000000000..760fce7272 --- /dev/null +++ 
b/libafl_qemu/src/modules/edges/helpers.rs @@ -0,0 +1,342 @@ +use std::ptr; + +/// Generators, responsible for generating block/edge ids +pub use generators::{gen_hashed_block_ids, gen_hashed_edge_ids, gen_unique_edge_ids}; +use hashbrown::HashMap; +use libafl_qemu_sys::GuestAddr; +use serde::{Deserialize, Serialize}; +/// Tracers, responsible for propagating an ID in a map. +pub use tracers::{ + trace_block_transition_hitcount, trace_block_transition_single, trace_edge_hitcount, + trace_edge_hitcount_ptr, trace_edge_single, trace_edge_single_ptr, +}; + +// Constants used for variable-length maps + +#[no_mangle] +pub(super) static mut LIBAFL_QEMU_EDGES_MAP_PTR: *mut u8 = ptr::null_mut(); + +#[no_mangle] +pub(super) static mut LIBAFL_QEMU_EDGES_MAP_SIZE_PTR: *mut usize = ptr::null_mut(); + +#[no_mangle] +pub(super) static mut LIBAFL_QEMU_EDGES_MAP_ALLOCATED_SIZE: usize = 0; + +#[no_mangle] +pub(super) static mut LIBAFL_QEMU_EDGES_MAP_MASK_MAX: usize = 0; + +#[cfg_attr( + any(not(feature = "serdeany_autoreg"), miri), + allow(clippy::unsafe_derive_deserialize) +)] // for SerdeAny +#[derive(Debug, Default, Serialize, Deserialize)] +pub struct QemuEdgesMapMetadata { + pub map: HashMap<(GuestAddr, GuestAddr), u64>, + pub current_id: u64, +} + +libafl_bolts::impl_serdeany!(QemuEdgesMapMetadata); + +impl QemuEdgesMapMetadata { + #[must_use] + pub fn new() -> Self { + Self { + map: HashMap::new(), + current_id: 0, + } + } +} + +mod generators { + use std::{cmp::max, ptr}; + + use hashbrown::hash_map::Entry; + use libafl::{inputs::UsesInput, HasMetadata}; + use libafl_qemu_sys::GuestAddr; + + use super::{ + super::EdgeCoverageVariant, QemuEdgesMapMetadata, LIBAFL_QEMU_EDGES_MAP_MASK_MAX, + LIBAFL_QEMU_EDGES_MAP_SIZE_PTR, + }; + use crate::{ + modules::{hash_me, AddressFilter, EdgeCoverageModule, EmulatorModuleTuple, PageFilter}, + EmulatorModules, + }; + + fn get_mask() -> usize { + if IS_CONST_MAP { + const { + assert!( + !IS_CONST_MAP || MAP_SIZE > 0, + "The size of a const map should be bigger than 0." 
+ ); + MAP_SIZE.overflowing_sub(1).0 + } + } else { + unsafe { LIBAFL_QEMU_EDGES_MAP_MASK_MAX } + } + } + + pub fn gen_unique_edge_ids( + emulator_modules: &mut EmulatorModules, + state: Option<&mut S>, + src: GuestAddr, + dest: GuestAddr, + ) -> Option + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + V: EdgeCoverageVariant, + { + if let Some(module) = + emulator_modules.get::>() + { + unsafe { + assert!(LIBAFL_QEMU_EDGES_MAP_MASK_MAX > 0); + let edges_map_size_ptr = &raw const LIBAFL_QEMU_EDGES_MAP_SIZE_PTR; + assert_ne!(*edges_map_size_ptr, ptr::null_mut()); + } + + #[cfg(feature = "usermode")] + { + if !module.must_instrument(src) && !module.must_instrument(dest) { + return None; + } + } + + #[cfg(feature = "systemmode")] + { + let paging_id = emulator_modules + .qemu() + .current_cpu() + .and_then(|cpu| cpu.current_paging_id()); + + if !module.must_instrument(src, paging_id) + && !module.must_instrument(dest, paging_id) + { + return None; + } + } + } + + let mask: usize = get_mask::(); + + let state = state.expect("The gen_unique_edge_ids hook works only for in-process fuzzing"); + let meta = state.metadata_or_insert_with(QemuEdgesMapMetadata::new); + + match meta.map.entry((src, dest)) { + Entry::Occupied(e) => { + let id = *e.get(); + unsafe { + let nxt = (id as usize + 1) & mask; + + if !IS_CONST_MAP { + *LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = max(*LIBAFL_QEMU_EDGES_MAP_SIZE_PTR, nxt); + } + } + Some(id) + } + Entry::Vacant(e) => { + let id = meta.current_id; + e.insert(id); + unsafe { + meta.current_id = (id + 1) & (mask as u64); + + if !IS_CONST_MAP { + *LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = meta.current_id as usize; + } + } + // GuestAddress is u32 for 32 bit guests + #[allow(clippy::unnecessary_cast)] + Some(id as u64) + } + } + } + + #[allow(clippy::unnecessary_cast)] + pub fn gen_hashed_edge_ids( + emulator_modules: &mut EmulatorModules, + _state: Option<&mut S>, + src: GuestAddr, + dest: GuestAddr, + ) -> Option + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + V: EdgeCoverageVariant, + { + if let Some(module) = + emulator_modules.get::>() + { + #[cfg(feature = "usermode")] + if !module.must_instrument(src) && !module.must_instrument(dest) { + return None; + } + + #[cfg(feature = "systemmode")] + { + let paging_id = emulator_modules + .qemu() + .current_cpu() + .and_then(|cpu| cpu.current_paging_id()); + + if !module.must_instrument(src, paging_id) + && !module.must_instrument(dest, paging_id) + { + return None; + } + } + + let mask = get_mask::() as u64; + + #[allow(clippy::unnecessary_cast)] + let id = (hash_me(src as u64) ^ hash_me(dest as u64)) & mask; + + if !IS_CONST_MAP { + unsafe { + *LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = + max(*LIBAFL_QEMU_EDGES_MAP_SIZE_PTR, id as usize); + } + } + + Some(id) + } else { + None + } + } + + #[allow(clippy::unnecessary_cast)] + pub fn gen_hashed_block_ids( + emulator_modules: &mut EmulatorModules, + _state: Option<&mut S>, + pc: GuestAddr, + ) -> Option + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + V: EdgeCoverageVariant, + { + // first check if we should filter + if let Some(module) = + emulator_modules.get::>() + { + #[cfg(feature = "usermode")] + { + if !module.must_instrument(pc) { + return None; + } + } + #[cfg(feature = "systemmode")] + { + let page_id = emulator_modules + .qemu() + .current_cpu() + .and_then(|cpu| cpu.current_paging_id()); + + if 
!module.must_instrument(pc, page_id) { + return None; + } + } + } + + let mask = get_mask::() as u64; + + let id = hash_me(pc as u64) & mask; + + if !IS_CONST_MAP { + unsafe { + *LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = max(*LIBAFL_QEMU_EDGES_MAP_SIZE_PTR, id as usize); + } + } + + // GuestAddress is u32 for 32 bit guests + #[allow(clippy::unnecessary_cast)] + Some(id) + } +} + +mod tracers { + use std::cell::UnsafeCell; + + use libafl_targets::EDGES_MAP; + + use super::{LIBAFL_QEMU_EDGES_MAP_MASK_MAX, LIBAFL_QEMU_EDGES_MAP_PTR}; + + thread_local!(static PREV_LOC : UnsafeCell = const { UnsafeCell::new(0) }); + + /// # Safety + /// + /// - @id should be the one generated by a gen_* function from this module. + /// - Calling this concurrently for the same id is racey and may lose updates. + pub unsafe extern "C" fn trace_edge_hitcount(_: *const (), id: u64) { + unsafe { + EDGES_MAP[id as usize] = EDGES_MAP[id as usize].wrapping_add(1); + } + } + + /// # Safety + /// + /// - @id should be the one generated by a gen_* function from this module. + pub unsafe extern "C" fn trace_edge_single(_: *const (), id: u64) { + // # Safety + // Worst case we set the byte to 1 multiple times.. + unsafe { + EDGES_MAP[id as usize] = 1; + } + } + + /// # Safety + /// + /// Increases id at `EDGES_MAP_PTR` - potentially racey if called concurrently. + pub unsafe extern "C" fn trace_edge_hitcount_ptr(_: *const (), id: u64) { + unsafe { + let ptr = LIBAFL_QEMU_EDGES_MAP_PTR.add(id as usize); + *ptr = (*ptr).wrapping_add(1); + } + } + + /// # Safety + /// + /// Fine. + /// Worst case we set the byte to 1 multiple times. + pub unsafe extern "C" fn trace_edge_single_ptr(_: *const (), id: u64) { + unsafe { + let ptr = LIBAFL_QEMU_EDGES_MAP_PTR.add(id as usize); + *ptr = 1; + } + } + + /// # Safety + /// + /// Dereferences the global `PREV_LOC` variable. May not be called concurrently. + pub unsafe extern "C" fn trace_block_transition_hitcount(_: *const (), id: u64) { + unsafe { + PREV_LOC.with(|prev_loc| { + let x = ((*prev_loc.get() ^ id) as usize) & LIBAFL_QEMU_EDGES_MAP_MASK_MAX; + let entry = LIBAFL_QEMU_EDGES_MAP_PTR.add(x); + *entry = (*entry).wrapping_add(1); + *prev_loc.get() = id.overflowing_shr(1).0; + }); + } + } + + /// # Safety + /// + /// Dereferences the global `PREV_LOC` variable. May not be called concurrently. 
+ pub unsafe extern "C" fn trace_block_transition_single(_: *const (), id: u64) { + unsafe { + PREV_LOC.with(|prev_loc| { + let x = ((*prev_loc.get() ^ id) as usize) & LIBAFL_QEMU_EDGES_MAP_MASK_MAX; + let entry = LIBAFL_QEMU_EDGES_MAP_PTR.add(x); + *entry = 1; + *prev_loc.get() = id.overflowing_shr(1).0; + }); + } + } +} diff --git a/libafl_qemu/src/modules/edges/mod.rs b/libafl_qemu/src/modules/edges/mod.rs new file mode 100644 index 0000000000..db2538ca7e --- /dev/null +++ b/libafl_qemu/src/modules/edges/mod.rs @@ -0,0 +1,402 @@ +use std::fmt::Debug; + +use libafl::{inputs::UsesInput, observers::VarLenMapObserver, HasMetadata}; +use libafl_bolts::Error; +use libafl_qemu_sys::GuestAddr; +#[cfg(feature = "systemmode")] +use libafl_qemu_sys::GuestPhysAddr; + +use crate::{ + emu::EmulatorModules, + modules::{AddressFilter, EmulatorModule, EmulatorModuleTuple, PageFilter}, +}; + +mod helpers; +use helpers::{ + LIBAFL_QEMU_EDGES_MAP_ALLOCATED_SIZE, LIBAFL_QEMU_EDGES_MAP_MASK_MAX, + LIBAFL_QEMU_EDGES_MAP_PTR, LIBAFL_QEMU_EDGES_MAP_SIZE_PTR, +}; + +pub mod full; +pub use full::{ + EdgeCoverageFullVariant, StdEdgeCoverageFullModule, StdEdgeCoverageFullModuleBuilder, +}; + +pub mod classic; +pub use classic::{ + EdgeCoverageClassicVariant, StdEdgeCoverageClassicModule, StdEdgeCoverageClassicModuleBuilder, +}; + +pub mod child; +pub use child::{ + EdgeCoverageChildVariant, StdEdgeCoverageChildModule, StdEdgeCoverageChildModuleBuilder, +}; +use libafl::observers::ConstLenMapObserver; + +/// Standard edge coverage module, adapted to most use cases +pub type StdEdgeCoverageModule = StdEdgeCoverageFullModule; + +/// Standard edge coverage module builder, adapted to most use cases +pub type StdEdgeCoverageModuleBuilder = StdEdgeCoverageFullModuleBuilder; + +pub type CollidingEdgeCoverageModule = + EdgeCoverageModule; + +/// An edge coverage module variant. +trait EdgeCoverageVariant: + 'static + Debug +{ + const DO_SIDE_EFFECTS: bool = true; + + fn jit_hitcount(&mut self, _emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + panic!("JIT hitcount is not supported.") + } + + fn jit_no_hitcount(&mut self, _emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + panic!("JIT no hitcount is not supported.") + } + + fn fn_hitcount(&mut self, _emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + panic!("Func hitcount is not supported.") + } + + fn fn_no_hitcount(&mut self, _emulator_modules: &mut EmulatorModules) + where + AF: AddressFilter, + ET: EmulatorModuleTuple, + PF: PageFilter, + S: Unpin + UsesInput + HasMetadata, + { + panic!("Func no hitcount is not supported.") + } +} + +#[derive(Debug)] +pub struct EdgeCoverageModuleBuilder< + AF, + PF, + V, + const IS_INITIALIZED: bool, + const IS_CONST_MAP: bool, + const MAP_SIZE: usize, +> { + variant: V, + address_filter: AF, + page_filter: PF, + use_hitcounts: bool, + use_jit: bool, +} + +#[derive(Debug)] +pub struct EdgeCoverageModule { + variant: V, + address_filter: AF, + // we only use it in system mode at the moment. 
+ #[cfg_attr(not(feature = "systemmode"), allow(dead_code))] + page_filter: PF, + use_hitcounts: bool, + use_jit: bool, +} + +impl + EdgeCoverageModuleBuilder +{ + pub fn build(self) -> Result, Error> { + const { + assert!( + IS_INITIALIZED, + "The edge module builder must be first initialized with a call to `map_observer`." + ); + }; + + Ok(EdgeCoverageModule::new( + self.address_filter, + self.page_filter, + self.variant, + self.use_hitcounts, + self.use_jit, + )) + } +} + +impl + EdgeCoverageModuleBuilder +{ + fn new( + variant: V, + address_filter: AF, + page_filter: PF, + use_hitcounts: bool, + use_jit: bool, + ) -> Self { + Self { + variant, + address_filter, + page_filter, + use_hitcounts, + use_jit, + } + } + + #[must_use] + pub fn map_observer( + self, + map_observer: &mut O, + ) -> EdgeCoverageModuleBuilder + where + O: VarLenMapObserver, + { + let map_ptr = map_observer.map_slice_mut().as_mut_ptr() as *mut u8; + let map_max_size = map_observer.map_slice_mut().len(); + let size_ptr = map_observer.as_mut().size_mut() as *mut usize; + + unsafe { + LIBAFL_QEMU_EDGES_MAP_PTR = map_ptr; + LIBAFL_QEMU_EDGES_MAP_SIZE_PTR = size_ptr; + LIBAFL_QEMU_EDGES_MAP_ALLOCATED_SIZE = map_max_size; + LIBAFL_QEMU_EDGES_MAP_MASK_MAX = map_max_size - 1; + } + + EdgeCoverageModuleBuilder::::new( + self.variant, + self.address_filter, + self.page_filter, + self.use_hitcounts, + self.use_jit, + ) + } + + #[must_use] + pub fn const_map_observer( + self, + _const_map_observer: &mut O, + ) -> EdgeCoverageModuleBuilder + where + O: ConstLenMapObserver, + { + EdgeCoverageModuleBuilder::::new( + self.variant, + self.address_filter, + self.page_filter, + self.use_hitcounts, + self.use_jit, + ) + } + + pub fn variant( + self, + variant: V2, + ) -> EdgeCoverageModuleBuilder { + EdgeCoverageModuleBuilder::new( + variant, + self.address_filter, + self.page_filter, + self.use_hitcounts, + self.use_jit, + ) + } + + pub fn address_filter( + self, + address_filter: AF2, + ) -> EdgeCoverageModuleBuilder { + EdgeCoverageModuleBuilder::new( + self.variant, + address_filter, + self.page_filter, + self.use_hitcounts, + self.use_jit, + ) + } + + pub fn page_filter( + self, + page_filter: PF2, + ) -> EdgeCoverageModuleBuilder { + EdgeCoverageModuleBuilder::new( + self.variant, + self.address_filter, + page_filter, + self.use_hitcounts, + self.use_jit, + ) + } + + #[must_use] + pub fn hitcounts( + self, + use_hitcounts: bool, + ) -> EdgeCoverageModuleBuilder { + EdgeCoverageModuleBuilder::new( + self.variant, + self.address_filter, + self.page_filter, + use_hitcounts, + self.use_jit, + ) + } + + #[must_use] + pub fn jit( + self, + use_jit: bool, + ) -> EdgeCoverageModuleBuilder { + EdgeCoverageModuleBuilder::new( + self.variant, + self.address_filter, + self.page_filter, + self.use_hitcounts, + use_jit, + ) + } +} + +impl + EdgeCoverageModule +{ + #[must_use] + pub fn new( + address_filter: AF, + page_filter: PF, + variant: V, + use_hitcounts: bool, + use_jit: bool, + ) -> Self { + Self { + variant, + address_filter, + page_filter, + use_hitcounts, + use_jit, + } + } +} + +impl + EdgeCoverageModule +where + AF: AddressFilter, + PF: PageFilter, +{ + #[cfg(feature = "usermode")] + #[must_use] + pub fn must_instrument(&self, addr: GuestAddr) -> bool { + self.address_filter.allowed(&addr) + } + + #[cfg(feature = "systemmode")] + #[must_use] + pub fn must_instrument(&self, addr: GuestAddr, page_id: Option) -> bool { + if let Some(page_id) = page_id { + self.address_filter.allowed(&addr) && self.page_filter.allowed(&page_id) + } 
else { + self.address_filter.allowed(&addr) + } + } +} + +impl EmulatorModule + for EdgeCoverageModule +where + AF: AddressFilter + 'static, + PF: PageFilter + 'static, + S: Unpin + UsesInput + HasMetadata, + V: EdgeCoverageVariant + 'static, +{ + type ModuleAddressFilter = AF; + + #[cfg(feature = "systemmode")] + type ModulePageFilter = PF; + const HOOKS_DO_SIDE_EFFECTS: bool = V::DO_SIDE_EFFECTS; + + fn first_exec(&mut self, emulator_modules: &mut EmulatorModules, _state: &mut S) + where + ET: EmulatorModuleTuple, + { + if self.use_hitcounts { + if self.use_jit { + self.variant.jit_hitcount(emulator_modules); + } else { + self.variant.fn_hitcount(emulator_modules); + } + } else if self.use_jit { + self.variant.jit_no_hitcount(emulator_modules); + } else { + self.variant.fn_no_hitcount(emulator_modules); + } + } + + fn address_filter(&self) -> &Self::ModuleAddressFilter { + &self.address_filter + } + + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter { + &mut self.address_filter + } + + #[cfg(feature = "systemmode")] + fn page_filter(&self) -> &Self::ModulePageFilter { + &self.page_filter + } + + #[cfg(feature = "systemmode")] + fn page_filter_mut(&mut self) -> &mut Self::ModulePageFilter { + &mut self.page_filter + } +} + +#[cfg(any(test, doc))] +mod tests { + + use libafl::observers::{CanTrack, HitcountsMapObserver, VariableMapObserver}; + use libafl_bolts::ownedref::OwnedMutSlice; + use libafl_targets::{edges_map_mut_ptr, EDGES_MAP_DEFAULT_SIZE, MAX_EDGES_FOUND}; + + use crate::modules::StdEdgeCoverageModule; + + /// The test is actually implemented as a doctest, since Rust does not + /// permit tests that must not compile by default... + /// + /// ```compile_fail + /// use libafl_qemu::modules::StdEdgeCoverageModule; + /// + /// StdEdgeCoverageModule::builder().build().unwrap(); + /// ``` + #[allow(unused)] + pub fn does_not_build() {} + + #[test] + pub fn does_build() { + let mut edges_observer = unsafe { + HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( + "edges", + OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_DEFAULT_SIZE), + &raw mut MAX_EDGES_FOUND, + )) + .track_indices() + }; + + StdEdgeCoverageModule::builder() + .map_observer(edges_observer.as_mut()) + .build() + .unwrap(); + } +} diff --git a/libafl_qemu/src/modules/mod.rs b/libafl_qemu/src/modules/mod.rs new file mode 100644 index 0000000000..04b80a84ee --- /dev/null +++ b/libafl_qemu/src/modules/mod.rs @@ -0,0 +1,555 @@ +use core::{fmt::Debug, ops::Range}; +use std::cell::UnsafeCell; + +use hashbrown::HashSet; +use libafl::{executors::ExitKind, inputs::UsesInput, observers::ObserversTuple}; +use libafl_bolts::tuples::{MatchFirstType, SplitBorrowExtractFirstType}; +use libafl_qemu_sys::{GuestAddr, GuestPhysAddr}; + +#[cfg(feature = "usermode")] +pub mod usermode; +#[cfg(feature = "usermode")] +pub use usermode::*; + +#[cfg(feature = "systemmode")] +pub mod systemmode; +#[cfg(feature = "systemmode")] +#[allow(unused_imports)] +pub use systemmode::*; + +pub mod edges; +pub use edges::{ + EdgeCoverageModule, EdgeCoverageModuleBuilder, StdEdgeCoverageChildModule, + StdEdgeCoverageChildModuleBuilder, StdEdgeCoverageClassicModule, + StdEdgeCoverageClassicModuleBuilder, StdEdgeCoverageFullModule, + StdEdgeCoverageFullModuleBuilder, StdEdgeCoverageModule, StdEdgeCoverageModuleBuilder, +}; + +#[cfg(not(cpu_target = "hexagon"))] +pub mod calls; +#[cfg(not(cpu_target = "hexagon"))] +pub use calls::CallTracerModule; + +#[cfg(not(any(cpu_target = "mips", cpu_target = "hexagon")))] 
+pub mod cmplog; +#[cfg(not(any(cpu_target = "mips", cpu_target = "hexagon")))] +pub use cmplog::CmpLogModule; + +#[cfg(not(cpu_target = "hexagon"))] +pub mod drcov; +#[cfg(not(cpu_target = "hexagon"))] +pub use drcov::{DrCovMetadata, DrCovModule, DrCovModuleBuilder}; + +use crate::{emu::EmulatorModules, EmulatorHooks, Qemu}; + +/// A module for `libafl_qemu`. +// TODO remove 'static when specialization will be stable +pub trait EmulatorModule: 'static + Debug +where + S: UsesInput, +{ + type ModuleAddressFilter: AddressFilter; + + #[cfg(feature = "systemmode")] + type ModulePageFilter: PageFilter; + + const HOOKS_DO_SIDE_EFFECTS: bool = true; + + /// Hook run **before** QEMU is initialized. + /// This is always run when Emulator gets initialized, in any case. + /// Install here hooks that should be alive for the whole execution of the VM, even before QEMU gets initialized. + fn pre_qemu_init(&self, _emulator_hooks: &mut EmulatorHooks) + where + ET: EmulatorModuleTuple, + { + } + + /// Hook run **after** QEMU is initialized. + /// This is always run when Emulator gets initialized, in any case. + /// Install here hooks that should be alive for the whole execution of the VM, after QEMU gets initialized. + fn post_qemu_init(&self, _emulator_modules: &mut EmulatorModules) + where + ET: EmulatorModuleTuple, + { + } + + /// Run once just before fuzzing starts. + /// This call can be delayed to the point at which fuzzing is supposed to start. + /// It is mostly used to avoid running hooks during VM initialization, either + /// because it is useless or it would produce wrong results. + fn first_exec(&mut self, _emulator_modules: &mut EmulatorModules, _state: &mut S) + where + ET: EmulatorModuleTuple, + { + } + + /// Run before a new fuzzing run starts. + /// On the first run, it is executed after [`Self::first_exec`]. + fn pre_exec( + &mut self, + _emulator_modules: &mut EmulatorModules, + _state: &mut S, + _input: &S::Input, + ) where + ET: EmulatorModuleTuple, + { + } + + /// Run after a fuzzing run ends. + fn post_exec( + &mut self, + _emulator_modules: &mut EmulatorModules, + _state: &mut S, + _input: &S::Input, + _observers: &mut OT, + _exit_kind: &mut ExitKind, + ) where + OT: ObserversTuple, + ET: EmulatorModuleTuple, + { + } + + /// # Safety + /// + /// This is getting executed in a signal handler. + unsafe fn on_crash(&mut self) {} + + /// # Safety + /// + /// This is getting executed in a signal handler. + unsafe fn on_timeout(&mut self) {} + + fn address_filter(&self) -> &Self::ModuleAddressFilter; + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter; + fn update_address_filter(&mut self, qemu: Qemu, filter: Self::ModuleAddressFilter) { + *self.address_filter_mut() = filter; + // Necessary because some hooks filter during TB generation. + qemu.flush_jit(); + } + + #[cfg(feature = "systemmode")] + fn page_filter(&self) -> &Self::ModulePageFilter; + #[cfg(feature = "systemmode")] + fn page_filter_mut(&mut self) -> &mut Self::ModulePageFilter; + #[cfg(feature = "systemmode")] + fn update_page_filter(&mut self, qemu: Qemu, filter: Self::ModulePageFilter) { + *self.page_filter_mut() = filter; + // Necessary because some hooks filter during TB generation. 
+ qemu.flush_jit(); + } +} + +pub trait EmulatorModuleTuple: + MatchFirstType + for<'a> SplitBorrowExtractFirstType<'a> + Unpin +where + S: UsesInput, +{ + const HOOKS_DO_SIDE_EFFECTS: bool; + + fn pre_qemu_init_all(&self, _emulator_hooks: &mut EmulatorHooks) + where + ET: EmulatorModuleTuple; + + fn post_qemu_init_all(&self, _emulator_modules: &mut EmulatorModules) + where + ET: EmulatorModuleTuple; + + fn first_exec_all(&mut self, emulator_modules: &mut EmulatorModules, state: &mut S) + where + ET: EmulatorModuleTuple; + + fn pre_exec_all( + &mut self, + emulator_modules: &mut EmulatorModules, + state: &mut S, + input: &S::Input, + ) where + ET: EmulatorModuleTuple; + + fn post_exec_all( + &mut self, + emulator_modules: &mut EmulatorModules, + state: &mut S, + input: &S::Input, + observers: &mut OT, + exit_kind: &mut ExitKind, + ) where + OT: ObserversTuple, + ET: EmulatorModuleTuple; + + /// # Safety + /// + /// This is getting executed in a signal handler. + unsafe fn on_crash_all(&mut self); + + /// # Safety + /// + /// This is getting executed in a signal handler. + unsafe fn on_timeout_all(&mut self); + + fn allow_address_range_all(&mut self, address_range: Range); + + #[cfg(feature = "systemmode")] + fn allow_page_id_all(&mut self, page_id: GuestPhysAddr); +} + +impl EmulatorModuleTuple for () +where + S: UsesInput, +{ + const HOOKS_DO_SIDE_EFFECTS: bool = false; + + fn pre_qemu_init_all(&self, _emulator_hooks: &mut EmulatorHooks) + where + ET: EmulatorModuleTuple, + { + } + + fn post_qemu_init_all(&self, _emulator_modules: &mut EmulatorModules) + where + ET: EmulatorModuleTuple, + { + } + + fn first_exec_all(&mut self, _emulator_modules: &mut EmulatorModules, _state: &mut S) + where + ET: EmulatorModuleTuple, + { + } + + fn pre_exec_all( + &mut self, + _emulator_modules: &mut EmulatorModules, + _state: &mut S, + _input: &S::Input, + ) where + ET: EmulatorModuleTuple, + { + } + + fn post_exec_all( + &mut self, + _emulator_modules: &mut EmulatorModules, + _state: &mut S, + _input: &S::Input, + _observers: &mut OT, + _exit_kind: &mut ExitKind, + ) where + OT: ObserversTuple, + ET: EmulatorModuleTuple, + { + } + + unsafe fn on_crash_all(&mut self) {} + + unsafe fn on_timeout_all(&mut self) {} + + fn allow_address_range_all(&mut self, _address_range: Range) {} + + #[cfg(feature = "systemmode")] + fn allow_page_id_all(&mut self, _page_id: GuestPhysAddr) {} +} + +impl EmulatorModuleTuple for (Head, Tail) +where + Head: EmulatorModule + Unpin, + Tail: EmulatorModuleTuple, + S: UsesInput + Unpin, +{ + const HOOKS_DO_SIDE_EFFECTS: bool = Head::HOOKS_DO_SIDE_EFFECTS || Tail::HOOKS_DO_SIDE_EFFECTS; + + fn pre_qemu_init_all(&self, emulator_hooks: &mut EmulatorHooks) + where + ET: EmulatorModuleTuple, + { + self.0.pre_qemu_init(emulator_hooks); + self.1.pre_qemu_init_all(emulator_hooks); + } + + fn post_qemu_init_all(&self, emulator_modules: &mut EmulatorModules) + where + ET: EmulatorModuleTuple, + { + self.0.post_qemu_init(emulator_modules); + self.1.post_qemu_init_all(emulator_modules); + } + + fn first_exec_all(&mut self, emulator_modules: &mut EmulatorModules, state: &mut S) + where + ET: EmulatorModuleTuple, + { + self.0.first_exec(emulator_modules, state); + self.1.first_exec_all(emulator_modules, state); + } + + fn pre_exec_all( + &mut self, + emulator_modules: &mut EmulatorModules, + state: &mut S, + input: &S::Input, + ) where + ET: EmulatorModuleTuple, + { + self.0.pre_exec(emulator_modules, state, input); + self.1.pre_exec_all(emulator_modules, state, input); + } + + fn 
post_exec_all( + &mut self, + emulator_modules: &mut EmulatorModules, + state: &mut S, + input: &S::Input, + observers: &mut OT, + exit_kind: &mut ExitKind, + ) where + OT: ObserversTuple, + ET: EmulatorModuleTuple, + { + self.0 + .post_exec(emulator_modules, state, input, observers, exit_kind); + self.1 + .post_exec_all(emulator_modules, state, input, observers, exit_kind); + } + + unsafe fn on_crash_all(&mut self) { + self.0.on_crash(); + self.1.on_crash_all(); + } + + unsafe fn on_timeout_all(&mut self) { + self.0.on_timeout(); + self.1.on_timeout_all(); + } + + fn allow_address_range_all(&mut self, address_range: Range) { + self.0.address_filter_mut().register(address_range.clone()); + self.1.allow_address_range_all(address_range); + } + + #[cfg(feature = "systemmode")] + fn allow_page_id_all(&mut self, page_id: GuestPhysAddr) { + self.0.page_filter_mut().register(page_id.clone()); + self.1.allow_page_id_all(page_id) + } +} + +#[derive(Debug, Clone)] +pub enum FilterList { + AllowList(T), + DenyList(T), + None, +} + +impl AddressFilter for FilterList +where + T: AddressFilter, +{ + fn register(&mut self, address_range: Range) { + match self { + FilterList::AllowList(allow_list) => allow_list.register(address_range), + FilterList::DenyList(deny_list) => deny_list.register(address_range), + FilterList::None => {} + } + } + + fn allowed(&self, address: &GuestAddr) -> bool { + match self { + FilterList::AllowList(allow_list) => allow_list.allowed(address), + FilterList::DenyList(deny_list) => !deny_list.allowed(address), + FilterList::None => true, + } + } +} + +impl PageFilter for FilterList +where + T: PageFilter, +{ + fn register(&mut self, page_id: GuestPhysAddr) { + match self { + FilterList::AllowList(allow_list) => allow_list.register(page_id), + FilterList::DenyList(deny_list) => deny_list.register(page_id), + FilterList::None => {} + } + } + + fn allowed(&self, page: &GuestPhysAddr) -> bool { + match self { + FilterList::AllowList(allow_list) => allow_list.allowed(page), + FilterList::DenyList(deny_list) => !deny_list.allowed(page), + FilterList::None => true, + } + } +} + +#[derive(Clone, Debug, Default)] +pub struct AddressFilterVec { + // ideally, we should use a tree + registered_addresses: Vec>, +} +#[derive(Clone, Debug)] +pub struct StdAddressFilter(FilterList); + +impl Default for StdAddressFilter { + fn default() -> Self { + Self(FilterList::None) + } +} + +impl StdAddressFilter { + #[must_use] + pub fn allow_list(registered_addresses: Vec>) -> Self { + StdAddressFilter(FilterList::AllowList(AddressFilterVec::new( + registered_addresses, + ))) + } + + #[must_use] + pub fn deny_list(registered_addresses: Vec>) -> Self { + StdAddressFilter(FilterList::DenyList(AddressFilterVec::new( + registered_addresses, + ))) + } +} + +impl AddressFilterVec { + #[must_use] + pub fn new(registered_addresses: Vec>) -> Self { + Self { + registered_addresses, + } + } +} + +impl AddressFilter for AddressFilterVec { + fn register(&mut self, address_range: Range) { + self.registered_addresses.push(address_range); + Qemu::get().unwrap().flush_jit(); + } + + fn allowed(&self, addr: &GuestAddr) -> bool { + if self.registered_addresses.is_empty() { + return true; + } + + for addr_range in &self.registered_addresses { + if addr_range.contains(addr) { + return true; + } + } + + false + } +} + +impl AddressFilter for StdAddressFilter { + fn register(&mut self, address_range: Range) { + self.0.register(address_range); + } + + fn allowed(&self, address: &GuestAddr) -> bool { + self.0.allowed(address) + 
} +} + +#[derive(Clone, Debug)] +pub struct PageFilterVec { + registered_pages: HashSet, +} + +#[cfg(feature = "systemmode")] +#[derive(Clone, Debug)] +pub struct StdPageFilter(FilterList); + +#[cfg(feature = "usermode")] +pub type StdPageFilter = NopPageFilter; + +impl Default for PageFilterVec { + fn default() -> Self { + Self { + registered_pages: HashSet::new(), + } + } +} + +#[cfg(feature = "systemmode")] +impl Default for StdPageFilter { + fn default() -> Self { + Self(FilterList::None) + } +} + +impl PageFilter for PageFilterVec { + fn register(&mut self, page_id: GuestPhysAddr) { + self.registered_pages.insert(page_id); + Qemu::get().unwrap().flush_jit(); + } + + fn allowed(&self, paging_id: &GuestPhysAddr) -> bool { + // if self.allowed_pages.is_empty() { + // return true; + // } + + self.registered_pages.contains(paging_id) + } +} + +#[cfg(feature = "systemmode")] +impl PageFilter for StdPageFilter { + fn register(&mut self, page_id: GuestPhysAddr) { + self.0.register(page_id); + } + + fn allowed(&self, page_id: &GuestPhysAddr) -> bool { + self.0.allowed(page_id) + } +} + +// adapted from https://xorshift.di.unimi.it/splitmix64.c +#[must_use] +pub fn hash_me(mut x: u64) -> u64 { + x = (x ^ (x.overflowing_shr(30).0)) + .overflowing_mul(0xbf58476d1ce4e5b9) + .0; + x = (x ^ (x.overflowing_shr(27).0)) + .overflowing_mul(0x94d049bb133111eb) + .0; + x ^ (x.overflowing_shr(31).0) +} + +pub trait AddressFilter: 'static + Debug { + fn register(&mut self, address_range: Range); + + fn allowed(&self, address: &GuestAddr) -> bool; +} + +#[derive(Debug)] +pub struct NopAddressFilter; +impl AddressFilter for NopAddressFilter { + fn register(&mut self, _address: Range) {} + + fn allowed(&self, _address: &GuestAddr) -> bool { + true + } +} + +pub trait PageFilter: 'static + Debug { + fn register(&mut self, page_id: GuestPhysAddr); + + fn allowed(&self, page_id: &GuestPhysAddr) -> bool; +} + +#[derive(Clone, Debug, Default)] +pub struct NopPageFilter; +impl PageFilter for NopPageFilter { + fn register(&mut self, _page_id: GuestPhysAddr) {} + + fn allowed(&self, _page_id: &GuestPhysAddr) -> bool { + true + } +} + +#[cfg(feature = "usermode")] +static mut NOP_ADDRESS_FILTER: UnsafeCell = UnsafeCell::new(NopAddressFilter); +#[cfg(feature = "systemmode")] +static mut NOP_PAGE_FILTER: UnsafeCell = UnsafeCell::new(NopPageFilter); diff --git a/libafl_qemu/src/modules/systemmode/mod.rs b/libafl_qemu/src/modules/systemmode/mod.rs new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/libafl_qemu/src/modules/systemmode/mod.rs @@ -0,0 +1 @@ + diff --git a/libafl_qemu/src/helpers/asan.rs b/libafl_qemu/src/modules/usermode/asan.rs similarity index 83% rename from libafl_qemu/src/helpers/asan.rs rename to libafl_qemu/src/modules/usermode/asan.rs index 87819da9c5..45ee1e8626 100644 --- a/libafl_qemu/src/helpers/asan.rs +++ b/libafl_qemu/src/modules/usermode/asan.rs @@ -1,14 +1,9 @@ #![allow(clippy::cast_possible_wrap)] -use std::{ - borrow::Cow, - collections::{HashMap, HashSet}, - env, fs, - path::PathBuf, - sync::Mutex, -}; +use std::{borrow::Cow, env, fs, path::PathBuf, sync::Mutex}; -use libafl::{executors::ExitKind, inputs::UsesInput, observers::ObserversTuple, HasMetadata}; +use hashbrown::{HashMap, HashSet}; +use libafl::{executors::ExitKind, inputs::UsesInput, observers::ObserversTuple}; use libc::{ c_void, MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_NORESERVE, MAP_PRIVATE, PROT_READ, PROT_WRITE, }; @@ -17,15 +12,13 @@ use num_enum::{IntoPrimitive, TryFromPrimitive}; use rangemap::RangeMap; use 
crate::{ - helpers::{ - calls::FullBacktraceCollector, HasInstrumentationFilter, IsFilter, QemuHelper, - QemuHelperTuple, QemuInstrumentationAddressRangeFilter, + modules::{ + calls::FullBacktraceCollector, snapshot::SnapshotModule, EmulatorModule, + EmulatorModuleTuple, }, - hooks::{Hook, QemuHooks}, - qemu::{MemAccessInfo, QemuInitError, SyscallHookResult}, - snapshot::QemuSnapshotHelper, + qemu::{MemAccessInfo, QemuInitError}, sys::TCGTemp, - GuestAddr, Qemu, Regs, + Qemu, Regs, }; // TODO at some point, merge parts with libafl_frida @@ -155,8 +148,15 @@ impl AllocTreeItem { } use std::pin::Pin; +use libafl_qemu_sys::GuestAddr; use object::{Object, ObjectSection}; +use crate::{ + emu::EmulatorModules, + modules::{AddressFilter, StdAddressFilter}, + qemu::{Hook, QemuHooks, SyscallHookResult}, +}; + pub struct AsanGiovese { pub alloc_tree: Mutex>, pub saved_tree: IntervalTree, @@ -210,7 +210,7 @@ impl AsanGiovese { } #[must_use] - fn new(emu: Qemu) -> Pin> { + fn new(qemu_hooks: QemuHooks) -> Pin> { let res = Self { alloc_tree: Mutex::new(IntervalTree::new()), saved_tree: IntervalTree::new(), @@ -220,7 +220,7 @@ impl AsanGiovese { snapshot_shadow: true, // By default, track the dirty shadow pages }; let mut boxed = Box::pin(res); - emu.add_pre_syscall_hook(boxed.as_mut(), Self::fake_syscall); + qemu_hooks.add_pre_syscall_hook(boxed.as_mut(), Self::fake_syscall); boxed } @@ -479,6 +479,9 @@ impl AsanGiovese { } pub fn alloc_insert(&mut self, pc: GuestAddr, start: GuestAddr, end: GuestAddr) { + // # Safety + // Will access the global [`FullBacktraceCollector`]. + // Calling this function concurrently might be racey. let backtrace = FullBacktraceCollector::backtrace() .map(|r| { let mut v = r.to_vec(); @@ -504,6 +507,9 @@ impl AsanGiovese { } pub fn alloc_free(&mut self, qemu: Qemu, pc: GuestAddr, addr: GuestAddr) { + // # Safety + // Will access the global [`FullBacktraceCollector`]. + // Calling this function concurrently might be racey. let mut chunk = None; self.alloc_map_mut(addr, |interval, item| { chunk = Some(*interval); @@ -685,6 +691,7 @@ pub fn init_qemu_with_asan( let add_asan = |e: &str| "LD_PRELOAD=".to_string() + &asan_lib + " " + &e["LD_PRELOAD=".len()..]; + // TODO: adapt since qemu does not take envp anymore as parameter let mut added = false; for (k, v) in &mut *env { if k == "QEMU_SET_ENV" { @@ -717,8 +724,8 @@ pub fn init_qemu_with_asan( ASAN_INITED = true; } - let qemu = Qemu::init(args, env)?; - let rt = AsanGiovese::new(qemu); + let qemu = Qemu::init(args)?; + let rt = AsanGiovese::new(qemu.hooks()); Ok((qemu, rt)) } @@ -730,32 +737,28 @@ pub enum QemuAsanOptions { SnapshotDetectLeaks, } -pub type QemuAsanChildHelper = QemuAsanHelper; +pub type AsanChildModule = AsanModule; #[derive(Debug)] -pub struct QemuAsanHelper { +pub struct AsanModule { enabled: bool, detect_leaks: bool, empty: bool, rt: Pin>, - filter: QemuInstrumentationAddressRangeFilter, + filter: StdAddressFilter, } -impl QemuAsanHelper { +impl AsanModule { #[must_use] pub fn default(rt: Pin>) -> Self { - Self::new( - rt, - QemuInstrumentationAddressRangeFilter::None, - QemuAsanOptions::Snapshot, - ) + Self::new(rt, StdAddressFilter::default(), &QemuAsanOptions::Snapshot) } #[must_use] pub fn new( mut rt: Pin>, - filter: QemuInstrumentationAddressRangeFilter, - options: QemuAsanOptions, + filter: StdAddressFilter, + options: &QemuAsanOptions, ) -> Self { assert!(unsafe { ASAN_INITED }, "The ASan runtime is not initialized, use init_qemu_with_asan(...) 
instead of just Qemu::init(...)"); let (snapshot, detect_leaks) = match options { @@ -777,9 +780,9 @@ impl QemuAsanHelper { #[must_use] pub fn with_error_callback( mut rt: Pin>, - filter: QemuInstrumentationAddressRangeFilter, + filter: StdAddressFilter, error_callback: AsanErrorCallback, - options: QemuAsanOptions, + options: &QemuAsanOptions, ) -> Self { assert!(unsafe { ASAN_INITED }, "The ASan runtime is not initialized, use init_qemu_with_asan(...) instead of just Qemu::init(...)"); let (snapshot, detect_leaks) = match options { @@ -799,18 +802,25 @@ impl QemuAsanHelper { } } + /// # Safety + /// The `ASan` error report accesses [`FullBacktraceCollector`] #[must_use] - pub fn with_asan_report( + pub unsafe fn with_asan_report( rt: Pin>, - filter: QemuInstrumentationAddressRangeFilter, - options: QemuAsanOptions, + filter: StdAddressFilter, + options: &QemuAsanOptions, ) -> Self { - Self::with_error_callback(rt, filter, Box::new(asan_report), options) + Self::with_error_callback( + rt, + filter, + Box::new(|rt, qemu, pc, err| unsafe { asan_report(rt, qemu, pc, &err) }), + options, + ) } #[must_use] pub fn must_instrument(&self, addr: GuestAddr) -> bool { - self.filter.allowed(addr) + self.filter.allowed(&addr) } #[must_use] @@ -912,113 +922,121 @@ impl QemuAsanHelper { } } -impl HasInstrumentationFilter for QemuAsanHelper { - fn filter(&self) -> &QemuInstrumentationAddressRangeFilter { - &self.filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationAddressRangeFilter { - &mut self.filter - } -} - -impl QemuHelper for QemuAsanHelper +impl EmulatorModule for AsanModule where - S: UsesInput + HasMetadata, + S: Unpin + UsesInput, { + type ModuleAddressFilter = StdAddressFilter; const HOOKS_DO_SIDE_EFFECTS: bool = false; - fn init_hooks(&self, hooks: &QemuHooks) + fn post_qemu_init(&self, emulator_modules: &mut EmulatorModules) where - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, { - hooks.syscalls(Hook::Function(qasan_fake_syscall::)); + emulator_modules.syscalls(Hook::Function(qasan_fake_syscall::)); if self.rt.error_callback.is_some() { - hooks.crash_function(oncrash_asan::); + emulator_modules.crash_function(oncrash_asan::); } } - fn first_exec(&self, hooks: &QemuHooks) + fn first_exec(&mut self, emulator_modules: &mut EmulatorModules, _state: &mut S) where - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, { - hooks.reads( - Hook::Function(gen_readwrite_asan::), - Hook::Function(trace_read1_asan::), - Hook::Function(trace_read2_asan::), - Hook::Function(trace_read4_asan::), - Hook::Function(trace_read8_asan::), - Hook::Function(trace_read_n_asan::), + emulator_modules.reads( + Hook::Function(gen_readwrite_asan::), + Hook::Function(trace_read1_asan::), + Hook::Function(trace_read2_asan::), + Hook::Function(trace_read4_asan::), + Hook::Function(trace_read8_asan::), + Hook::Function(trace_read_n_asan::), ); - if hooks.match_helper::().is_none() { - hooks.writes( - Hook::Function(gen_readwrite_asan::), - Hook::Function(trace_write1_asan::), - Hook::Function(trace_write2_asan::), - Hook::Function(trace_write4_asan::), - Hook::Function(trace_write8_asan::), - Hook::Function(trace_write_n_asan::), + if emulator_modules.get::().is_none() { + emulator_modules.writes( + Hook::Function(gen_readwrite_asan::), + Hook::Function(trace_write1_asan::), + Hook::Function(trace_write2_asan::), + Hook::Function(trace_write4_asan::), + Hook::Function(trace_write8_asan::), + Hook::Function(trace_write_n_asan::), ); } else { - // track writes for both helpers as opt - hooks.writes( - 
Hook::Function(gen_write_asan_snapshot::), - Hook::Function(trace_write1_asan_snapshot::), - Hook::Function(trace_write2_asan_snapshot::), - Hook::Function(trace_write4_asan_snapshot::), - Hook::Function(trace_write8_asan_snapshot::), - Hook::Function(trace_write_n_asan_snapshot::), + // track writes for both modules as opt + emulator_modules.writes( + Hook::Function(gen_write_asan_snapshot::), + Hook::Function(trace_write1_asan_snapshot::), + Hook::Function(trace_write2_asan_snapshot::), + Hook::Function(trace_write4_asan_snapshot::), + Hook::Function(trace_write8_asan_snapshot::), + Hook::Function(trace_write_n_asan_snapshot::), ); } } - fn pre_exec(&mut self, qemu: Qemu, _input: &S::Input) { + fn pre_exec( + &mut self, + emulator_modules: &mut EmulatorModules, + _state: &mut S, + _input: &S::Input, + ) where + ET: EmulatorModuleTuple, + { if self.empty { - self.rt.snapshot(qemu); + self.rt.snapshot(emulator_modules.qemu()); self.empty = false; } } - fn post_exec( + fn post_exec( &mut self, - qemu: Qemu, + emulator_modules: &mut EmulatorModules, + _state: &mut S, _input: &S::Input, _observers: &mut OT, exit_kind: &mut ExitKind, ) where - OT: ObserversTuple, + OT: ObserversTuple, + ET: EmulatorModuleTuple, { - if self.reset(qemu) == AsanRollback::HasLeaks { + if self.reset(emulator_modules.qemu()) == AsanRollback::HasLeaks { *exit_kind = ExitKind::Crash; } } + + fn address_filter(&self) -> &Self::ModuleAddressFilter { + &self.filter + } + + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter { + &mut self.filter + } } -pub fn oncrash_asan(hooks: &mut QemuHooks, target_sig: i32) +pub fn oncrash_asan(emulator_modules: &mut EmulatorModules, target_sig: i32) where - S: UsesInput, - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); let pc: GuestAddr = qemu.read_reg(Regs::Pc).unwrap(); h.rt.report(qemu, pc, AsanError::Signal(target_sig)); } -pub fn gen_readwrite_asan( - hooks: &mut QemuHooks, +pub fn gen_readwrite_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, pc: GuestAddr, _addr: *mut TCGTemp, _info: MemAccessInfo, ) -> Option where - S: UsesInput, - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); if h.must_instrument(pc) { Some(pc.into()) } else { @@ -1026,160 +1044,160 @@ where } } -pub fn trace_read1_asan( - hooks: &mut QemuHooks, +pub fn trace_read1_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.read_1(qemu, id as GuestAddr, addr); } -pub fn trace_read2_asan( - hooks: &mut QemuHooks, +pub fn trace_read2_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.read_2(qemu, id as GuestAddr, addr); } -pub fn trace_read4_asan( - hooks: &mut 
QemuHooks, +pub fn trace_read4_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.read_4(qemu, id as GuestAddr, addr); } -pub fn trace_read8_asan( - hooks: &mut QemuHooks, +pub fn trace_read8_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.read_8(qemu, id as GuestAddr, addr); } -pub fn trace_read_n_asan( - hooks: &mut QemuHooks, +pub fn trace_read_n_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, size: usize, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.read_n(qemu, id as GuestAddr, addr, size); } -pub fn trace_write1_asan( - hooks: &mut QemuHooks, +pub fn trace_write1_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.write_1(qemu, id as GuestAddr, addr); } -pub fn trace_write2_asan( - hooks: &mut QemuHooks, +pub fn trace_write2_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.write_2(qemu, id as GuestAddr, addr); } -pub fn trace_write4_asan( - hooks: &mut QemuHooks, +pub fn trace_write4_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.write_4(qemu, id as GuestAddr, addr); } -pub fn trace_write8_asan( - hooks: &mut QemuHooks, +pub fn trace_write8_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.write_8(qemu, id as GuestAddr, addr); } -pub fn trace_write_n_asan( - hooks: &mut QemuHooks, +pub fn trace_write_n_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, size: usize, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, 
+ ET: EmulatorModuleTuple, { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.read_n(qemu, id as GuestAddr, addr, size); } -pub fn gen_write_asan_snapshot( - hooks: &mut QemuHooks, +pub fn gen_write_asan_snapshot( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, pc: GuestAddr, _addr: *mut TCGTemp, _info: MemAccessInfo, ) -> Option where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); if h.must_instrument(pc) { Some(pc.into()) } else { @@ -1187,100 +1205,100 @@ where } } -pub fn trace_write1_asan_snapshot( - hooks: &mut QemuHooks, +pub fn trace_write1_asan_snapshot( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { if id != 0 { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.write_1(qemu, id as GuestAddr, addr); } - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(addr, 1); } -pub fn trace_write2_asan_snapshot( - hooks: &mut QemuHooks, +pub fn trace_write2_asan_snapshot( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { if id != 0 { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.write_2(qemu, id as GuestAddr, addr); } - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(addr, 2); } -pub fn trace_write4_asan_snapshot( - hooks: &mut QemuHooks, +pub fn trace_write4_asan_snapshot( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { if id != 0 { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.write_4(qemu, id as GuestAddr, addr); } - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(addr, 4); } -pub fn trace_write8_asan_snapshot( - hooks: &mut QemuHooks, +pub fn trace_write8_asan_snapshot( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { if id != 0 { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.write_8(qemu, id as GuestAddr, addr); } - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(addr, 8); } -pub fn trace_write_n_asan_snapshot( - hooks: &mut QemuHooks, +pub fn trace_write_n_asan_snapshot( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, id: u64, addr: GuestAddr, size: usize, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { if id != 0 
{ - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); h.read_n(qemu, id as GuestAddr, addr, size); } - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(addr, size); } #[allow(clippy::too_many_arguments)] -pub fn qasan_fake_syscall( - hooks: &mut QemuHooks, +pub fn qasan_fake_syscall( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, sys_num: i32, a0: GuestAddr, @@ -1293,12 +1311,12 @@ pub fn qasan_fake_syscall( _a7: GuestAddr, ) -> SyscallHookResult where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { if sys_num == QASAN_FAKESYS_NR { - let qemu = *hooks.qemu(); - let h = hooks.match_helper_mut::().unwrap(); + let qemu = emulator_modules.qemu(); + let h = emulator_modules.get_mut::().unwrap(); match QasanAction::try_from(a0).expect("Invalid QASan action number") { QasanAction::CheckLoad => { let pc: GuestAddr = qemu.read_reg(Regs::Pc).unwrap(); @@ -1513,9 +1531,12 @@ mod addr2line_legacy { } } +/// # Safety +/// Will access the global [`FullBacktraceCollector`]. +/// Calling this function concurrently might be racey. #[allow(clippy::unnecessary_cast)] #[allow(clippy::too_many_lines)] -pub fn asan_report(rt: &AsanGiovese, qemu: Qemu, pc: GuestAddr, err: AsanError) { +pub unsafe fn asan_report(rt: &AsanGiovese, qemu: Qemu, pc: GuestAddr, err: &AsanError) { let mut regions = HashMap::new(); for region in qemu.mappings() { if let Some(path) = region.path() { @@ -1647,7 +1668,7 @@ pub fn asan_report(rt: &AsanGiovese, qemu: Qemu, pc: GuestAddr, err: AsanError) } let addr = match err { AsanError::Read(addr, _) | AsanError::Write(addr, _) | AsanError::BadFree(addr, _) => { - Some(addr) + Some(*addr) } AsanError::MemLeak(_) | AsanError::Signal(_) => None, }; diff --git a/libafl_qemu/src/helpers/asan_guest.rs b/libafl_qemu/src/modules/usermode/asan_guest.rs similarity index 67% rename from libafl_qemu/src/helpers/asan_guest.rs rename to libafl_qemu/src/modules/usermode/asan_guest.rs index c5a70886ee..567d98048e 100644 --- a/libafl_qemu/src/helpers/asan_guest.rs +++ b/libafl_qemu/src/modules/usermode/asan_guest.rs @@ -7,19 +7,16 @@ use std::{ path::PathBuf, }; -use libafl::{inputs::UsesInput, HasMetadata}; +use libafl::inputs::UsesInput; +use libafl_qemu_sys::{GuestAddr, MapInfo}; #[cfg(not(feature = "clippy"))] use crate::sys::libafl_tcg_gen_asan; use crate::{ - helpers::{ - HasInstrumentationFilter, IsFilter, QemuHelper, QemuHelperTuple, - QemuInstrumentationAddressRangeFilter, - }, - hooks::{Hook, QemuHooks}, - qemu::{MemAccessInfo, Qemu, QemuInitError}, + emu::EmulatorModules, + modules::{AddressFilter, EmulatorModule, EmulatorModuleTuple, StdAddressFilter}, + qemu::{Hook, MemAccessInfo, Qemu, QemuInitError}, sys::TCGTemp, - GuestAddr, MapInfo, }; static mut ASAN_GUEST_INITED: bool = false; @@ -94,7 +91,7 @@ pub fn init_qemu_with_asan_guest( ASAN_GUEST_INITED = true; } - let emu = Qemu::init(args, env)?; + let emu = Qemu::init(args)?; Ok((emu, asan_lib)) } @@ -121,13 +118,18 @@ impl From<&MapInfo> for QemuAsanGuestMapping { } #[derive(Debug)] -pub struct QemuAsanGuestHelper { - filter: QemuInstrumentationAddressRangeFilter, +pub struct AsanGuestModule { + filter: F, mappings: Vec, } -#[cfg(any(cpu_target = "aarch64", cpu_target = "x86_64", feature = "clippy"))] -impl QemuAsanGuestHelper { +#[cfg(any( + cpu_target = "aarch64", + cpu_target = "x86_64", + cpu_target = "riscv64", 
+ feature = "clippy" +))] +impl AsanGuestModule { const HIGH_SHADOW_START: GuestAddr = 0x02008fff7000; const HIGH_SHADOW_END: GuestAddr = 0x10007fff7fff; const LOW_SHADOW_START: GuestAddr = 0x00007fff8000; @@ -138,28 +140,34 @@ impl QemuAsanGuestHelper { cpu_target = "arm", cpu_target = "i386", cpu_target = "mips", - cpu_target = "ppc" + cpu_target = "ppc", + cpu_target = "riscv32", ))] -impl QemuAsanGuestHelper { +impl AsanGuestModule { const HIGH_SHADOW_START: GuestAddr = 0x28000000; const HIGH_SHADOW_END: GuestAddr = 0x3fffffff; const LOW_SHADOW_START: GuestAddr = 0x20000000; const LOW_SHADOW_END: GuestAddr = 0x23ffffff; } -impl QemuAsanGuestHelper { +impl AsanGuestModule { #[must_use] - pub fn default(emu: &Qemu, asan: String) -> Self { - Self::new(emu, asan, QemuInstrumentationAddressRangeFilter::None) + pub fn default(qemu: Qemu, asan: &str) -> Self { + Self::new(qemu, asan, StdAddressFilter::default()) } +} +impl AsanGuestModule +where + F: AddressFilter, +{ #[must_use] - pub fn new(emu: &Qemu, asan: String, filter: QemuInstrumentationAddressRangeFilter) -> Self { - for mapping in emu.mappings() { + pub fn new(qemu: Qemu, asan: &str, filter: F) -> Self { + for mapping in qemu.mappings() { println!("mapping: {mapping:#?}"); } - let mappings = emu + let mappings = qemu .mappings() .map(|m| QemuAsanGuestMapping::from(&m)) .collect::>(); @@ -193,32 +201,23 @@ impl QemuAsanGuestHelper { #[must_use] pub fn must_instrument(&self, addr: GuestAddr) -> bool { - self.filter.allowed(addr) + self.filter.allowed(&addr) } } -impl HasInstrumentationFilter for QemuAsanGuestHelper { - fn filter(&self) -> &QemuInstrumentationAddressRangeFilter { - &self.filter - } - - fn filter_mut(&mut self) -> &mut QemuInstrumentationAddressRangeFilter { - &mut self.filter - } -} - -fn gen_readwrite_guest_asan( - hooks: &mut QemuHooks, +fn gen_readwrite_guest_asan( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, pc: GuestAddr, addr: *mut TCGTemp, info: MemAccessInfo, ) -> Option where - S: UsesInput, - QT: QemuHelperTuple, + F: AddressFilter, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::>().unwrap(); if !h.must_instrument(pc) { return None; } @@ -244,55 +243,67 @@ where #[allow(unused_variables)] unsafe fn libafl_tcg_gen_asan(addr: *mut TCGTemp, size: usize) {} -fn guest_trace_error_asan( - _hooks: &mut QemuHooks, +fn guest_trace_error_asan( + _emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, _id: u64, _addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { panic!("I really shouldn't be here"); } -fn guest_trace_error_n_asan( - _hooks: &mut QemuHooks, +fn guest_trace_error_n_asan( + _emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, _id: u64, _addr: GuestAddr, _n: usize, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { panic!("I really shouldn't be here either"); } -impl QemuHelper for QemuAsanGuestHelper +impl EmulatorModule for AsanGuestModule where - S: UsesInput + HasMetadata, + F: AddressFilter, + S: Unpin + UsesInput, { - fn first_exec(&self, hooks: &QemuHooks) + type ModuleAddressFilter = F; + + fn first_exec(&mut self, emulator_modules: &mut EmulatorModules, _state: &mut S) where - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, { - hooks.reads( - Hook::Function(gen_readwrite_guest_asan::), - Hook::Function(guest_trace_error_asan::), - 
Hook::Function(guest_trace_error_asan::), - Hook::Function(guest_trace_error_asan::), - Hook::Function(guest_trace_error_asan::), - Hook::Function(guest_trace_error_n_asan::), + emulator_modules.reads( + Hook::Function(gen_readwrite_guest_asan::), + Hook::Function(guest_trace_error_asan::), + Hook::Function(guest_trace_error_asan::), + Hook::Function(guest_trace_error_asan::), + Hook::Function(guest_trace_error_asan::), + Hook::Function(guest_trace_error_n_asan::), ); - hooks.writes( - Hook::Function(gen_readwrite_guest_asan::), - Hook::Function(guest_trace_error_asan::), - Hook::Function(guest_trace_error_asan::), - Hook::Function(guest_trace_error_asan::), - Hook::Function(guest_trace_error_asan::), - Hook::Function(guest_trace_error_n_asan::), + emulator_modules.writes( + Hook::Function(gen_readwrite_guest_asan::), + Hook::Function(guest_trace_error_asan::), + Hook::Function(guest_trace_error_asan::), + Hook::Function(guest_trace_error_asan::), + Hook::Function(guest_trace_error_asan::), + Hook::Function(guest_trace_error_n_asan::), ); } + + fn address_filter(&self) -> &Self::ModuleAddressFilter { + &self.filter + } + + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter { + &mut self.filter + } } diff --git a/libafl_qemu/src/helpers/injections.rs b/libafl_qemu/src/modules/usermode/injections.rs similarity index 87% rename from libafl_qemu/src/helpers/injections.rs rename to libafl_qemu/src/modules/usermode/injections.rs index d5a0ca1130..9d62e7b017 100644 --- a/libafl_qemu/src/helpers/injections.rs +++ b/libafl_qemu/src/modules/usermode/injections.rs @@ -21,9 +21,13 @@ use serde::{Deserialize, Serialize}; #[cfg(not(cpu_target = "hexagon"))] use crate::SYS_execve; use crate::{ - elf::EasyElf, qemu::ArchExtras, CallingConvention, Hook, Qemu, QemuHelper, QemuHelperTuple, - QemuHooks, SyscallHookResult, + elf::EasyElf, + emu::EmulatorModules, + modules::{EmulatorModule, EmulatorModuleTuple, NopAddressFilter, NOP_ADDRESS_FILTER}, + qemu::{ArchExtras, Hook, SyscallHookResult}, + CallingConvention, Qemu, }; + #[cfg(cpu_target = "hexagon")] /// Hexagon syscalls are not currently supported by the `syscalls` crate, so we just paste this here for now. /// @@ -146,13 +150,13 @@ pub struct Match { } #[derive(Debug)] -pub struct QemuInjectionHelper { +pub struct InjectionModule { pub tokens: Vec, definitions: HashMap, matches_list: Vec, } -impl QemuInjectionHelper { +impl InjectionModule { /// `configure_injections` is the main function to activate the injection /// vulnerability detection feature. 
pub fn from_yaml + Display>(yaml_file: P) -> Result { @@ -207,20 +211,20 @@ impl QemuInjectionHelper { }) } - fn on_call_check>( - hooks: &mut QemuHooks, - id: usize, - parameter: u8, - ) { - let qemu = hooks.qemu(); + fn on_call_check(emulator_modules: &mut EmulatorModules, id: usize, parameter: u8) + where + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, + { + let qemu = emulator_modules.qemu(); let reg: GuestAddr = qemu .current_cpu() .unwrap() .read_function_argument(CallingConvention::Cdecl, parameter) .unwrap_or_default(); - let helper = hooks.helpers_mut().match_first_type_mut::().unwrap(); - let matches = &helper.matches_list[id]; + let module = emulator_modules.get_mut::().unwrap(); + let matches = &module.matches_list[id]; //println!("reg value = {:x}", reg); @@ -252,22 +256,24 @@ impl QemuInjectionHelper { } } -impl QemuHelper for QemuInjectionHelper +impl EmulatorModule for InjectionModule where - S: UsesInput, + S: Unpin + UsesInput, { - fn init_hooks(&self, hooks: &QemuHooks) + type ModuleAddressFilter = NopAddressFilter; + + fn post_qemu_init(&self, emulator_modules: &mut EmulatorModules) where - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, { - hooks.syscalls(Hook::Function(syscall_hook::)); + emulator_modules.syscalls(Hook::Function(syscall_hook::)); } - fn first_exec(&self, hooks: &QemuHooks) + fn first_exec(&mut self, emulator_modules: &mut EmulatorModules, _state: &mut S) where - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, { - let qemu = *hooks.qemu(); + let qemu = emulator_modules.qemu(); let mut libs: Vec = Vec::new(); for region in qemu.mappings() { @@ -303,9 +309,8 @@ where } else { libs.iter() .filter_map(|lib| find_function(qemu, &lib.name, name, lib.off).unwrap()) - .map(|func_pc| { - log::info!("Injections: Function {name} found at {func_pc:#x}",); - func_pc + .inspect(|&func_pc| { + log::info!("Injections: Function {name} found at {func_pc:#x}"); }) .collect() }; @@ -317,7 +322,7 @@ where let param = func_definition.param; for hook_addr in hook_addrs { - hooks.instruction( + emulator_modules.instructions( hook_addr, Hook::Closure(Box::new(move |hooks, _state, _guest_addr| { Self::on_call_check(hooks, id, param); @@ -328,13 +333,25 @@ where } } } + + fn address_filter(&self) -> &Self::ModuleAddressFilter { + &NopAddressFilter + } + + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter { + unsafe { (&raw mut NOP_ADDRESS_FILTER).as_mut().unwrap().get_mut() } + } } -fn syscall_hook( - hooks: &mut QemuHooks, // our instantiated QemuHooks +#[allow(clippy::too_many_arguments)] +fn syscall_hook( + // Our instantiated [`EmulatorModules`] + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, - syscall: i32, // syscall number - x0: GuestAddr, // registers ... 
+ // Syscall number + syscall: i32, + // Registers + x0: GuestAddr, x1: GuestAddr, _x2: GuestAddr, _x3: GuestAddr, @@ -344,16 +361,13 @@ fn syscall_hook( _x7: GuestAddr, ) -> SyscallHookResult where - QT: QemuHelperTuple, - S: UsesInput, + ET: EmulatorModuleTuple, + S: Unpin + UsesInput, { log::trace!("syscall_hook {syscall} {SYS_execve}"); debug_assert!(i32::try_from(SYS_execve).is_ok()); if syscall == SYS_execve as i32 { - let _helper = hooks - .helpers_mut() - .match_first_type_mut::() - .unwrap(); + let _module = emulator_modules.get_mut::().unwrap(); if x0 > 0 && x1 > 0 { let c_array = x1 as *const *const c_char; let cmd = unsafe { diff --git a/libafl_qemu/src/modules/usermode/mod.rs b/libafl_qemu/src/modules/usermode/mod.rs new file mode 100644 index 0000000000..1d9cc503d2 --- /dev/null +++ b/libafl_qemu/src/modules/usermode/mod.rs @@ -0,0 +1,19 @@ +#[cfg(feature = "injections")] +pub mod injections; +#[cfg(feature = "injections")] +pub use injections::InjectionModule; + +#[cfg(not(cpu_target = "hexagon"))] +pub mod snapshot; +#[cfg(not(cpu_target = "hexagon"))] +pub use snapshot::{IntervalSnapshotFilter, SnapshotModule}; + +#[cfg(not(cpu_target = "hexagon"))] +pub mod asan; +#[cfg(not(cpu_target = "hexagon"))] +pub use asan::{init_qemu_with_asan, AsanModule}; + +#[cfg(not(cpu_target = "hexagon"))] +pub mod asan_guest; +#[cfg(not(cpu_target = "hexagon"))] +pub use asan_guest::{init_qemu_with_asan_guest, AsanGuestModule}; diff --git a/libafl_qemu/src/helpers/snapshot.rs b/libafl_qemu/src/modules/usermode/snapshot.rs similarity index 83% rename from libafl_qemu/src/helpers/snapshot.rs rename to libafl_qemu/src/modules/usermode/snapshot.rs index 4c47162403..4eabc858c2 100644 --- a/libafl_qemu/src/helpers/snapshot.rs +++ b/libafl_qemu/src/modules/usermode/snapshot.rs @@ -1,42 +1,42 @@ -use std::{ - cell::UnsafeCell, - collections::{HashMap, HashSet}, - mem::MaybeUninit, - sync::Mutex, -}; +use std::{cell::UnsafeCell, mem::MaybeUninit, sync::Mutex}; -use libafl::{inputs::UsesInput, HasMetadata}; +use hashbrown::{HashMap, HashSet}; +use libafl::inputs::UsesInput; use libafl_qemu_sys::{GuestAddr, MmapPerms}; use meminterval::{Interval, IntervalTree}; use thread_local::ThreadLocal; #[cfg(any(cpu_target = "arm", cpu_target = "i386", cpu_target = "mips"))] use crate::SYS_fstatat64; -#[cfg(not(cpu_target = "arm"))] +#[cfg(not(any(cpu_target = "arm", cpu_target = "riscv32")))] use crate::SYS_mmap; -#[cfg(any(cpu_target = "arm", cpu_target = "mips"))] +#[cfg(any(cpu_target = "arm", cpu_target = "mips", cpu_target = "riscv32"))] use crate::SYS_mmap2; #[cfg(not(any( cpu_target = "arm", cpu_target = "mips", cpu_target = "i386", - cpu_target = "ppc" + cpu_target = "ppc", + cpu_target = "riscv32", )))] use crate::SYS_newfstatat; use crate::{ - asan::QemuAsanHelper, - helpers::{QemuHelper, QemuHelperTuple, Range}, - hooks::{Hook, QemuHooks}, - qemu::SyscallHookResult, - Qemu, SYS_brk, SYS_fstat, SYS_fstatfs, SYS_futex, SYS_getrandom, SYS_mprotect, SYS_mremap, - SYS_munmap, SYS_pread64, SYS_read, SYS_readlinkat, SYS_statfs, + emu::EmulatorModules, + modules::{ + asan::AsanModule, EmulatorModule, EmulatorModuleTuple, NopAddressFilter, Range, + NOP_ADDRESS_FILTER, + }, + qemu::{Hook, SyscallHookResult}, + Qemu, SYS_brk, SYS_mprotect, SYS_mremap, SYS_munmap, SYS_pread64, SYS_read, SYS_readlinkat, }; +#[cfg(not(cpu_target = "riscv32"))] +use crate::{SYS_fstat, SYS_fstatfs, SYS_futex, SYS_getrandom, SYS_statfs}; // TODO use the functions provided by Qemu pub const SNAPSHOT_PAGE_SIZE: usize = 4096; pub const 
SNAPSHOT_PAGE_MASK: GuestAddr = !(SNAPSHOT_PAGE_SIZE as GuestAddr - 1); -pub type StopExecutionCallback = Box; +pub type StopExecutionCallback = Box; #[derive(Clone, Debug)] pub struct SnapshotPageInfo { @@ -84,7 +84,7 @@ pub enum IntervalSnapshotFilter { DenyList(Vec>), } -pub struct QemuSnapshotHelper { +pub struct SnapshotModule { pub accesses: ThreadLocal>, pub maps: MappingInfo, pub new_maps: Mutex, @@ -98,9 +98,9 @@ pub struct QemuSnapshotHelper { pub interval_filter: Vec, } -impl core::fmt::Debug for QemuSnapshotHelper { +impl core::fmt::Debug for SnapshotModule { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - f.debug_struct("QemuSnapshotHelper") + f.debug_struct("SnapshotModule") .field("accesses", &self.accesses) .field("new_maps", &self.new_maps) .field("pages", &self.pages) @@ -112,7 +112,7 @@ impl core::fmt::Debug for QemuSnapshotHelper { } } -impl QemuSnapshotHelper { +impl SnapshotModule { #[must_use] pub fn new() -> Self { Self { @@ -210,7 +210,7 @@ impl QemuSnapshotHelper { // TODO not just for R pages unsafe { info.data = Some(Box::new(core::mem::zeroed())); - qemu.read_mem(addr, &mut info.data.as_mut().unwrap()[..]); + qemu.read_mem_unchecked(addr, &mut info.data.as_mut().unwrap()[..]); } } self.pages.insert(addr, info); @@ -301,7 +301,8 @@ impl QemuSnapshotHelper { qemu.read_mem( addr, current_page_content.as_mut_ptr().as_mut().unwrap(), - ); + ) + .unwrap(); } let current_page_content: &mut [u8; SNAPSHOT_PAGE_SIZE] = @@ -413,7 +414,7 @@ impl QemuSnapshotHelper { return true; // Restore later } - unsafe { qemu.write_mem(*page, &data[..]) }; + unsafe { qemu.write_mem_unchecked(*page, &data[..]) }; } else { panic!("Cannot restored a dirty but unsaved page"); } @@ -448,7 +449,7 @@ impl QemuSnapshotHelper { if let Some(info) = self.pages.get_mut(page) { // TODO avoid duplicated memcpy if let Some(data) = info.data.as_ref() { - unsafe { qemu.write_mem(*page, &data[..]) }; + unsafe { qemu.write_mem_unchecked(*page, &data[..]) }; } else { panic!("Cannot restored a dirty but unsaved page"); } @@ -512,7 +513,7 @@ impl QemuSnapshotHelper { if self.mmap_limit != 0 && total_size > self.mmap_limit { let mut cb = self.stop_execution.take().unwrap(); let qemu = Qemu::get().unwrap(); - cb(self, &qemu); + cb(self, qemu); self.stop_execution = Some(cb); } } @@ -662,78 +663,95 @@ impl QemuSnapshotHelper { } } -impl Default for QemuSnapshotHelper { +impl Default for SnapshotModule { fn default() -> Self { Self::new() } } -impl QemuHelper for QemuSnapshotHelper +impl EmulatorModule for SnapshotModule where - S: UsesInput + HasMetadata, + S: Unpin + UsesInput, { - fn first_exec(&self, hooks: &QemuHooks) + type ModuleAddressFilter = NopAddressFilter; + + fn post_qemu_init(&self, emulator_modules: &mut EmulatorModules) where - QT: QemuHelperTuple, + ET: EmulatorModuleTuple, { - if hooks.match_helper::().is_none() { - // The ASan helper, if present, will call the tracer hook for the snapshot helper as opt - hooks.writes( + if emulator_modules.get::().is_none() { + // The ASan module, if present, will call the tracer hook for the snapshot helper as opt + emulator_modules.writes( Hook::Empty, - Hook::Function(trace_write_snapshot::), - Hook::Function(trace_write_snapshot::), - Hook::Function(trace_write_snapshot::), - Hook::Function(trace_write_snapshot::), - Hook::Function(trace_write_n_snapshot::), + Hook::Function(trace_write_snapshot::), + Hook::Function(trace_write_snapshot::), + Hook::Function(trace_write_snapshot::), + Hook::Function(trace_write_snapshot::), + 
Hook::Function(trace_write_n_snapshot::), ); } if !self.accurate_unmap { - hooks.syscalls(Hook::Function(filter_mmap_snapshot::)); + emulator_modules.syscalls(Hook::Function(filter_mmap_snapshot::)); } - hooks.after_syscalls(Hook::Function(trace_mmap_snapshot::)); + emulator_modules.after_syscalls(Hook::Function(trace_mmap_snapshot::)); } - fn pre_exec(&mut self, qemu: Qemu, _input: &S::Input) { + fn pre_exec( + &mut self, + emulator_modules: &mut EmulatorModules, + _state: &mut S, + _input: &S::Input, + ) where + ET: EmulatorModuleTuple, + { if self.empty { - self.snapshot(qemu); + self.snapshot(emulator_modules.qemu()); } else { - self.reset(qemu); + self.reset(emulator_modules.qemu()); } } + + fn address_filter(&self) -> &Self::ModuleAddressFilter { + &NopAddressFilter + } + + fn address_filter_mut(&mut self) -> &mut Self::ModuleAddressFilter { + unsafe { (&raw mut NOP_ADDRESS_FILTER).as_mut().unwrap().get_mut() } + } } -pub fn trace_write_snapshot( - hooks: &mut QemuHooks, +pub fn trace_write_snapshot( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, _id: u64, addr: GuestAddr, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(addr, SIZE); } -pub fn trace_write_n_snapshot( - hooks: &mut QemuHooks, +pub fn trace_write_n_snapshot( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, _id: u64, addr: GuestAddr, size: usize, ) where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(addr, size); } #[allow(clippy::too_many_arguments)] #[allow(non_upper_case_globals)] -pub fn filter_mmap_snapshot( - hooks: &mut QemuHooks, +pub fn filter_mmap_snapshot( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, sys_num: i32, a0: GuestAddr, @@ -746,11 +764,11 @@ pub fn filter_mmap_snapshot( _a7: GuestAddr, ) -> SyscallHookResult where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { if i64::from(sys_num) == SYS_munmap { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); if !h.is_unmap_allowed(a0 as GuestAddr, a1 as usize) { return SyscallHookResult::new(Some(0)); } @@ -758,10 +776,10 @@ where SyscallHookResult::new(None) } -#[allow(clippy::too_many_arguments)] +#[allow(clippy::too_many_arguments, clippy::too_many_lines)] #[allow(non_upper_case_globals)] -pub fn trace_mmap_snapshot( - hooks: &mut QemuHooks, +pub fn trace_mmap_snapshot( + emulator_modules: &mut EmulatorModules, _state: Option<&mut S>, result: GuestAddr, sys_num: i32, @@ -775,52 +793,56 @@ pub fn trace_mmap_snapshot( _a7: GuestAddr, ) -> GuestAddr where - S: UsesInput, - QT: QemuHelperTuple, + S: Unpin + UsesInput, + ET: EmulatorModuleTuple, { // NOT A COMPLETE LIST OF MEMORY EFFECTS match i64::from(sys_num) { SYS_read | SYS_pread64 => { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(a1, a2 as usize); } SYS_readlinkat => { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(a2, a3 as usize); } + #[cfg(not(cpu_target = "riscv32"))] SYS_futex => { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(a0, a3 as usize); } #[cfg(not(any( cpu_target = "arm", 
cpu_target = "i386", cpu_target = "mips", - cpu_target = "ppc" + cpu_target = "ppc", + cpu_target = "riscv32" )))] SYS_newfstatat => { if a2 != 0 { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(a2, 4096); // stat is not greater than a page } } #[cfg(any(cpu_target = "arm", cpu_target = "mips", cpu_target = "i386"))] SYS_fstatat64 => { if a2 != 0 { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(a2, 4096); // stat is not greater than a page } } - SYS_statfs | SYS_fstatfs | SYS_fstat => { - let h = hooks.match_helper_mut::().unwrap(); + #[cfg(not(cpu_target = "riscv32"))] + SYS_statfs | SYS_fstat | SYS_fstatfs => { + let h = emulator_modules.get_mut::().unwrap(); h.access(a1, 4096); // stat is not greater than a page } + #[cfg(not(cpu_target = "riscv32"))] SYS_getrandom => { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.access(a0, a1 as usize); } SYS_brk => { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); if h.brk != result && result != 0 { /* brk has changed. we change mapping from the snapshotted brk address to the new target_brk * If no brk mapping has been made until now, change_mapped won't change anything and just create a new mapping. @@ -839,34 +861,34 @@ where // TODO handle huge pages - #[cfg(any(cpu_target = "arm", cpu_target = "mips"))] + #[cfg(any(cpu_target = "arm", cpu_target = "mips", cpu_target = "riscv32"))] if sys_const == SYS_mmap2 { if let Ok(prot) = MmapPerms::try_from(a2 as i32) { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.add_mapped(result, a1 as usize, Some(prot)); } } - #[cfg(not(cpu_target = "arm"))] + #[cfg(not(any(cpu_target = "arm", cpu_target = "riscv32")))] if sys_const == SYS_mmap { if let Ok(prot) = MmapPerms::try_from(a2 as i32) { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.add_mapped(result, a1 as usize, Some(prot)); } } if sys_const == SYS_mremap { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); // TODO get the old permissions from the removed mapping h.remove_mapped(a0, a1 as usize); h.add_mapped(result, a2 as usize, None); } else if sys_const == SYS_mprotect { if let Ok(prot) = MmapPerms::try_from(a2 as i32) { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); h.change_mapped(a0, a1 as usize, Some(prot)); } } else if sys_const == SYS_munmap { - let h = hooks.match_helper_mut::().unwrap(); + let h = emulator_modules.get_mut::().unwrap(); if !h.accurate_unmap && !h.is_unmap_allowed(a0, a1 as usize) { h.remove_mapped(a0, a1 as usize); } diff --git a/libafl_qemu/src/qemu/config.rs b/libafl_qemu/src/qemu/config.rs new file mode 100644 index 0000000000..b17b3e5913 --- /dev/null +++ b/libafl_qemu/src/qemu/config.rs @@ -0,0 +1,406 @@ +use core::{ + fmt, + fmt::{Display, Formatter}, +}; +use std::{ + path::{Path, PathBuf}, + sync::OnceLock, +}; + +use getset::Getters; +use libafl_derive; +use strum_macros; +use typed_builder::TypedBuilder; + +use crate::{Qemu, QemuInitError}; + +pub(super) static QEMU_CONFIG: OnceLock = OnceLock::new(); + +#[cfg(feature = "systemmode")] +#[derive(Debug, strum_macros::Display, Clone)] +#[strum(prefix = "-accel ", serialize_all = "lowercase")] +pub enum Accelerator { + Kvm, + Tcg, +} + +#[derive(Debug, 
strum_macros::Display, Clone)] +#[strum(prefix = "if=", serialize_all = "lowercase")] +pub enum DriveInterface { + Floppy, + Ide, + Mtd, + None, + Pflash, + Scsi, + Sd, + Virtio, +} + +#[derive(Debug, strum_macros::Display, Clone)] +#[strum(prefix = "format=", serialize_all = "lowercase")] +pub enum DiskImageFileFormat { + Qcow2, + Raw, +} + +#[derive(Debug, Clone, Default, TypedBuilder)] +pub struct Drive { + #[builder(default, setter(strip_option, into))] + file: Option, + #[builder(default, setter(strip_option))] + format: Option, + #[builder(default, setter(strip_option))] + interface: Option, +} + +impl Display for Drive { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "-drive")?; + + let mut is_first_option = true; + let mut separator = || { + if is_first_option { + is_first_option = false; + " " + } else { + "," + } + }; + + if let Some(file) = &self.file { + write!(f, "{}file={}", separator(), file.to_str().unwrap())?; + } + if let Some(format) = &self.format { + write!(f, "{}{format}", separator())?; + } + if let Some(interface) = &self.interface { + write!(f, "{}{interface}", separator())?; + } + + Ok(()) + } +} + +#[derive(Debug, strum_macros::Display, Clone)] +#[strum(prefix = "-serial ", serialize_all = "lowercase")] +pub enum Serial { + None, + Null, + Stdio, +} + +#[derive(Debug, strum_macros::Display, Clone)] +#[strum(prefix = "-monitor ", serialize_all = "lowercase")] +pub enum Monitor { + None, + Null, + Stdio, +} + +/// Set the directory for the BIOS, VGA BIOS and keymaps. +/// Corresponds to the `-L` option of QEMU. +#[cfg(feature = "systemmode")] +#[derive(Debug, Clone)] +pub struct Bios { + path: PathBuf, +} + +#[cfg(feature = "systemmode")] +impl Display for Bios { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "-L {}", self.path.to_str().unwrap()) + } +} + +#[cfg(feature = "systemmode")] +impl> From for Bios { + fn from(path: R) -> Self { + Self { + path: path.as_ref().to_path_buf(), + } + } +} + +#[cfg(feature = "systemmode")] +#[derive(Debug, Clone)] +pub struct Kernel { + path: PathBuf, +} + +#[cfg(feature = "systemmode")] +impl Display for Kernel { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "-kernel {}", self.path.to_str().unwrap()) + } +} + +#[cfg(feature = "systemmode")] +impl> From for Kernel { + fn from(path: R) -> Self { + Self { + path: path.as_ref().to_path_buf(), + } + } +} + +#[derive(Debug, Clone)] +pub struct LoadVM { + path: PathBuf, +} + +impl Display for LoadVM { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "-loadvm {}", self.path.to_str().unwrap()) + } +} + +impl> From for LoadVM { + fn from(path: R) -> Self { + Self { + path: path.as_ref().to_path_buf(), + } + } +} + +#[derive(Debug, Clone)] +pub struct Machine { + name: String, +} + +impl Display for Machine { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "-machine {}", self.name) + } +} + +impl> From for Machine { + fn from(name: R) -> Self { + Self { + name: name.as_ref().to_string(), + } + } +} + +#[derive(Debug, Clone, strum_macros::Display)] +pub enum Snapshot { + #[strum(serialize = "-snapshot")] + ENABLE, + #[strum(serialize = "")] + DISABLE, +} + +impl From for Snapshot { + fn from(snapshot: bool) -> Self { + if snapshot { + Snapshot::ENABLE + } else { + Snapshot::DISABLE + } + } +} + +/// When set to DISABLE, corresponds to the `-S` option of QEMU. 
+#[derive(Debug, Clone, strum_macros::Display)] +pub enum StartCPU { + #[strum(serialize = "")] + ENABLE, + #[strum(serialize = "-S")] + DISABLE, +} + +impl From for StartCPU { + fn from(start_cpu: bool) -> Self { + if start_cpu { + StartCPU::ENABLE + } else { + StartCPU::DISABLE + } + } +} + +#[derive(Debug, Clone, strum_macros::Display)] +pub enum NoGraphic { + #[strum(serialize = "-nographic")] + ENABLE, + #[strum(serialize = "")] + DISABLE, +} + +impl From for NoGraphic { + fn from(no_graphic: bool) -> Self { + if no_graphic { + NoGraphic::ENABLE + } else { + NoGraphic::DISABLE + } + } +} + +#[derive(Debug, Clone)] +pub enum RamSize { + MB(u32), + GB(u32), +} + +impl Display for RamSize { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + RamSize::MB(mb) => write!(f, "-m {mb}M"), + RamSize::GB(gb) => write!(f, "-m {gb}G"), + } + } +} + +#[derive(Debug, Clone)] +pub struct SmpCpus { + pub cpus: u32, +} + +impl Display for SmpCpus { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "-smp {}", self.cpus) + } +} + +#[derive(Debug, Clone, strum_macros::Display)] +pub enum VgaPci { + #[strum(serialize = "-device VGA")] + ENABLE, + #[strum(serialize = "")] + DISABLE, +} + +impl From for VgaPci { + fn from(vga_pci: bool) -> Self { + if vga_pci { + VgaPci::ENABLE + } else { + VgaPci::DISABLE + } + } +} + +#[cfg(feature = "usermode")] +#[derive(Debug, Clone)] +pub struct Program { + path: PathBuf, +} + +#[cfg(feature = "usermode")] +impl Display for Program { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.path.to_str().unwrap()) + } +} + +#[cfg(feature = "usermode")] +impl> From for Program { + fn from(path: R) -> Self { + Self { + path: path.as_ref().to_path_buf(), + } + } +} + +#[derive(Debug, Clone, libafl_derive::Display, TypedBuilder, Getters)] +#[builder(build_method(into = Result), builder_method(vis = "pub(crate)", + doc = "Since Qemu is a zero sized struct, this is not a completely standard builder pattern. \ + The Qemu configuration is not stored in the Qemu struct after build() but in QEMU_CONFIG \ + Therefore, to use the derived builder and avoid boilerplate a builder for QemuConfig is \ + derived. \ + The QemuConfig::builder is called in Qemu::builder() which is the only place where it should \ + be called, in this way the one to one matching of Qemu and QemuConfig is enforced. 
Therefore \ + its visibility is pub(crate)"))] +#[getset(get = "pub")] +pub struct QemuConfig { + #[cfg(feature = "systemmode")] + #[builder(default, setter(strip_option))] + accelerator: Option, + #[cfg(feature = "systemmode")] + #[builder(default, setter(strip_option, into))] + bios: Option, + #[builder(default, setter(into))] + drives: Vec, + #[cfg(feature = "systemmode")] + #[builder(default, setter(strip_option, into))] + kernel: Option, + #[builder(default, setter(strip_option, into))] + load_vm: Option, + #[builder(default, setter(strip_option, into))] + machine: Option, + #[builder(default, setter(strip_option))] + monitor: Option, + #[builder(default, setter(strip_option, into))] + no_graphic: Option, + #[builder(default, setter(strip_option))] + ram_size: Option, + #[builder(default, setter(strip_option))] + serial: Option, + #[builder(default, setter(strip_option))] + smp_cpus: Option, + #[builder(default, setter(strip_option, into))] + snapshot: Option, + #[builder(default, setter(strip_option, into))] + vga_pci: Option, + #[builder(default, setter(strip_option, into))] + start_cpu: Option, + #[cfg(feature = "usermode")] + #[builder(setter(into))] + program: Program, +} // Adding something here? Please leave Program as the last field + +impl From for Result { + /// This method is necessary to make the API resemble a typical builder pattern, i.e. + /// `Qemu::builder().foo(bar).build()`, while still leveraging `TypedBuilder` for this + /// non-standard use case where `Qemu` doesn't store the configuration. + /// Internally, `TypedBuilder` is used to generate a builder for `QemuConfig`. + /// This `QemuConfig.into()` method is used by the derived `QemuConfigBuilder.build()` + /// to go from `QemuConfigBuilder` to `QemuConfig`, and finally to `Qemu` in one fn. + /// + /// # Errors + /// returns `QemuInitError` if the Qemu initialization fails, including cases where Qemu has + /// already been initialized. + fn from(config: QemuConfig) -> Self { + let args = config + .to_string() + .split(' ') + .map(ToString::to_string) + .collect::>(); + let qemu = Qemu::init(&args)?; + QEMU_CONFIG + .set(config) + .map_err(|_| unreachable!("BUG: QEMU_CONFIG was already set but Qemu was not init!"))?; + Ok(qemu) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + #[cfg(feature = "usermode")] + fn usermode() { + let program = "/bin/pwd"; + let qemu = Qemu::builder().program("/bin/pwd").build().unwrap(); + let config = qemu.get_config().unwrap(); + assert_eq!(config.to_string().trim(), program.trim()); + } + + #[test] + fn drive_no_file_fmt() { + let drive = Drive::builder() + .format(DiskImageFileFormat::Raw) + .interface(DriveInterface::Ide) + .build(); + assert_eq!(drive.to_string(), "-drive format=raw,if=ide"); + } + + #[test] + #[cfg(feature = "systemmode")] + fn accelerator_kvm_to_string() { + let accel = Accelerator::Kvm; + assert_eq!(accel.to_string(), "-accel kvm"); + } +} diff --git a/libafl_qemu/src/qemu/hooks.rs b/libafl_qemu/src/qemu/hooks.rs new file mode 100644 index 0000000000..334b558ae1 --- /dev/null +++ b/libafl_qemu/src/qemu/hooks.rs @@ -0,0 +1,1131 @@ +//! 
The high-level hooks +#![allow(clippy::type_complexity)] +#![allow(clippy::missing_transmute_annotations)] +#![allow(clippy::too_many_arguments)] + +use core::{ffi::c_void, fmt::Debug, mem::transmute, ptr}; + +use libafl::{executors::hooks::inprocess::inprocess_get_state, inputs::UsesInput}; +#[cfg(feature = "usermode")] +use libafl_qemu_sys::libafl_dump_core_hook; +use libafl_qemu_sys::{CPUArchStatePtr, CPUStatePtr, FatPtr, GuestAddr, GuestUsize}; +#[cfg(feature = "python")] +use pyo3::{pyclass, pymethods, FromPyObject}; + +use crate::{ + emu::EmulatorModules, + qemu::{MemAccessInfo, Qemu}, + sys::TCGTemp, + HookData, HookId, +}; + +pub const SKIP_EXEC_HOOK: u64 = u64::MAX; + +// all kinds of hooks +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum HookRepr { + Function(*const c_void), + Closure(FatPtr), + Empty, +} + +#[derive(Debug)] +pub struct TcgHookState { + id: H, + gen: HookRepr, + post_gen: HookRepr, + execs: [HookRepr; N], +} + +#[derive(Debug)] +pub struct HookState { + id: H, + pre_run: HookRepr, + post_run: HookRepr, +} + +impl TcgHookState { + pub fn new(id: H, gen: HookRepr, post_gen: HookRepr, execs: [HookRepr; N]) -> Self { + Self { + id, + gen, + post_gen, + execs, + } + } + + /// # Safety + /// + /// ids should be in sync with QEMU hooks ids. + pub unsafe fn set_id(&mut self, id: H) { + self.id = id; + } +} + +impl HookState { + pub fn new(id: H, pre_run: HookRepr, post_run: HookRepr) -> Self { + Self { + id, + pre_run, + post_run, + } + } + + /// # Safety + /// + /// ids should be in sync with QEMU hooks ids. + pub unsafe fn set_id(&mut self, id: H) { + self.id = id; + } +} + +pub enum Hook { + Function(F), + Closure(C), + Raw(R), + Empty, +} + +#[repr(C)] +#[cfg_attr(feature = "python", pyclass)] +#[cfg_attr(feature = "python", derive(FromPyObject))] +pub struct SyscallHookResult { + pub retval: GuestAddr, + pub skip_syscall: bool, +} + +impl Hook { + pub fn is_empty(&self) -> bool { + matches!(self, Hook::Empty) + } +} + +macro_rules! create_wrapper { + ($name:ident, ($($param:ident : $param_type:ty),*)) => { + paste::paste! { + pub extern "C" fn [](hook: &mut c_void, $($param: $param_type),*) + where + S: UsesInput + Unpin, + { + unsafe { + let modules = EmulatorModules::::emulator_modules_mut_unchecked(); + let func: fn(&mut EmulatorModules, Option<&mut S>, $($param_type),*) = transmute(ptr::from_mut::(hook)); + func(modules, inprocess_get_state::(), $($param),*); + } + } + + pub extern "C" fn [](hook: &mut FatPtr, $($param: $param_type),*) + where + S: Unpin + UsesInput, + { + unsafe { + let modules = EmulatorModules::::emulator_modules_mut_unchecked(); + let func: &mut Box, Option<&mut S>, $($param_type),*)> = &mut *(ptr::from_mut::(hook) as *mut Box, Option<&mut S>, $($param_type),*)>); + func(modules, inprocess_get_state::(), $($param),*); + } + } + } + }; + ($name:ident, ($($param:ident : $param_type:ty),*), $ret_type:ty) => { + paste::paste! 
{ + pub extern "C" fn [](hook: &mut c_void, $($param: $param_type),*) -> $ret_type + where + S: UsesInput + Unpin, + { + unsafe { + let modules = EmulatorModules::::emulator_modules_mut_unchecked(); + let func: fn(&mut EmulatorModules, Option<&mut S>, $($param_type),*) -> $ret_type= transmute(ptr::from_mut::(hook)); + func(modules, inprocess_get_state::(), $($param),*) + } + } + + pub extern "C" fn [](hook: &mut FatPtr, $($param: $param_type),*) -> $ret_type + where + S: UsesInput + Unpin, + { + unsafe { + let modules = EmulatorModules::::emulator_modules_mut_unchecked(); + let func: &mut Box, Option<&mut S>, $($param_type),*) -> $ret_type> = &mut *(ptr::from_mut::(hook) as *mut Box, Option<&mut S>, $($param_type),*) -> $ret_type>); + func(modules, inprocess_get_state::(), $($param),*) + } + } + } + }; +} + +macro_rules! create_pre_exec_wrapper { + ($name:ident, ($($param:ident : $param_type:ty),*), $hook_id:ident) => { + paste::paste! { + pub extern "C" fn [<$name _pre_exec_hook_wrapper>](hook: &mut HookState<$hook_id>, $($param: $param_type),*) + where + S: UsesInput + Unpin, + { + unsafe { + let modules = EmulatorModules::::emulator_modules_mut_unchecked(); + + match &mut hook.pre_run { + HookRepr::Function(ptr) => { + let func: fn(&mut EmulatorModules, Option<&mut S>, $($param_type),*) = + transmute(*ptr); + func(modules, inprocess_get_state::(), $($param),*) + } + HookRepr::Closure(ptr) => { + let func: &mut Box< + dyn FnMut(&mut EmulatorModules, Option<&mut S>, $($param_type),*), + > = &mut *(ptr::from_mut::(ptr) as *mut Box< + dyn FnMut(&mut EmulatorModules, Option<&mut S>, $($param_type),*), + >); + func(modules, inprocess_get_state::(), $($param),*) + } + _ => (), + } + } + } + } + } +} + +macro_rules! create_post_exec_wrapper { + ($name:ident, ($($param:ident : $param_type:ty),*), $hook_id:ident) => { + paste::paste! { + pub extern "C" fn [<$name _post_exec_hook_wrapper>](hook: &mut HookState<$hook_id>, $($param: $param_type),*) + where + S: UsesInput + Unpin, + { + unsafe { + let modules = EmulatorModules::::emulator_modules_mut_unchecked(); + + match &mut hook.post_run { + HookRepr::Function(ptr) => { + let func: fn(&mut EmulatorModules, Option<&mut S>, $($param_type),*) = + transmute(*ptr); + func(modules, inprocess_get_state::(), $($param),*); + } + HookRepr::Closure(ptr) => { + let func: &mut Box< + dyn FnMut(&mut EmulatorModules, Option<&mut S>, $($param_type),*), + > = &mut *(ptr::from_mut::(ptr) as *mut Box< + dyn FnMut(&mut EmulatorModules, Option<&mut S>, $($param_type),*), + >); + func(modules, inprocess_get_state::(), $($param),*); + } + _ => (), + } + } + } + } + } +} + +macro_rules! create_gen_wrapper { + ($name:ident, ($($param:ident : $param_type:ty),*), $ret_type:ty, $execs:literal, $hook_id:ident) => { + paste::paste! 
{ + pub extern "C" fn [<$name _gen_hook_wrapper>](hook: &mut TcgHookState<{ $execs }, $hook_id>, $($param: $param_type),*) -> $ret_type + where + S: UsesInput + Unpin, + { + unsafe { + let modules = EmulatorModules::::emulator_modules_mut_unchecked(); + + match &mut hook.gen { + HookRepr::Function(ptr) => { + let func: fn(&mut EmulatorModules, Option<&mut S>, $($param_type),*) -> Option<$ret_type> = + transmute(*ptr); + func(modules, inprocess_get_state::(), $($param),*).map_or(SKIP_EXEC_HOOK, |id| id) + } + HookRepr::Closure(ptr) => { + let func: &mut Box< + dyn FnMut(&mut EmulatorModules, Option<&mut S>, $($param_type),*) -> Option<$ret_type>, + > = &mut *(ptr::from_mut::(ptr) as *mut Box, Option<&mut S>, $($param_type),*) -> Option<$ret_type>>); + func(modules, inprocess_get_state::(), $($param),*).map_or(SKIP_EXEC_HOOK, |id| id) + } + _ => 0, + } + } + } + } + } +} + +macro_rules! create_post_gen_wrapper { + ($name:ident, ($($param:ident : $param_type:ty),*), $execs:literal, $hook_id:ident) => { + paste::paste! { + pub extern "C" fn [<$name _post_gen_hook_wrapper>](hook: &mut TcgHookState<{ $execs }, $hook_id>, $($param: $param_type),*) + where + S: UsesInput + Unpin, + { + unsafe { + let modules = EmulatorModules::::emulator_modules_mut_unchecked(); + match &mut hook.post_gen { + HookRepr::Function(ptr) => { + let func: fn(&mut EmulatorModules, Option<&mut S>, $($param_type),*) = + transmute(*ptr); + func(modules, inprocess_get_state::(), $($param),*); + } + HookRepr::Closure(ptr) => { + let func: &mut Box< + dyn FnMut(&mut EmulatorModules, Option<&mut S>, $($param_type),*), + > = &mut *(ptr::from_mut::(ptr) as *mut Box, Option<&mut S>, $($param_type),*)>); + func(modules, inprocess_get_state::(), $($param),*); + } + _ => (), + } + } + } + } + } +} + +macro_rules! create_exec_wrapper { + ($name:ident, ($($param:ident : $param_type:ty),*), $execidx:literal, $execs:literal, $hook_id:ident) => { + paste::paste! { + pub extern "C" fn [<$name _ $execidx _exec_hook_wrapper>](hook: &mut TcgHookState<{ $execs }, $hook_id>, $($param: $param_type),*) + where + S: UsesInput + Unpin, + { + unsafe { + let modules = EmulatorModules::::emulator_modules_mut_unchecked(); + match &mut hook.execs[$execidx] { + HookRepr::Function(ptr) => { + let func: fn(&mut EmulatorModules, Option<&mut S>, $($param_type),*) = transmute(*ptr); + func(modules, inprocess_get_state::(), $($param),*); + } + HookRepr::Closure(ptr) => { + let func: &mut Box, Option<&mut S>, $($param_type),*)> = + &mut *(ptr::from_mut::(ptr) as *mut Box, Option<&mut S>, $($param_type),*)>); + func(modules, inprocess_get_state::(), $($param),*); + } + _ => (), + } + } + } + } + } +} + +macro_rules! create_hook_id { + ($name:ident, $sys:ident, true) => { + paste::paste! { + #[derive(Clone, Copy, PartialEq, Debug)] + pub struct [<$name HookId>](pub(crate) usize); + impl [<$name HookId>] { + #[must_use] + pub fn invalid() -> Self { + Self(0) + } + } + impl HookId for [<$name HookId>] { + fn remove(&self, invalidate_block: bool) -> bool { + unsafe { libafl_qemu_sys::$sys(self.0, invalidate_block.into()) != 0 } + } + } + } + }; + ($name:ident, $sys:ident, false) => { + paste::paste! { + #[derive(Clone, Copy, PartialEq, Debug)] + pub struct [<$name HookId>](pub(crate) usize); + impl [<$name HookId>] { + #[must_use] + pub fn invalid() -> Self { + Self(0) + } + } + impl HookId for [<$name HookId>] { + fn remove(&self, _invalidate_block: bool) -> bool { + unsafe { libafl_qemu_sys::$sys(self.0) != 0 } + } + } + } + }; +} + +macro_rules! 
create_hook_types { + ($name:ident, $fn_type:ty, $closure_type:ty, $raw_type:ty) => { + paste::paste! { + pub type [<$name HookFn>] = $fn_type; + pub type [<$name HookClosure>] = $closure_type; + pub type [<$name HookRaw>] = $raw_type; + + pub type [<$name Hook>] = Hook< + [<$name HookFn>], + [<$name HookClosure>], + [<$name HookRaw>], + >; + } + }; +} + +// Instruction hook wrappers +create_hook_types!( + Instruction, + fn(&mut EmulatorModules, Option<&mut S>, GuestAddr), + Box FnMut(&'a mut EmulatorModules, Option<&'a mut S>, GuestAddr)>, + extern "C" fn(*const (), pc: GuestAddr) +); +create_hook_id!(Instruction, libafl_qemu_remove_instruction_hook, true); +create_wrapper!(instruction, (pc: GuestAddr)); + +// Backdoor hook wrappers +create_hook_types!( + Backdoor, + fn(&mut EmulatorModules, Option<&mut S>, cpu: CPUArchStatePtr, GuestAddr), + Box FnMut(&'a mut EmulatorModules, Option<&'a mut S>, GuestAddr)>, + extern "C" fn(*const (), cpu: CPUArchStatePtr, pc: GuestAddr) +); +create_hook_id!(Backdoor, libafl_qemu_remove_backdoor_hook, true); +create_wrapper!(backdoor, (cpu: CPUArchStatePtr, pc: GuestAddr)); + +// Pre-syscall hook wrappers +#[cfg(feature = "usermode")] +create_hook_types!( + PreSyscall, + fn( + &mut EmulatorModules, + Option<&mut S>, + sys_num: i32, + a0: GuestAddr, + a1: GuestAddr, + a2: GuestAddr, + a3: GuestAddr, + a4: GuestAddr, + a5: GuestAddr, + a6: GuestAddr, + a7: GuestAddr, + ) -> SyscallHookResult, + Box< + dyn for<'a> FnMut( + &'a mut EmulatorModules, + Option<&'a mut S>, + i32, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + ) -> SyscallHookResult, + >, + extern "C" fn( + *const (), + i32, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + ) -> SyscallHookResult +); +#[cfg(feature = "usermode")] +create_hook_id!(PreSyscall, libafl_qemu_remove_pre_syscall_hook, false); +#[cfg(feature = "usermode")] +create_wrapper!( + pre_syscall, + ( + sys_num: i32, + a0: GuestAddr, + a1: GuestAddr, + a2: GuestAddr, + a3: GuestAddr, + a4: GuestAddr, + a5: GuestAddr, + a6: GuestAddr, + a7: GuestAddr + ), + SyscallHookResult +); + +// Post-syscall hook wrappers +#[cfg(feature = "usermode")] +create_hook_types!( + PostSyscall, + fn( + &mut EmulatorModules, + Option<&mut S>, + res: GuestAddr, + sys_num: i32, + a0: GuestAddr, + a1: GuestAddr, + a2: GuestAddr, + a3: GuestAddr, + a4: GuestAddr, + a5: GuestAddr, + a6: GuestAddr, + a7: GuestAddr, + ) -> GuestAddr, + Box< + dyn for<'a> FnMut( + &'a mut EmulatorModules, + Option<&mut S>, + GuestAddr, + i32, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + ) -> GuestAddr, + >, + extern "C" fn( + *const (), + GuestAddr, + i32, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + ) -> GuestAddr +); +#[cfg(feature = "usermode")] +create_hook_id!(PostSyscall, libafl_qemu_remove_post_syscall_hook, false); +#[cfg(feature = "usermode")] +create_wrapper!( + post_syscall, + ( + res: GuestAddr, + sys_num: i32, + a0: GuestAddr, + a1: GuestAddr, + a2: GuestAddr, + a3: GuestAddr, + a4: GuestAddr, + a5: GuestAddr, + a6: GuestAddr, + a7: GuestAddr + ), + GuestAddr +); + +// New thread hook wrappers +create_hook_types!( + NewThread, + fn(&mut EmulatorModules, Option<&mut S>, env: CPUArchStatePtr, tid: u32) -> bool, + Box< + dyn for<'a> FnMut( + &'a mut EmulatorModules, + Option<&'a mut S>, + CPUArchStatePtr, + u32, + ) -> bool, + >, + 
extern "C" fn(*const (), env: CPUArchStatePtr, tid: u32) -> bool +); +create_hook_id!(NewThread, libafl_qemu_remove_new_thread_hook, false); +create_wrapper!(new_thread, (env: CPUArchStatePtr, tid: u32), bool); + +// CPU Run hook wrappers +create_hook_types!( + CpuPreRun, + fn(&mut EmulatorModules, Option<&mut S>, cpu: CPUStatePtr), + Box FnMut(&'a mut EmulatorModules, Option<&'a mut S>, CPUStatePtr)>, + extern "C" fn(*const (), cpu: CPUStatePtr) +); +create_hook_types!( + CpuPostRun, + fn(&mut EmulatorModules, Option<&mut S>, cpu: CPUStatePtr), + Box FnMut(&'a mut EmulatorModules, Option<&'a mut S>, CPUStatePtr)>, + extern "C" fn(*const (), cpu: CPUStatePtr) +); +create_hook_id!(CpuRun, libafl_qemu_remove_cpu_run_hook, false); +create_pre_exec_wrapper!(cpu_run, (cpu: CPUStatePtr), CpuRunHookId); +create_post_exec_wrapper!(cpu_run, (addr: CPUStatePtr), CpuRunHookId); +create_wrapper!(cpu_run, (cpu: CPUStatePtr)); + +// Edge hook wrappers +create_hook_types!( + EdgeGen, + fn(&mut EmulatorModules, Option<&mut S>, src: GuestAddr, dest: GuestAddr) -> Option, + Box< + dyn for<'a> FnMut( + &'a mut EmulatorModules, + Option<&'a mut S>, + GuestAddr, + GuestAddr, + ) -> Option, + >, + extern "C" fn(*const (), src: GuestAddr, dest: GuestAddr) -> u64 +); +create_hook_types!( + EdgeExec, + fn(&mut EmulatorModules, Option<&mut S>, id: u64), + Box FnMut(&'a mut EmulatorModules, Option<&'a mut S>, u64)>, + unsafe extern "C" fn(*const (), id: u64) +); +create_hook_id!(Edge, libafl_qemu_remove_edge_hook, true); +create_gen_wrapper!(edge, (src: GuestAddr, dest: GuestAddr), u64, 1, EdgeHookId); +create_exec_wrapper!(edge, (id: u64), 0, 1, EdgeHookId); + +// Block hook wrappers +create_hook_types!( + BlockGen, + fn(&mut EmulatorModules, Option<&mut S>, pc: GuestAddr) -> Option, + Box< + dyn for<'a> FnMut( + &'a mut EmulatorModules, + Option<&'a mut S>, + GuestAddr, + ) -> Option, + >, + unsafe extern "C" fn(*const (), pc: GuestAddr) -> u64 +); +create_hook_types!( + BlockPostGen, + fn(&mut EmulatorModules, Option<&mut S>, pc: GuestAddr, block_length: GuestUsize), + Box FnMut(&'a mut EmulatorModules, Option<&mut S>, GuestAddr, GuestUsize)>, + unsafe extern "C" fn(*const (), pc: GuestAddr, block_length: GuestUsize) +); +create_hook_types!( + BlockExec, + fn(&mut EmulatorModules, Option<&mut S>, id: u64), + Box FnMut(&'a mut EmulatorModules, Option<&'a mut S>, u64)>, + unsafe extern "C" fn(*const (), id: u64) +); + +create_hook_id!(Block, libafl_qemu_remove_block_hook, true); +create_gen_wrapper!(block, (addr: GuestAddr), u64, 1, BlockHookId); +create_post_gen_wrapper!(block, (addr: GuestAddr, len: GuestUsize), 1, BlockHookId); +create_exec_wrapper!(block, (id: u64), 0, 1, BlockHookId); + +// Read hook wrappers +create_hook_types!( + ReadGen, + fn( + qemu_modules: &mut EmulatorModules, + Option<&mut S>, + pc: GuestAddr, + addr: *mut TCGTemp, + info: MemAccessInfo, + ) -> Option, + Box< + dyn for<'a> FnMut( + &'a mut EmulatorModules, + Option<&'a mut S>, + GuestAddr, + *mut TCGTemp, + MemAccessInfo, + ) -> Option, + >, + unsafe extern "C" fn(*const (), pc: GuestAddr, addr: *mut TCGTemp, info: MemAccessInfo) -> u64 +); +create_hook_types!( + ReadExec, + fn(&mut EmulatorModules, Option<&mut S>, id: u64, addr: GuestAddr), + Box FnMut(&'a mut EmulatorModules, Option<&'a mut S>, u64, GuestAddr)>, + unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr) +); +create_hook_types!( + ReadExecN, + fn(&mut EmulatorModules, Option<&mut S>, id: u64, addr: GuestAddr, size: usize), + Box< + dyn for<'a> FnMut(&'a mut 
EmulatorModules, Option<&'a mut S>, u64, GuestAddr, usize), + >, + unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr, size: usize) +); +create_hook_id!(Read, libafl_qemu_remove_read_hook, true); +create_gen_wrapper!(read, (pc: GuestAddr, addr: *mut TCGTemp, info: MemAccessInfo), u64, 5, ReadHookId); +create_exec_wrapper!(read, (id: u64, addr: GuestAddr), 0, 5, ReadHookId); +create_exec_wrapper!(read, (id: u64, addr: GuestAddr), 1, 5, ReadHookId); +create_exec_wrapper!(read, (id: u64, addr: GuestAddr), 2, 5, ReadHookId); +create_exec_wrapper!(read, (id: u64, addr: GuestAddr), 3, 5, ReadHookId); +create_exec_wrapper!( + read, + (id: u64, addr: GuestAddr, size: usize), + 4, + 5, + ReadHookId +); + +// Write hook wrappers +create_hook_types!( + WriteGen, + fn( + &mut EmulatorModules, + Option<&mut S>, + pc: GuestAddr, + addr: *mut TCGTemp, + info: MemAccessInfo, + ) -> Option, + Box< + dyn for<'a> FnMut( + &'a mut EmulatorModules, + Option<&'a mut S>, + GuestAddr, + *mut TCGTemp, + MemAccessInfo, + ) -> Option, + >, + unsafe extern "C" fn(*const (), pc: GuestAddr, addr: *mut TCGTemp, info: MemAccessInfo) -> u64 +); +create_hook_types!( + WriteExec, + fn(&mut EmulatorModules, Option<&mut S>, id: u64, addr: GuestAddr), + Box FnMut(&'a mut EmulatorModules, Option<&'a mut S>, u64, GuestAddr)>, + unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr) +); +create_hook_types!( + WriteExecN, + fn(&mut EmulatorModules, Option<&mut S>, id: u64, addr: GuestAddr, size: usize), + Box< + dyn for<'a> FnMut(&'a mut EmulatorModules, Option<&'a mut S>, u64, GuestAddr, usize), + >, + unsafe extern "C" fn(*const (), id: u64, addr: GuestAddr, size: usize) +); +create_hook_id!(Write, libafl_qemu_remove_write_hook, true); +create_gen_wrapper!(write, (pc: GuestAddr, addr: *mut TCGTemp, info: MemAccessInfo), u64, 5, WriteHookId); +create_exec_wrapper!(write, (id: u64, addr: GuestAddr), 0, 5, WriteHookId); +create_exec_wrapper!(write, (id: u64, addr: GuestAddr), 1, 5, WriteHookId); +create_exec_wrapper!(write, (id: u64, addr: GuestAddr), 2, 5, WriteHookId); +create_exec_wrapper!(write, (id: u64, addr: GuestAddr), 3, 5, WriteHookId); +create_exec_wrapper!( + write, + (id: u64, addr: GuestAddr, size: usize), + 4, + 5, + WriteHookId +); + +// Cmp hook wrappers +create_hook_types!( + CmpGen, + fn(&mut EmulatorModules, Option<&mut S>, pc: GuestAddr, size: usize) -> Option, + Box< + dyn for<'a> FnMut( + &'a mut EmulatorModules, + Option<&'a mut S>, + GuestAddr, + usize, + ) -> Option, + >, + unsafe extern "C" fn(*const (), pc: GuestAddr, size: usize) -> u64 +); +pub type CmpExecHook = Hook< + fn(&mut EmulatorModules, Option<&mut S>, id: u64, v0: SZ, v1: SZ), + Box FnMut(&'a mut EmulatorModules, Option<&'a mut S>, u64, SZ, SZ)>, + unsafe extern "C" fn(*const (), id: u64, v0: SZ, v1: SZ), +>; +create_hook_id!(Cmp, libafl_qemu_remove_cmp_hook, true); +create_gen_wrapper!(cmp, (pc: GuestAddr, size: usize), u64, 4, CmpHookId); +create_exec_wrapper!(cmp, (id: u64, v0: u8, v1: u8), 0, 4, CmpHookId); +create_exec_wrapper!(cmp, (id: u64, v0: u16, v1: u16), 1, 4, CmpHookId); +create_exec_wrapper!(cmp, (id: u64, v0: u32, v1: u32), 2, 4, CmpHookId); +create_exec_wrapper!(cmp, (id: u64, v0: u64, v1: u64), 3, 4, CmpHookId); + +// Jmp hook wrappers +create_hook_types!( + JmpGen, + fn(&mut EmulatorModules, Option<&mut S>, src: GuestAddr, dest: GuestAddr) -> Option, + Box< + dyn for<'a> FnMut( + &'a mut EmulatorModules, + Option<&'a mut S>, + GuestAddr, + GuestAddr, + ) -> Option, + >, + extern "C" fn(*const (), src: GuestAddr, 
dest: GuestAddr) -> u64 +); +create_hook_types!( + JmpExec, + fn(&mut EmulatorModules, Option<&mut S>, src: GuestAddr, dest: GuestAddr, id: u64), + Box FnMut(&'a mut EmulatorModules, Option<&'a mut S>, GuestAddr, GuestAddr, u64)>, + unsafe extern "C" fn(*const (), src: GuestAddr, dest: GuestAddr, id: u64) +); +create_hook_id!(Jmp, libafl_qemu_remove_jmp_hook, true); +create_gen_wrapper!(jmp, (src: GuestAddr, dest: GuestAddr), u64, 1, JmpHookId); +create_exec_wrapper!(jmp, (src: GuestAddr, dst: GuestAddr, id: u64), 0, 1, JmpHookId); +// static mut JMP_HOOKS: Vec>>> = vec![]; + +// Crash hook wrappers +#[cfg(feature = "usermode")] +pub type CrashHookFn = fn(&mut EmulatorModules, i32); +#[cfg(feature = "usermode")] +pub type CrashHookClosure = Box, i32)>; + +/// The thin wrapper around QEMU hooks. +/// It is considered unsafe to use it directly. +#[derive(Clone, Copy, Debug)] +pub struct QemuHooks { + _private: (), +} + +impl QemuHooks { + /// Get a `QemuHooks` object. + /// Same as `QemuHooks::get`, but without checking whether `QemuHooks` have been correctly initialized. + /// + /// # Safety + /// + /// Should not be used out of Qemu itself. + /// Prefer `Qemu::get` for a safe version of this method. + #[must_use] + pub unsafe fn get_unchecked() -> Self { + QemuHooks { _private: () } + } + + #[must_use] + pub fn get() -> Option { + // Use QEMU to check if hooks have been initialized. + Some(Qemu::get()?.hooks()) + } + + // TODO set T lifetime to be like Emulator + #[allow(clippy::missing_transmute_annotations)] + pub fn add_instruction_hooks>( + &self, + data: T, + addr: GuestAddr, + callback: unsafe extern "C" fn(T, GuestAddr), + invalidate_block: bool, + ) -> InstructionHookId { + unsafe { + let data: u64 = data.into().0; + let callback: extern "C" fn(u64, GuestAddr) = transmute(callback); + let num = libafl_qemu_sys::libafl_qemu_add_instruction_hooks( + addr.into(), + Some(callback), + data, + i32::from(invalidate_block), + ); + InstructionHookId(num) + } + } + + #[must_use] + pub fn remove_instruction_hooks_at(&self, addr: GuestAddr, invalidate_block: bool) -> usize { + unsafe { + libafl_qemu_sys::libafl_qemu_remove_instruction_hooks_at( + addr.into(), + i32::from(invalidate_block), + ) + } + } + + #[allow(clippy::missing_transmute_annotations)] + pub fn add_edge_hooks>( + &self, + data: T, + gen: Option u64>, + exec: Option, + ) -> EdgeHookId { + unsafe { + let data: u64 = data.into().0; + let gen: Option u64> = + transmute(gen); + let exec: Option = transmute(exec); + let num = libafl_qemu_sys::libafl_add_edge_hook(gen, exec, data); + EdgeHookId(num) + } + } + + #[allow(clippy::missing_transmute_annotations)] + pub fn add_block_hooks>( + &self, + data: T, + gen: Option u64>, + post_gen: Option, + exec: Option, + ) -> BlockHookId { + unsafe { + let data: u64 = data.into().0; + let gen: Option u64> = transmute(gen); + let post_gen: Option = + transmute(post_gen); + let exec: Option = transmute(exec); + let num = libafl_qemu_sys::libafl_add_block_hook(gen, post_gen, exec, data); + BlockHookId(num) + } + } + + #[allow(clippy::missing_transmute_annotations)] + pub fn add_cpu_run_hooks>( + &self, + data: T, + pre_exec: Option, + post_exec: Option, + ) -> CpuRunHookId { + unsafe { + let data: u64 = data.into().0; + let pre_exec: Option = transmute(pre_exec); + let post_gen: Option = transmute(post_exec); + let num = libafl_qemu_sys::libafl_hook_cpu_run_add(pre_exec, post_gen, data); + CpuRunHookId(num) + } + } + + /// `data` can be used to pass data that can be accessed as the first argument 
in the `gen` and the `exec` functions + /// + /// `gen` gets passed the current programm counter, mutable access to a `TCGTemp` and information about the memory + /// access being performed. + /// The `u64` return value is an id that gets passed to the `exec` functions as their second argument. + /// + /// `exec` hooks get invoked on every read performed by the guest + /// + /// `exec1`-`exec8` special case accesses of width 1-8 + /// + /// If there is no specialized hook for a given read width, the `exec_n` will be + /// called and its last argument will specify the access width + #[allow(clippy::missing_transmute_annotations)] + pub fn add_read_hooks>( + &self, + data: T, + gen: Option u64>, + exec1: Option, + exec2: Option, + exec4: Option, + exec8: Option, + exec_n: Option, + ) -> ReadHookId { + unsafe { + let data: u64 = data.into().0; + let gen: Option< + unsafe extern "C" fn( + u64, + GuestAddr, + *mut TCGTemp, + libafl_qemu_sys::MemOpIdx, + ) -> u64, + > = transmute(gen); + let exec1: Option = transmute(exec1); + let exec2: Option = transmute(exec2); + let exec4: Option = transmute(exec4); + let exec8: Option = transmute(exec8); + let exec_n: Option = + transmute(exec_n); + let num = libafl_qemu_sys::libafl_add_read_hook( + gen, exec1, exec2, exec4, exec8, exec_n, data, + ); + ReadHookId(num) + } + } + + // TODO add MemOp info + #[allow(clippy::missing_transmute_annotations)] + pub fn add_write_hooks>( + &self, + data: T, + gen: Option u64>, + exec1: Option, + exec2: Option, + exec4: Option, + exec8: Option, + exec_n: Option, + ) -> WriteHookId { + unsafe { + let data: u64 = data.into().0; + let gen: Option< + unsafe extern "C" fn( + u64, + GuestAddr, + *mut TCGTemp, + libafl_qemu_sys::MemOpIdx, + ) -> u64, + > = transmute(gen); + let exec1: Option = transmute(exec1); + let exec2: Option = transmute(exec2); + let exec4: Option = transmute(exec4); + let exec8: Option = transmute(exec8); + let exec_n: Option = + transmute(exec_n); + let num = libafl_qemu_sys::libafl_add_write_hook( + gen, exec1, exec2, exec4, exec8, exec_n, data, + ); + WriteHookId(num) + } + } + + #[allow(clippy::missing_transmute_annotations)] + pub fn add_cmp_hooks>( + &self, + data: T, + gen: Option u64>, + exec1: Option, + exec2: Option, + exec4: Option, + exec8: Option, + ) -> CmpHookId { + unsafe { + let data: u64 = data.into().0; + let gen: Option u64> = transmute(gen); + let exec1: Option = transmute(exec1); + let exec2: Option = transmute(exec2); + let exec4: Option = transmute(exec4); + let exec8: Option = transmute(exec8); + let num = libafl_qemu_sys::libafl_add_cmp_hook(gen, exec1, exec2, exec4, exec8, data); + CmpHookId(num) + } + } + + #[allow(clippy::missing_transmute_annotations)] + pub fn add_backdoor_hook>( + &self, + data: T, + callback: extern "C" fn(T, CPUArchStatePtr, GuestAddr), + ) -> BackdoorHookId { + unsafe { + let data: u64 = data.into().0; + let callback: extern "C" fn(u64, CPUArchStatePtr, GuestAddr) = transmute(callback); + let num = libafl_qemu_sys::libafl_add_backdoor_hook(Some(callback), data); + BackdoorHookId(num) + } + } + + pub fn add_new_thread_hook>( + &self, + data: T, + callback: extern "C" fn(T, env: CPUArchStatePtr, tid: u32) -> bool, + ) -> NewThreadHookId { + unsafe { + let data: u64 = data.into().0; + let callback: extern "C" fn(u64, CPUArchStatePtr, u32) -> bool = transmute(callback); + let num = libafl_qemu_sys::libafl_add_new_thread_hook(Some(callback), data); + NewThreadHookId(num) + } + } + + pub fn add_jmp_hooks>( + &self, + data: T, + gen: Option u64>, + exec: 
Option, + ) -> JmpHookId { + unsafe { + let data: u64 = data.into().0; + let gen: Option u64> = + core::mem::transmute(gen); + let exec: Option = core::mem::transmute(exec); + let num = libafl_qemu_sys::libafl_add_jmp_hook(gen, exec, data); + JmpHookId(num) + } + } +} + +#[cfg(feature = "usermode")] +impl QemuHooks { + #[allow(clippy::type_complexity)] + pub fn add_pre_syscall_hook>( + &self, + data: T, + callback: extern "C" fn( + T, + i32, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + ) -> SyscallHookResult, + ) -> PreSyscallHookId { + unsafe { + let data: u64 = data.into().0; + let callback: extern "C" fn( + u64, + i32, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + ) -> libafl_qemu_sys::syshook_ret = transmute(callback); + let num = libafl_qemu_sys::libafl_add_pre_syscall_hook(Some(callback), data); + PreSyscallHookId(num) + } + } + + #[allow(clippy::type_complexity)] + pub fn add_post_syscall_hook>( + &self, + data: T, + callback: extern "C" fn( + T, + GuestAddr, + i32, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + ) -> GuestAddr, + ) -> PostSyscallHookId { + unsafe { + let data: u64 = data.into().0; + let callback: extern "C" fn( + u64, + GuestAddr, + i32, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + GuestAddr, + ) -> GuestAddr = transmute(callback); + let num = libafl_qemu_sys::libafl_add_post_syscall_hook(Some(callback), data); + PostSyscallHookId(num) + } + } + + #[allow(clippy::type_complexity)] + #[allow(clippy::unused_self)] + pub(crate) fn set_crash_hook(self, callback: extern "C" fn(i32)) { + unsafe { + libafl_dump_core_hook = Some(callback); + } + } +} + +#[cfg(feature = "python")] +#[pymethods] +impl SyscallHookResult { + #[new] + #[pyo3(signature = ( + value=None + ))] + #[must_use] + pub fn new(value: Option) -> Self { + Self::new_internal(value) + } +} + +impl SyscallHookResult { + #[cfg(not(feature = "python"))] + #[must_use] + pub fn new(value: Option) -> Self { + Self::new_internal(value) + } + + #[must_use] + fn new_internal(value: Option) -> Self { + value.map_or( + Self { + retval: 0, + skip_syscall: false, + }, + |v| Self { + retval: v, + skip_syscall: true, + }, + ) + } +} diff --git a/libafl_qemu/src/qemu/mod.rs b/libafl_qemu/src/qemu/mod.rs index 645e9e6129..f5b54e85d9 100644 --- a/libafl_qemu/src/qemu/mod.rs +++ b/libafl_qemu/src/qemu/mod.rs @@ -1,91 +1,72 @@ //! Low-level QEMU library //! //! This module exposes the low-level QEMU library through [`Qemu`]. -//! To access higher-level features of QEMU, it is recommanded to use [`crate::Emulator`] instead. +//! To access higher-level features of QEMU, it is recommended to use [`crate::Emulator`] instead. 
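The `SyscallHookResult` constructor defined in hooks.rs above encodes the whole pre-syscall protocol: `new(None)` lets the guest syscall run normally, while `new(Some(v))` skips it and makes the guest observe `v` as the return value. A minimal sketch of a raw pre-syscall callback built on that convention (the syscall number and the fake pid are illustrative assumptions, not values from this patch):

extern "C" fn fake_getpid_hook(
    _data: u64,
    sys_num: i32,
    _a0: GuestAddr, _a1: GuestAddr, _a2: GuestAddr, _a3: GuestAddr,
    _a4: GuestAddr, _a5: GuestAddr, _a6: GuestAddr, _a7: GuestAddr,
) -> SyscallHookResult {
    // 39 is getpid on x86_64 Linux; adjust for the emulated target (assumption).
    if sys_num == 39 {
        // Skip the real syscall; the guest sees 4242 as the return value.
        SyscallHookResult::new(Some(4242))
    } else {
        // Fall through to the real syscall.
        SyscallHookResult::new(None)
    }
}

Such a callback would be registered through the `QemuHooks::add_pre_syscall_hook` method introduced above (usermode feature only), keeping the returned `PreSyscallHookId` around if the hook ever needs to be removed.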
-use core::fmt; -use std::{ +use core::{ cmp::{Ordering, PartialOrd}, - ffi::CString, + fmt, ptr, +}; +use std::{ + ffi::{c_void, CString}, fmt::{Display, Formatter}, intrinsics::{copy_nonoverlapping, transmute}, mem::MaybeUninit, ops::Range, pin::Pin, - ptr, - ptr::{addr_of, null}, }; use libafl_bolts::os::unix_signals::Signal; -#[cfg(emulation_mode = "systemmode")] -use libafl_qemu_sys::qemu_init; -#[cfg(emulation_mode = "usermode")] -use libafl_qemu_sys::{guest_base, qemu_user_init, VerifyAccess}; use libafl_qemu_sys::{ libafl_flush_jit, libafl_get_exit_reason, libafl_page_from_addr, libafl_qemu_add_gdb_cmd, libafl_qemu_cpu_index, libafl_qemu_current_cpu, libafl_qemu_gdb_reply, libafl_qemu_get_cpu, - libafl_qemu_num_cpus, libafl_qemu_num_regs, libafl_qemu_read_reg, + libafl_qemu_init, libafl_qemu_num_cpus, libafl_qemu_num_regs, libafl_qemu_read_reg, libafl_qemu_remove_breakpoint, libafl_qemu_set_breakpoint, libafl_qemu_trigger_breakpoint, - libafl_qemu_write_reg, CPUArchState, CPUArchStatePtr, CPUStatePtr, FatPtr, GuestAddr, - GuestPhysAddr, GuestUsize, GuestVirtAddr, TCGTemp, + libafl_qemu_write_reg, CPUArchState, CPUStatePtr, FatPtr, GuestAddr, GuestPhysAddr, GuestUsize, + GuestVirtAddr, }; use num_traits::Num; -#[cfg(feature = "python")] -use pyo3::prelude::*; use strum::IntoEnumIterator; use crate::{GuestAddrKind, GuestReg, Regs}; -#[cfg(emulation_mode = "usermode")] +pub mod config; +use config::{QemuConfig, QemuConfigBuilder, QEMU_CONFIG}; + +#[cfg(feature = "usermode")] mod usermode; -#[cfg(emulation_mode = "usermode")] +#[cfg(feature = "usermode")] pub use usermode::*; -#[cfg(emulation_mode = "systemmode")] +#[cfg(feature = "systemmode")] mod systemmode; -#[cfg(emulation_mode = "systemmode")] +#[cfg(feature = "systemmode")] #[allow(unused_imports)] pub use systemmode::*; -pub const SKIP_EXEC_HOOK: u64 = u64::MAX; +mod hooks; +pub use hooks::*; + static mut QEMU_IS_INITIALIZED: bool = false; -macro_rules! create_hook_id { - ($name:ident, $sys:ident, true) => { - paste::paste! { - #[derive(Clone, Copy, PartialEq, Debug)] - pub struct [<$name HookId>](pub(crate) usize); - impl HookId for [<$name HookId>] { - fn remove(&self, invalidate_block: bool) -> bool { - unsafe { libafl_qemu_sys::$sys(self.0, invalidate_block.into()) != 0 } - } - } - } - }; - ($name:ident, $sys:ident, false) => { - paste::paste! 
{ - #[derive(Clone, Copy, PartialEq, Debug)] - pub struct [<$name HookId>](pub(crate) usize); - impl HookId for [<$name HookId>] { - fn remove(&self, _invalidate_block: bool) -> bool { - unsafe { libafl_qemu_sys::$sys(self.0) != 0 } - } - } - } - }; +#[derive(Debug)] +pub enum QemuError { + Init(QemuInitError), + Exit(QemuExitError), + RW(QemuRWError), } -create_hook_id!(Instruction, libafl_qemu_remove_hook, true); -create_hook_id!(Backdoor, libafl_qemu_remove_backdoor_hook, true); -create_hook_id!(Edge, libafl_qemu_remove_edge_hook, true); -create_hook_id!(Block, libafl_qemu_remove_block_hook, true); -create_hook_id!(Read, libafl_qemu_remove_read_hook, true); -create_hook_id!(Write, libafl_qemu_remove_write_hook, true); -create_hook_id!(Cmp, libafl_qemu_remove_cmp_hook, true); -create_hook_id!(PreSyscall, libafl_qemu_remove_pre_syscall_hook, false); -create_hook_id!(PostSyscall, libafl_qemu_remove_post_syscall_hook, false); -create_hook_id!(NewThread, libafl_qemu_remove_new_thread_hook, false); -create_hook_id!(Jmp, libafl_qemu_remove_jmp_hook, true); +impl From for libafl::Error { + fn from(qemu_error: QemuError) -> Self { + libafl::Error::runtime(qemu_error) + } +} + +impl From for String { + fn from(qemu_error: QemuError) -> Self { + format!("LibAFL QEMU Error: {qemu_error:?}") + } +} #[derive(Debug)] pub enum QemuInitError { @@ -96,9 +77,17 @@ pub enum QemuInitError { #[derive(Debug, Clone)] pub enum QemuExitReason { - End(QemuShutdownCause), // QEMU ended for some reason. - Breakpoint(GuestAddr), // Breakpoint triggered. Contains the address of the trigger. - SyncExit, // Synchronous backdoor: The guest triggered a backdoor and should return to LibAFL. + /// QEMU ended for some internal reason + End(QemuShutdownCause), + + /// Breakpoint triggered. Contains the address of the trigger + Breakpoint(GuestAddr), + + /// Synchronous exit: The guest triggered a backdoor and should return to `LibAFL`. + SyncExit, + + /// Timeout, and it has been requested to be handled by the harness. + Timeout, } #[derive(Debug, Clone)] @@ -107,11 +96,6 @@ pub enum QemuExitError { UnexpectedExit, // Qemu exited without going through an expected exit point. Can be caused by a crash for example. } -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct QemuSnapshotCheckResult { - nb_page_inconsistencies: u64, -} - #[derive(Debug, Clone)] pub enum QemuRWErrorKind { Read, @@ -124,6 +108,7 @@ pub enum QemuRWErrorCause { WrongArgument(i32), CurrentCpuNotFound, Reg(i32), + WrongMemoryLocation(GuestAddr, usize), // addr, size } #[derive(Debug, Clone)] @@ -140,6 +125,19 @@ impl QemuRWError { Self { kind, cause, cpu } } + pub fn wrong_mem_location( + kind: QemuRWErrorKind, + cpu: CPUStatePtr, + addr: GuestAddr, + size: usize, + ) -> Self { + Self::new( + kind, + QemuRWErrorCause::WrongMemoryLocation(addr, size), + Some(cpu), + ) + } + #[must_use] pub fn current_cpu_not_found(kind: QemuRWErrorKind) -> Self { Self::new(kind, QemuRWErrorCause::CurrentCpuNotFound, None) @@ -167,15 +165,6 @@ impl QemuRWError { } } -/// Represents a QEMU snapshot check result for which no error was detected -impl Default for QemuSnapshotCheckResult { - fn default() -> Self { - Self { - nb_page_inconsistencies: 0, - } - } -} - /// The thin wrapper around QEMU. /// It is considered unsafe to use it directly. /// Prefer using `Emulator` instead in case of doubt. 
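The exit-reason and error types above are what the rest of the crate matches on after kicking QEMU. A hedged sketch of consuming them (it assumes `qemu` is an initialized `Qemu` handle, that a breakpoint was set beforehand, and that the `Timeout` arm is compiled in, since it only exists with the systemmode feature):

match unsafe { qemu.run() } {
    Ok(QemuExitReason::Breakpoint(pc)) => println!("breakpoint hit at {pc:#x}"),
    Ok(QemuExitReason::SyncExit) => println!("guest triggered a synchronous exit"),
    Ok(QemuExitReason::Timeout) => println!("run timed out"),
    Ok(QemuExitReason::End(cause)) => println!("QEMU ended: {cause:?}"),
    Err(QemuExitError::UnexpectedExit) => eprintln!("target left through an unexpected path, e.g. a crash"),
    Err(e) => eprintln!("could not decode the exit reason: {e:?}"),
}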
@@ -184,31 +173,22 @@ pub struct Qemu { _private: (), } -// syshook_ret -#[repr(C)] -#[cfg_attr(feature = "python", pyclass)] -#[cfg_attr(feature = "python", derive(FromPyObject))] -pub struct SyscallHookResult { - pub retval: GuestAddr, - pub skip_syscall: bool, -} - #[derive(Debug, Clone)] -pub struct EmulatorMemoryChunk { +pub struct QemuMemoryChunk { addr: GuestAddrKind, size: GuestReg, cpu: Option, } #[allow(clippy::vec_box)] -static mut GDB_COMMANDS: Vec> = vec![]; +static mut GDB_COMMANDS: Vec> = Vec::new(); -extern "C" fn gdb_cmd(data: *const (), buf: *const u8, len: usize) -> i32 { +unsafe extern "C" fn gdb_cmd(data: *mut c_void, buf: *mut u8, len: usize) -> bool { unsafe { - let closure = &mut *(data as *mut Box FnMut(&Qemu, &'r str) -> bool>); + let closure = &mut *(data as *mut Box FnMut(Qemu, &'r str) -> bool>); let cmd = std::str::from_utf8_unchecked(std::slice::from_raw_parts(buf, len)); let qemu = Qemu::get_unchecked(); - i32::from(closure(&qemu, cmd)) + closure(qemu, cmd) } } @@ -284,6 +264,7 @@ impl Display for QemuExitReason { QemuExitReason::End(shutdown_cause) => write!(f, "End: {shutdown_cause:?}"), QemuExitReason::Breakpoint(bp) => write!(f, "Breakpoint: {bp}"), QemuExitReason::SyncExit => write!(f, "Sync Exit"), + QemuExitReason::Timeout => write!(f, "Timeout"), } } } @@ -339,15 +320,15 @@ impl From for MemAccessInfo { } pub trait ArchExtras { - fn read_return_address(&self) -> Result - where - T: From; + fn read_return_address(&self) -> Result; fn write_return_address(&self, val: T) -> Result<(), QemuRWError> where T: Into; - fn read_function_argument(&self, conv: CallingConvention, idx: u8) -> Result - where - T: From; + fn read_function_argument( + &self, + conv: CallingConvention, + idx: u8, + ) -> Result; fn write_function_argument( &self, conv: CallingConvention, @@ -360,11 +341,6 @@ pub trait ArchExtras { #[allow(clippy::unused_self)] impl CPU { - #[must_use] - pub fn qemu(&self) -> Qemu { - unsafe { Qemu::get_unchecked() } - } - #[must_use] #[allow(clippy::cast_sign_loss)] pub fn index(&self) -> usize { @@ -377,27 +353,6 @@ impl CPU { } } - #[cfg(emulation_mode = "usermode")] - #[must_use] - pub fn g2h(&self, addr: GuestAddr) -> *mut T { - unsafe { (addr as usize + guest_base) as *mut T } - } - - #[cfg(emulation_mode = "usermode")] - #[must_use] - pub fn h2g(&self, addr: *const T) -> GuestAddr { - unsafe { (addr as usize - guest_base) as GuestAddr } - } - - #[cfg(emulation_mode = "usermode")] - #[must_use] - pub fn access_ok(&self, kind: VerifyAccess, addr: GuestAddr, size: usize) -> bool { - unsafe { - // TODO add support for tagged GuestAddr - libafl_qemu_sys::page_check_range(addr, size as GuestAddr, kind.into()) - } - } - // TODO expose tlb_set_dirty and tlb_reset_dirty #[must_use] @@ -405,35 +360,9 @@ impl CPU { unsafe { libafl_qemu_num_regs(self.ptr) } } - pub fn write_reg(&self, reg: R, val: T) -> Result<(), QemuRWError> + pub fn read_reg(&self, reg: R) -> Result where R: Into + Clone, - T: Into, - { - let reg_id = reg.clone().into(); - #[cfg(feature = "be")] - let val = GuestReg::to_be(val.into()); - - #[cfg(not(feature = "be"))] - let val = GuestReg::to_le(val.into()); - - let success = - unsafe { libafl_qemu_write_reg(self.ptr, reg_id, addr_of!(val) as *const u8) }; - if success == 0 { - Err(QemuRWError { - kind: QemuRWErrorKind::Write, - cause: QemuRWErrorCause::Reg(reg.into()), - cpu: Some(self.ptr), - }) - } else { - Ok(()) - } - } - - pub fn read_reg(&self, reg: R) -> Result - where - R: Into + Clone, - T: From, { unsafe { let reg_id = 
reg.clone().into(); @@ -455,6 +384,80 @@ impl CPU { } } + pub fn write_reg(&self, reg: R, val: T) -> Result<(), QemuRWError> + where + R: Into + Clone, + T: Into, + { + let reg_id = reg.clone().into(); + #[cfg(feature = "be")] + let val = GuestReg::to_be(val.into()); + + #[cfg(not(feature = "be"))] + let val = GuestReg::to_le(val.into()); + + let success = unsafe { libafl_qemu_write_reg(self.ptr, reg_id, &raw const val as *mut u8) }; + if success == 0 { + Err(QemuRWError { + kind: QemuRWErrorKind::Write, + cause: QemuRWErrorCause::Reg(reg.into()), + cpu: Some(self.ptr), + }) + } else { + Ok(()) + } + } + + /// Read a value from a guest address, taking into account the potential MMU / MPU. + pub fn read_mem(&self, addr: GuestAddr, buf: &mut [u8]) -> Result<(), QemuRWError> { + // TODO use gdbstub's target_cpu_memory_rw_debug + let ret = unsafe { + libafl_qemu_sys::cpu_memory_rw_debug( + self.ptr, + addr as GuestVirtAddr, + buf.as_mut_ptr() as *mut _, + buf.len(), + false, + ) + }; + + if ret != 0 { + Err(QemuRWError::wrong_mem_location( + QemuRWErrorKind::Read, + self.ptr, + addr, + buf.len(), + )) + } else { + Ok(()) + } + } + + /// Write a value to a guest address, taking into account the potential MMU / MPU. + pub fn write_mem(&self, addr: GuestAddr, buf: &[u8]) -> Result<(), QemuRWError> { + // TODO use gdbstub's target_cpu_memory_rw_debug + let ret = unsafe { + libafl_qemu_sys::cpu_memory_rw_debug( + self.ptr, + addr as GuestVirtAddr, + buf.as_ptr() as *mut _, + buf.len(), + true, + ) + }; + + if ret != 0 { + Err(QemuRWError::wrong_mem_location( + QemuRWErrorKind::Write, + self.ptr, + addr, + buf.len(), + )) + } else { + Ok(()) + } + } + pub fn reset(&self) { unsafe { libafl_qemu_sys::cpu_reset(self.ptr) }; } @@ -571,8 +574,14 @@ impl From for HookData { #[allow(clippy::unused_self)] impl Qemu { + /// For more details about the parameters check + /// [the QEMU documentation](https://www.qemu.org/docs/master/about/). + pub fn builder() -> QemuConfigBuilder { + QemuConfig::builder() + } + #[allow(clippy::must_use_candidate, clippy::similar_names)] - pub fn init(args: &[String], env: &[(String, String)]) -> Result { + pub fn init(args: &[String]) -> Result { if args.is_empty() { return Err(QemuInitError::EmptyArgs); } @@ -598,28 +607,28 @@ impl Qemu { .collect(); let mut argv: Vec<*const u8> = args.iter().map(|x| x.as_ptr() as *const u8).collect(); argv.push(ptr::null()); // argv is always null terminated. - let env_strs: Vec = env - .iter() - .map(|(k, v)| format!("{}={}\0", &k, &v)) - .collect(); - let mut envp: Vec<*const u8> = env_strs.iter().map(|x| x.as_bytes().as_ptr()).collect(); - envp.push(null()); + unsafe { - #[cfg(emulation_mode = "usermode")] - qemu_user_init(argc, argv.as_ptr(), envp.as_ptr()); - #[cfg(emulation_mode = "systemmode")] - { - qemu_init(argc, argv.as_ptr(), envp.as_ptr()); - libc::atexit(qemu_cleanup_atexit); - libafl_qemu_sys::syx_snapshot_init(true); - } + libafl_qemu_init(argc, argv.as_ptr() as *mut *mut ::std::os::raw::c_char); + } + + #[cfg(feature = "systemmode")] + unsafe { + libafl_qemu_sys::syx_snapshot_init(true); + libc::atexit(qemu_cleanup_atexit); } Ok(Qemu { _private: () }) } + #[must_use] + pub fn hooks(&self) -> QemuHooks { + unsafe { QemuHooks::get_unchecked() } + } + /// Get a QEMU object. /// Same as `Qemu::get`, but without checking whether QEMU has been correctly initialized. + /// Since Qemu is a ZST, this operation is free. 
/// /// # Safety /// @@ -641,7 +650,23 @@ impl Qemu { } } - fn post_run(&self) -> Result { + /// Get QEMU configuration. + /// Returns `Some` only if QEMU was initialized with the builder. + /// Returns `None` if QEMU was initialized with `init` and raw string args. + #[must_use] + pub fn get_config(&self) -> Option<&'static QemuConfig> { + QEMU_CONFIG.get() + } + + /// This function will run the emulator until the next breakpoint / sync exit, or until finish. + /// It is a low-level function and simply kicks QEMU. + /// # Safety + /// + /// Should, in general, be safe to call. + /// Of course, the emulated target is not contained securely and can corrupt state or interact with the operating system. + pub unsafe fn run(&self) -> Result { + self.run_inner(); + let exit_reason = unsafe { libafl_get_exit_reason() }; if exit_reason.is_null() { Err(QemuExitError::UnexpectedExit) @@ -698,6 +723,10 @@ impl Qemu { QemuExitReason::Breakpoint(bp_addr) }, libafl_qemu_sys::libafl_exit_reason_kind_SYNC_EXIT => QemuExitReason::SyncExit, + + #[cfg(feature = "systemmode")] + libafl_qemu_sys::libafl_exit_reason_kind_TIMEOUT => QemuExitReason::Timeout, + _ => return Err(QemuExitError::UnknownKind), }) } @@ -735,21 +764,48 @@ impl Qemu { unsafe { libafl_page_from_addr(addr) } } - //#[must_use] - /*pub fn page_size() -> GuestUsize { - unsafe { libafl_page_size } - }*/ - - pub unsafe fn write_mem(&self, addr: GuestAddr, buf: &[u8]) { + /// Read a value from a guest address, taking into account the potential indirections with the current CPU. + pub fn read_mem(&self, addr: GuestAddr, buf: &mut [u8]) -> Result<(), QemuRWError> { self.current_cpu() .unwrap_or_else(|| self.cpu_from_index(0)) - .write_mem(addr, buf); + .read_mem(addr, buf) } - pub unsafe fn read_mem(&self, addr: GuestAddr, buf: &mut [u8]) { + /// Write a value to a guest address, taking into account the potential indirections with the current CPU. + pub fn write_mem(&self, addr: GuestAddr, buf: &[u8]) -> Result<(), QemuRWError> { self.current_cpu() .unwrap_or_else(|| self.cpu_from_index(0)) - .read_mem(addr, buf); + .write_mem(addr, buf) + } + + /// Read a value from a guest address. + /// + /// # Safety + /// In usermode, this will read from a translated guest address. + /// This may only be safely used for valid guest addresses. + /// + /// In any case, no check will be performed on the correctness of the operation. + /// + /// Please refer to [`CPU::read_mem`] for more details. + pub unsafe fn read_mem_unchecked(&self, addr: GuestAddr, buf: &mut [u8]) { + self.current_cpu() + .unwrap_or_else(|| self.cpu_from_index(0)) + .read_mem_unchecked(addr, buf); + } + + /// Write a value to a guest address. + /// + /// # Safety + /// In usermode, this will write to a translated guest address. + /// + /// In any case, no check will be performed on the correctness of the operation. + /// + /// This may only be safely used for valid guest addresses. + /// Please refer to [`CPU::write_mem`] for more details. + pub unsafe fn write_mem_unchecked(&self, addr: GuestAddr, buf: &[u8]) { + self.current_cpu() + .unwrap_or_else(|| self.cpu_from_index(0)) + .write_mem_unchecked(addr, buf); } #[must_use] @@ -767,9 +823,8 @@ impl Qemu { .write_reg(reg, val) } - pub fn read_reg(&self, reg: R) -> Result + pub fn read_reg(&self, reg: R) -> Result where - T: Num + PartialOrd + Copy + From, R: Into + Clone, { self.current_cpu() @@ -778,12 +833,26 @@ impl Qemu { } pub fn set_breakpoint(&self, addr: GuestAddr) { + // Remove thumb bit encoded in addresses. 
+ // Since ARMv7, instructions are (half-)word aligned, so this is safe. + // For ARMv6 and before, this could be wrong since SCTLR.U could be 0. + // TODO: check precisely for architecture before doing this. + #[cfg(target_arch = "arm")] + let addr = { addr & !1 }; + unsafe { libafl_qemu_set_breakpoint(addr.into()); } } pub fn remove_breakpoint(&self, addr: GuestAddr) { + // Remove thumb bit encoded in addresses. + // Since ARMv7, instructions are (half-)word aligned, so this is safe. + // For ARMv6 and before, this could be wrong since SCTLR.U could be 0. + // TODO: check precisely for architecture before doing this. + #[cfg(target_arch = "arm")] + let addr = { addr & !1 }; + unsafe { libafl_qemu_remove_breakpoint(addr.into()); } @@ -806,203 +875,23 @@ impl Qemu { } } - // TODO set T lifetime to be like Emulator - #[allow(clippy::missing_transmute_annotations)] - pub fn set_hook>( - &self, - data: T, - addr: GuestAddr, - callback: extern "C" fn(T, GuestAddr), - invalidate_block: bool, - ) -> InstructionHookId { - unsafe { - let data: u64 = data.into().0; - let callback: extern "C" fn(u64, GuestAddr) = transmute(callback); - let num = libafl_qemu_sys::libafl_qemu_set_hook( - addr.into(), - Some(callback), - data, - i32::from(invalidate_block), - ); - InstructionHookId(num) - } - } - #[must_use] - pub fn remove_hook(&self, id: impl HookId, invalidate_block: bool) -> bool { + pub fn remove_hook(&self, id: &impl HookId, invalidate_block: bool) -> bool { id.remove(invalidate_block) } - #[must_use] - pub fn remove_hooks_at(&self, addr: GuestAddr, invalidate_block: bool) -> usize { - unsafe { - libafl_qemu_sys::libafl_qemu_remove_hooks_at(addr.into(), i32::from(invalidate_block)) - } - } - - #[allow(clippy::missing_transmute_annotations)] - pub fn add_edge_hooks>( - &self, - data: T, - gen: Option u64>, - exec: Option, - ) -> EdgeHookId { - unsafe { - let data: u64 = data.into().0; - let gen: Option u64> = - transmute(gen); - let exec: Option = transmute(exec); - let num = libafl_qemu_sys::libafl_add_edge_hook(gen, exec, data); - EdgeHookId(num) - } - } - - #[allow(clippy::missing_transmute_annotations)] - pub fn add_block_hooks>( - &self, - data: T, - gen: Option u64>, - post_gen: Option, - exec: Option, - ) -> BlockHookId { - unsafe { - let data: u64 = data.into().0; - let gen: Option u64> = transmute(gen); - let post_gen: Option = - transmute(post_gen); - let exec: Option = transmute(exec); - let num = libafl_qemu_sys::libafl_add_block_hook(gen, post_gen, exec, data); - BlockHookId(num) - } - } - - /// `data` can be used to pass data that can be accessed as the first argument in the `gen` and the `exec` functions + /// # Safety /// - /// `gen` gets passed the current programm counter, mutable access to a `TCGTemp` and information about the memory - /// access being performed. - /// The `u64` return value is an id that gets passed to the `exec` functions as their second argument. 
- /// - /// `exec` hooks get invoked on every read performed by the guest - /// - /// `exec1`-`exec8` special case accesses of width 1-8 - /// - /// If there is no specialized hook for a given read width, the `exec_n` will be - /// called and its last argument will specify the access width - #[allow(clippy::missing_transmute_annotations)] - pub fn add_read_hooks>( - &self, - data: T, - gen: Option u64>, - exec1: Option, - exec2: Option, - exec4: Option, - exec8: Option, - exec_n: Option, - ) -> ReadHookId { - unsafe { - let data: u64 = data.into().0; - let gen: Option< - unsafe extern "C" fn( - u64, - GuestAddr, - *mut TCGTemp, - libafl_qemu_sys::MemOpIdx, - ) -> u64, - > = transmute(gen); - let exec1: Option = transmute(exec1); - let exec2: Option = transmute(exec2); - let exec4: Option = transmute(exec4); - let exec8: Option = transmute(exec8); - let exec_n: Option = - transmute(exec_n); - let num = libafl_qemu_sys::libafl_add_read_hook( - gen, exec1, exec2, exec4, exec8, exec_n, data, - ); - ReadHookId(num) - } - } - - // TODO add MemOp info - #[allow(clippy::missing_transmute_annotations)] - pub fn add_write_hooks>( - &self, - data: T, - gen: Option u64>, - exec1: Option, - exec2: Option, - exec4: Option, - exec8: Option, - exec_n: Option, - ) -> WriteHookId { - unsafe { - let data: u64 = data.into().0; - let gen: Option< - unsafe extern "C" fn( - u64, - GuestAddr, - *mut TCGTemp, - libafl_qemu_sys::MemOpIdx, - ) -> u64, - > = transmute(gen); - let exec1: Option = transmute(exec1); - let exec2: Option = transmute(exec2); - let exec4: Option = transmute(exec4); - let exec8: Option = transmute(exec8); - let exec_n: Option = - transmute(exec_n); - let num = libafl_qemu_sys::libafl_add_write_hook( - gen, exec1, exec2, exec4, exec8, exec_n, data, - ); - WriteHookId(num) - } - } - - #[allow(clippy::missing_transmute_annotations)] - pub fn add_cmp_hooks>( - &self, - data: T, - gen: Option u64>, - exec1: Option, - exec2: Option, - exec4: Option, - exec8: Option, - ) -> CmpHookId { - unsafe { - let data: u64 = data.into().0; - let gen: Option u64> = transmute(gen); - let exec1: Option = transmute(exec1); - let exec2: Option = transmute(exec2); - let exec4: Option = transmute(exec4); - let exec8: Option = transmute(exec8); - let num = libafl_qemu_sys::libafl_add_cmp_hook(gen, exec1, exec2, exec4, exec8, data); - CmpHookId(num) - } - } - - #[allow(clippy::missing_transmute_annotations)] - pub fn add_backdoor_hook>( - &self, - data: T, - callback: extern "C" fn(T, CPUArchStatePtr, GuestAddr), - ) -> BackdoorHookId { - unsafe { - let data: u64 = data.into().0; - let callback: extern "C" fn(u64, CPUArchStatePtr, GuestAddr) = transmute(callback); - let num = libafl_qemu_sys::libafl_add_backdoor_hook(Some(callback), data); - BackdoorHookId(num) - } - } - + /// Calling this multiple times concurrently will access static variables and is unsafe. 
#[allow(clippy::type_complexity)] - pub fn add_gdb_cmd(&self, callback: Box bool>) { - unsafe { - let fat: Box = Box::new(transmute::< - Box FnMut(&'a Qemu, &'b str) -> bool>, - FatPtr, - >(callback)); - libafl_qemu_add_gdb_cmd(gdb_cmd, ptr::from_ref(&*fat) as *const ()); - GDB_COMMANDS.push(fat); - } + pub unsafe fn add_gdb_cmd(&self, callback: Box bool>) { + let fat: Box = Box::new(transmute::< + Box FnMut(&'a Qemu, &'b str) -> bool>, + FatPtr, + >(callback)); + libafl_qemu_add_gdb_cmd(Some(gdb_cmd), ptr::from_ref(&*fat) as *mut c_void); + let commands_ptr = &raw mut GDB_COMMANDS; + (*commands_ptr).push(fat); } pub fn gdb_reply(&self, output: &str) { @@ -1032,17 +921,14 @@ impl Qemu { } impl ArchExtras for Qemu { - fn read_return_address(&self) -> Result - where - T: From, - { + fn read_return_address(&self) -> Result { self.current_cpu() .ok_or(QemuRWError { kind: QemuRWErrorKind::Read, cause: QemuRWErrorCause::CurrentCpuNotFound, cpu: None, })? - .read_return_address::() + .read_return_address() } fn write_return_address(&self, val: T) -> Result<(), QemuRWError> @@ -1054,13 +940,14 @@ impl ArchExtras for Qemu { .write_return_address::(val) } - fn read_function_argument(&self, conv: CallingConvention, idx: u8) -> Result - where - T: From, - { + fn read_function_argument( + &self, + conv: CallingConvention, + idx: u8, + ) -> Result { self.current_cpu() .ok_or(QemuRWError::current_cpu_not_found(QemuRWErrorKind::Read))? - .read_function_argument::(conv, idx) + .read_function_argument(conv, idx) } fn write_function_argument( @@ -1108,7 +995,7 @@ impl PartialOrd for GuestAddrKind { } } -impl EmulatorMemoryChunk { +impl QemuMemoryChunk { #[must_use] pub fn addr(&self) -> GuestAddrKind { self.addr @@ -1138,7 +1025,7 @@ impl EmulatorMemoryChunk { } #[must_use] - pub fn get_slice(&self, range: &Range) -> Option { + pub fn get_slice(&self, range: &Range) -> Option { let new_addr = self.addr + range.start; let slice_size = range.clone().count(); @@ -1153,9 +1040,43 @@ impl EmulatorMemoryChunk { }) } + /// Returns the number of bytes effectively read. + /// output will get chunked at `size` bytes. + pub fn read(&self, qemu: Qemu, output: &mut [u8]) -> Result { + let max_len: usize = self.size.try_into().unwrap(); + + let output_sliced = if output.len() > max_len { + &mut output[0..max_len] + } else { + output + }; + + match self.addr { + GuestAddrKind::Physical(hwaddr) => { + #[cfg(feature = "usermode")] + { + // For now the default behaviour is to fall back to virtual addresses + qemu.read_mem(hwaddr.try_into().unwrap(), output_sliced)?; + } + #[cfg(feature = "systemmode")] + unsafe { + qemu.read_phys_mem(hwaddr, output_sliced); + } + } + GuestAddrKind::Virtual(vaddr) => unsafe { + self.cpu + .as_ref() + .unwrap() + .read_mem_unchecked(vaddr.try_into().unwrap(), output_sliced); + }, + }; + + Ok(output_sliced.len().try_into().unwrap()) + } + /// Returns the number of bytes effectively written. - #[must_use] - pub fn write(&self, qemu: &Qemu, input: &[u8]) -> GuestReg { + /// Input will get chunked at `size` bytes. 
+ pub fn write(&self, qemu: Qemu, input: &[u8]) -> Result { let max_len: usize = self.size.try_into().unwrap(); let input_sliced = if input.len() > max_len { @@ -1165,62 +1086,26 @@ impl EmulatorMemoryChunk { }; match self.addr { - GuestAddrKind::Physical(hwaddr) => unsafe { - #[cfg(emulation_mode = "usermode")] + GuestAddrKind::Physical(hwaddr) => { + #[cfg(feature = "usermode")] { // For now the default behaviour is to fall back to virtual addresses - qemu.write_mem(hwaddr.try_into().unwrap(), input_sliced); + qemu.write_mem(hwaddr.try_into().unwrap(), input_sliced)?; } - #[cfg(emulation_mode = "systemmode")] - { + #[cfg(feature = "systemmode")] + unsafe { qemu.write_phys_mem(hwaddr, input_sliced); } - }, - GuestAddrKind::Virtual(vaddr) => unsafe { + } + GuestAddrKind::Virtual(vaddr) => { self.cpu .as_ref() .unwrap() - .write_mem(vaddr.try_into().unwrap(), input_sliced); - }, + .write_mem(vaddr.try_into().unwrap(), input_sliced)?; + } }; - input_sliced.len().try_into().unwrap() - } -} - -#[cfg(feature = "python")] -#[pymethods] -impl SyscallHookResult { - #[new] - #[must_use] - pub fn new(value: Option) -> Self { - value.map_or( - Self { - retval: 0, - skip_syscall: false, - }, - |v| Self { - retval: v, - skip_syscall: true, - }, - ) - } -} - -#[cfg(not(feature = "python"))] -impl SyscallHookResult { - #[must_use] - pub fn new(value: Option) -> Self { - value.map_or( - Self { - retval: 0, - skip_syscall: false, - }, - |v| Self { - retval: v, - skip_syscall: true, - }, - ) + Ok(input_sliced.len().try_into().unwrap()) } } @@ -1233,7 +1118,10 @@ pub mod pybind { static mut PY_GENERIC_HOOKS: Vec<(GuestAddr, PyObject)> = vec![]; extern "C" fn py_generic_hook_wrapper(idx: u64, _pc: GuestAddr) { - let obj = unsafe { &PY_GENERIC_HOOKS[idx as usize].1 }; + let obj = unsafe { + let hooks = &raw mut PY_GENERIC_HOOKS; + &(*hooks)[idx as usize].1 + }; Python::with_gil(|py| { obj.call0(py).expect("Error in the hook"); }); @@ -1248,9 +1136,9 @@ pub mod pybind { impl Qemu { #[allow(clippy::needless_pass_by_value)] #[new] - fn new(args: Vec, env: Vec<(String, String)>) -> PyResult { - let qemu = super::Qemu::init(&args, &env) - .map_err(|e| PyValueError::new_err(format!("{e}")))?; + fn new(args: Vec) -> PyResult { + let qemu = + super::Qemu::init(&args).map_err(|e| PyValueError::new_err(format!("{e}")))?; Ok(Qemu { qemu }) } @@ -1262,16 +1150,16 @@ pub mod pybind { } fn write_mem(&self, addr: GuestAddr, buf: &[u8]) { - unsafe { - self.qemu.write_mem(addr, buf); - } + self.qemu + .write_mem(addr, buf) + .expect("Write to memory failed."); } fn read_mem(&self, addr: GuestAddr, size: usize) -> Vec { let mut buf = vec![0; size]; - unsafe { - self.qemu.read_mem(addr, &mut buf); - } + self.qemu + .read_mem(addr, &mut buf) + .expect("Read to memory failed."); buf } @@ -1307,20 +1195,30 @@ pub mod pybind { self.qemu.flush_jit(); } - fn set_hook(&self, addr: GuestAddr, hook: PyObject) { + /// # Safety + /// Removes a hooke from `PY_GENERIC_HOOKS` -> may not be called concurrently! 
+ unsafe fn set_hook(&self, addr: GuestAddr, hook: PyObject) { unsafe { - let idx = PY_GENERIC_HOOKS.len(); - PY_GENERIC_HOOKS.push((addr, hook)); - self.qemu - .set_hook(idx as u64, addr, py_generic_hook_wrapper, true); + let hooks = &raw mut PY_GENERIC_HOOKS; + let idx = (*hooks).len(); + (*hooks).push((addr, hook)); + self.qemu.hooks().add_instruction_hooks( + idx as u64, + addr, + py_generic_hook_wrapper, + true, + ); } } - fn remove_hooks_at(&self, addr: GuestAddr) -> usize { + /// # Safety + /// Removes a hooke from `PY_GENERIC_HOOKS` -> may not be called concurrently! + unsafe fn remove_hooks_at(&self, addr: GuestAddr) -> usize { unsafe { - PY_GENERIC_HOOKS.retain(|(a, _)| *a != addr); + let hooks = &raw mut PY_GENERIC_HOOKS; + (*hooks).retain(|(a, _)| *a != addr); } - self.qemu.remove_hooks_at(addr, true) + self.qemu.hooks().remove_instruction_hooks_at(addr, true) } } } diff --git a/libafl_qemu/src/qemu/systemmode.rs b/libafl_qemu/src/qemu/systemmode.rs index 6dfae7d4cd..86b8716606 100644 --- a/libafl_qemu/src/qemu/systemmode.rs +++ b/libafl_qemu/src/qemu/systemmode.rs @@ -10,16 +10,17 @@ use bytes_utils::SegmentedBuf; use libafl_qemu_sys::{ libafl_load_qemu_snapshot, libafl_page_from_addr, libafl_qemu_current_paging_id, libafl_save_qemu_snapshot, libafl_start_int_timer, qemu_cleanup, qemu_main_loop, vm_start, GuestAddr, GuestPhysAddr, GuestUsize, GuestVirtAddr }; +use libc::EXIT_SUCCESS; use num_traits::Zero; use crate::{ - EmulatorMemoryChunk, FastSnapshotPtr, GuestAddrKind, MemAccessInfo, Qemu, QemuExitError, - QemuExitReason, QemuSnapshotCheckResult, CPU, + FastSnapshotPtr, GuestAddrKind, MemAccessInfo, Qemu, QemuMemoryChunk, QemuRWError, + QemuRWErrorCause, QemuRWErrorKind, QemuSnapshotCheckResult, CPU, }; pub(super) extern "C" fn qemu_cleanup_atexit() { unsafe { - qemu_cleanup(); + qemu_cleanup(EXIT_SUCCESS); } } @@ -137,38 +138,40 @@ impl CPU { } } - /// Write a value to a guest address. + /// Read a value from a guest address, taking into account the potential MMU / MPU. /// /// # Safety - /// This will write to a translated guest address (using `g2h`). - /// It just adds `guest_base` and writes to that location, without checking the bounds. - /// This may only be safely used for valid guest addresses! - pub unsafe fn write_mem(&self, addr: GuestAddr, buf: &[u8]) { + /// no check is done on the correctness of the operation. + /// if a problem occurred during the operation, there will be no feedback + pub unsafe fn read_mem_unchecked(&self, addr: GuestAddr, buf: &mut [u8]) { // TODO use gdbstub's target_cpu_memory_rw_debug - libafl_qemu_sys::cpu_memory_rw_debug( - self.ptr, - addr as GuestVirtAddr, - buf.as_ptr() as *mut _, - buf.len(), - true, - ); + unsafe { + libafl_qemu_sys::cpu_memory_rw_debug( + self.ptr, + addr as GuestVirtAddr, + buf.as_mut_ptr() as *mut _, + buf.len(), + false, + ) + }; } - /// Read a value from a guest address. + /// Write a value to a guest address, taking into account the potential MMU / MPU. /// /// # Safety - /// This will read from a translated guest address (using `g2h`). - /// It just adds `guest_base` and writes to that location, without checking the bounds. - /// This may only be safely used for valid guest addresses! - pub unsafe fn read_mem(&self, addr: GuestAddr, buf: &mut [u8]) { + /// no check is done on the correctness of the operation. 
+ /// if a problem occurred during the operation, there will be no feedback + pub fn write_mem_unchecked(&self, addr: GuestAddr, buf: &[u8]) { // TODO use gdbstub's target_cpu_memory_rw_debug - libafl_qemu_sys::cpu_memory_rw_debug( - self.ptr, - addr as GuestVirtAddr, - buf.as_mut_ptr() as *mut _, - buf.len(), - false, - ); + unsafe { + libafl_qemu_sys::cpu_memory_rw_debug( + self.ptr, + addr as GuestVirtAddr, + buf.as_ptr() as *mut _, + buf.len(), + true, + ) + }; } } @@ -179,6 +182,12 @@ impl Qemu { } /// Write a value to a physical guest address, including ROM areas. + /// + /// # Safety + /// + /// No check is done on the correctness of the operation at the moment. + /// Nothing bad will happen if the operation is incorrect, but it will be silently skipped. + // TODO: use address_space_rw and check for the result MemTxResult pub unsafe fn write_phys_mem(&self, paddr: GuestPhysAddr, buf: &[u8]) { libafl_qemu_sys::cpu_physical_memory_rw( paddr, @@ -188,7 +197,13 @@ impl Qemu { ); } - /// Read a value from a physical guest address. + /// Read a value from a physical guest address, including ROM areas. + /// + /// # Safety + /// + /// No check is done on the correctness of the operation at the moment. + /// Nothing bad will happen if the operation is incorrect, but it will be silently skipped. + // TODO: use address_space_rw and check for the result MemTxResult pub unsafe fn read_phys_mem(&self, paddr: GuestPhysAddr, buf: &mut [u8]) { libafl_qemu_sys::cpu_physical_memory_rw( paddr, @@ -198,28 +213,20 @@ impl Qemu { ); } - /// This function will run the emulator until the next breakpoint / sync exit, or until finish. - /// It is a low-level function and simply kicks QEMU. - /// # Safety - /// - /// Should, in general, be safe to call. - /// Of course, the emulated target is not contained securely and can corrupt state or interact with the operating system. 
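As a usage sketch for the physical-memory accessors documented above (systemmode only): both calls go through `cpu_physical_memory_rw`, and, as the new safety notes state, an incorrect address is currently skipped silently rather than reported. The `qemu` handle and the address below are placeholders:

let mut bytes = [0u8; 4];
unsafe {
    // Read four bytes of guest-physical memory, then write them back unchanged.
    qemu.read_phys_mem(0x0800_0000, &mut bytes);
    qemu.write_phys_mem(0x0800_0000, &bytes);
}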
- pub unsafe fn run(&self) -> Result { - libafl_start_int_timer(); + pub(super) unsafe fn run_inner(&self) { + libafl_start_int_timer(); // prepare interrupt timers vm_start(); qemu_main_loop(); - - self.post_run() } pub fn save_snapshot(&self, name: &str, sync: bool) { let s = CString::new(name).expect("Invalid snapshot name"); - unsafe { libafl_save_qemu_snapshot(s.as_ptr() as *const _, sync) }; + unsafe { libafl_save_qemu_snapshot(s.as_ptr() as *mut i8, sync) }; } pub fn load_snapshot(&self, name: &str, sync: bool) { let s = CString::new(name).expect("Invalid snapshot name"); - unsafe { libafl_load_qemu_snapshot(s.as_ptr() as *const _, sync) }; + unsafe { libafl_load_qemu_snapshot(s.as_ptr() as *mut i8, sync) }; } #[must_use] @@ -261,9 +268,7 @@ impl Qemu { ) -> QemuSnapshotCheckResult { let check_result = libafl_qemu_sys::syx_snapshot_check(ref_snapshot); - QemuSnapshotCheckResult { - nb_page_inconsistencies: check_result.nb_inconsistencies, - } + QemuSnapshotCheckResult::new(check_result.nb_inconsistencies) } pub fn list_devices(&self) -> Vec { @@ -294,7 +299,7 @@ impl Qemu { } } -impl EmulatorMemoryChunk { +impl QemuMemoryChunk { pub fn phys_iter(&self, qemu: Qemu) -> PhysMemoryIter { PhysMemoryIter { addr: self.addr, diff --git a/libafl_qemu/src/qemu/usermode.rs b/libafl_qemu/src/qemu/usermode.rs index fe5fb1eca7..d4d3b89efa 100644 --- a/libafl_qemu/src/qemu/usermode.rs +++ b/libafl_qemu/src/qemu/usermode.rs @@ -1,22 +1,19 @@ use std::{ - intrinsics::copy_nonoverlapping, mem::MaybeUninit, slice::from_raw_parts, - str::from_utf8_unchecked, + intrinsics::copy_nonoverlapping, mem::MaybeUninit, slice::from_raw_parts_mut, + str::from_utf8_unchecked_mut, }; use libafl_qemu_sys::{ - exec_path, free_self_maps, guest_base, libafl_dump_core_hook, libafl_force_dfl, libafl_get_brk, - libafl_load_addr, libafl_maps_first, libafl_maps_next, libafl_qemu_run, libafl_set_brk, - mmap_next_start, pageflags_get_root, read_self_maps, strlen, GuestAddr, GuestUsize, - IntervalTreeNode, IntervalTreeRoot, MapInfo, MmapPerms, VerifyAccess, + exec_path, free_self_maps, guest_base, libafl_force_dfl, libafl_get_brk, libafl_load_addr, + libafl_maps_first, libafl_maps_next, libafl_qemu_run, libafl_set_brk, mmap_next_start, + pageflags_get_root, read_self_maps, GuestAddr, GuestUsize, IntervalTreeNode, IntervalTreeRoot, + MapInfo, MmapPerms, VerifyAccess, }; -use libc::c_int; +use libc::{c_int, c_uchar, strlen}; #[cfg(feature = "python")] -use pyo3::{pyclass, pymethods, IntoPy, PyObject, PyRef, PyRefMut, Python}; +use pyo3::{pyclass, pymethods, IntoPyObject, Py, PyRef, PyRefMut, Python}; -use crate::{ - HookData, NewThreadHookId, PostSyscallHookId, PreSyscallHookId, Qemu, QemuExitError, - QemuExitReason, SyscallHookResult, CPU, -}; +use crate::{Qemu, CPU}; #[cfg_attr(feature = "python", pyclass(unsendable))] pub struct GuestMaps { @@ -68,8 +65,9 @@ impl GuestMaps { fn __iter__(slf: PyRef) -> PyRef { slf } - fn __next__(mut slf: PyRefMut) -> Option { - Python::with_gil(|py| slf.next().map(|x| x.into_py(py))) + + fn __next__(mut slf: PyRefMut) -> Option> { + Python::with_gil(|py| slf.next().map(|x| x.into_pyobject(py).unwrap().into())) } } @@ -82,27 +80,47 @@ impl Drop for GuestMaps { } impl CPU { - /// Write a value to a guest address. - /// - /// # Safety - /// This will write to a translated guest address (using `g2h`). - /// It just adds `guest_base` and writes to that location, without checking the bounds. - /// This may only be safely used for valid guest addresses! 
- pub unsafe fn write_mem(&self, addr: GuestAddr, buf: &[u8]) { - let host_addr = Qemu::get().unwrap().g2h(addr); - copy_nonoverlapping(buf.as_ptr(), host_addr, buf.len()); - } - /// Read a value from a guest address. + /// The input address is not checked for validity. /// /// # Safety /// This will read from a translated guest address (using `g2h`). /// It just adds `guest_base` and writes to that location, without checking the bounds. /// This may only be safely used for valid guest addresses! - pub unsafe fn read_mem(&self, addr: GuestAddr, buf: &mut [u8]) { + pub unsafe fn read_mem_unchecked(&self, addr: GuestAddr, buf: &mut [u8]) { let host_addr = Qemu::get().unwrap().g2h(addr); copy_nonoverlapping(host_addr, buf.as_mut_ptr(), buf.len()); } + + /// Write a value to a guest address. + /// The input address in not checked for validity. + /// + /// # Safety + /// This will write to a translated guest address (using `g2h`). + /// It just adds `guest_base` and writes to that location, without checking the bounds. + /// This may only be safely used for valid guest addresses! + pub unsafe fn write_mem_unchecked(&self, addr: GuestAddr, buf: &[u8]) { + let host_addr = Qemu::get().unwrap().g2h(addr); + copy_nonoverlapping(buf.as_ptr(), host_addr, buf.len()); + } + + #[must_use] + pub fn g2h(&self, addr: GuestAddr) -> *mut T { + unsafe { (addr as usize + guest_base) as *mut T } + } + + #[must_use] + pub fn h2g(&self, addr: *const T) -> GuestAddr { + unsafe { (addr as usize - guest_base) as GuestAddr } + } + + #[must_use] + pub fn access_ok(&self, kind: VerifyAccess, addr: GuestAddr, size: usize) -> bool { + unsafe { + // TODO add support for tagged GuestAddr + libafl_qemu_sys::page_check_range(addr, size as GuestAddr, kind.into()) + } + } } #[allow(clippy::unused_self)] @@ -135,20 +153,18 @@ impl Qemu { } } - /// This function will run the emulator until the next breakpoint, or until finish. - /// # Safety - /// - /// Should, in general, be safe to call. - /// Of course, the emulated target is not contained securely and can corrupt state or interact with the operating system. 
- pub unsafe fn run(&self) -> Result { + pub(super) unsafe fn run_inner(self) { libafl_qemu_run(); - - self.post_run() } #[must_use] pub fn binary_path<'a>(&self) -> &'a str { - unsafe { from_utf8_unchecked(from_raw_parts(exec_path, strlen(exec_path))) } + unsafe { + from_utf8_unchecked_mut(from_raw_parts_mut( + exec_path as *mut c_uchar, + strlen(exec_path.cast_const()), + )) + } } #[must_use] @@ -237,110 +253,20 @@ impl Qemu { Err(format!("Failed to unmap {addr}")) } } - - #[allow(clippy::type_complexity)] - pub fn add_pre_syscall_hook>( - &self, - data: T, - callback: extern "C" fn( - T, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> SyscallHookResult, - ) -> PreSyscallHookId { - unsafe { - let data: u64 = data.into().0; - let callback: extern "C" fn( - u64, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> libafl_qemu_sys::syshook_ret = core::mem::transmute(callback); - let num = libafl_qemu_sys::libafl_add_pre_syscall_hook(Some(callback), data); - PreSyscallHookId(num) - } - } - - #[allow(clippy::type_complexity)] - pub fn add_post_syscall_hook>( - &self, - data: T, - callback: extern "C" fn( - T, - GuestAddr, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> GuestAddr, - ) -> PostSyscallHookId { - unsafe { - let data: u64 = data.into().0; - let callback: extern "C" fn( - u64, - GuestAddr, - i32, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - GuestAddr, - ) -> GuestAddr = core::mem::transmute(callback); - let num = libafl_qemu_sys::libafl_add_post_syscall_hook(Some(callback), data); - PostSyscallHookId(num) - } - } - - pub fn add_new_thread_hook>( - &self, - data: T, - callback: extern "C" fn(T, tid: u32) -> bool, - ) -> NewThreadHookId { - unsafe { - let data: u64 = data.into().0; - let callback: extern "C" fn(u64, u32) -> bool = core::mem::transmute(callback); - let num = libafl_qemu_sys::libafl_add_new_thread_hook(Some(callback), data); - NewThreadHookId(num) - } - } - - #[allow(clippy::type_complexity)] - pub fn set_crash_hook(&self, callback: extern "C" fn(i32)) { - unsafe { - libafl_dump_core_hook = callback; - } - } } #[cfg(feature = "python")] pub mod pybind { use libafl_qemu_sys::{GuestAddr, MmapPerms}; use pyo3::{ - exceptions::PyValueError, pymethods, types::PyInt, FromPyObject, PyObject, PyResult, Python, + conversion::FromPyObject, + exceptions::PyValueError, + pymethods, + types::{PyAnyMethods, PyInt}, + Bound, PyObject, PyResult, Python, }; - use crate::{pybind::Qemu, SyscallHookResult}; + use crate::{pybind::Qemu, qemu::hooks::SyscallHookResult}; static mut PY_SYSCALL_HOOK: Option = None; @@ -362,17 +288,17 @@ pub mod pybind { let args = (sys_num, a0, a1, a2, a3, a4, a5, a6, a7); Python::with_gil(|py| { let ret = obj.call1(py, args).expect("Error in the syscall hook"); - let any = ret.as_ref(py); + let any = ret.bind(py); if any.is_none() { SyscallHookResult::new(None) } else { - let a: Result<&PyInt, _> = any.downcast(); + let a: Result<&Bound<'_, PyInt>, _> = any.downcast_exact(); if let Ok(i) = a { SyscallHookResult::new(Some( i.extract().expect("Invalid syscall hook return value"), )) } else { - SyscallHookResult::extract(any) + SyscallHookResult::extract_bound(ret.bind(py)) .expect("The syscall hook must return a SyscallHookResult") } } @@ -433,11 +359,14 @@ pub mod pybind { self.qemu.unmap(addr, 
size).map_err(PyValueError::new_err) } - fn set_syscall_hook(&self, hook: PyObject) { + /// # Safety + /// Accesses the global `PY_SYSCALL_HOOK` and may not be called concurrently. + unsafe fn set_syscall_hook(&self, hook: PyObject) { unsafe { PY_SYSCALL_HOOK = Some(hook); } self.qemu + .hooks() .add_pre_syscall_hook(0u64, py_syscall_hook_wrapper); } } diff --git a/libafl_qemu/src/sync_exit.rs b/libafl_qemu/src/sync_exit.rs index 3d18fedd0b..558e28bb60 100644 --- a/libafl_qemu/src/sync_exit.rs +++ b/libafl_qemu/src/sync_exit.rs @@ -1,15 +1,9 @@ -use std::{ - fmt::{Display, Formatter}, - rc::Rc, -}; +use std::fmt::{Debug, Formatter}; use enum_map::Enum; -use libafl::state::{HasExecutions, State}; +use libafl::inputs::UsesInput; -use crate::{ - command::{CommandManager, IsCommand}, - get_exit_arch_regs, EmulatorExitHandler, GuestReg, QemuHelperTuple, Regs, CPU, -}; +use crate::{command::CommandManager, get_exit_arch_regs, GuestReg, Regs, CPU}; #[derive(Debug, Clone, Enum)] pub enum ExitArgs { @@ -23,32 +17,49 @@ pub enum ExitArgs { Arg6, } -#[derive(Debug)] -pub struct SyncExit +pub struct SyncExit where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, { - command: Rc>, + command: CM::Commands, } -impl SyncExit +impl Clone for SyncExit where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, + CM: CommandManager, + S: UsesInput, +{ + fn clone(&self) -> Self { + Self { + command: self.command.clone(), + } + } +} + +impl Debug for SyncExit +where + CM: CommandManager, + S: UsesInput, +{ + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "Sync Exit") + } +} + +impl SyncExit +where + CM: CommandManager, + S: UsesInput, { #[must_use] - pub fn new(command: Rc>) -> Self { + pub fn new(command: CM::Commands) -> Self { Self { command } } #[must_use] - pub fn command(&self) -> Rc> { - self.command.clone() + pub fn command(&self) -> &CM::Commands { + &self.command } pub fn ret(&self, cpu: &CPU, value: GuestReg) { @@ -61,15 +72,3 @@ where get_exit_arch_regs()[ExitArgs::Ret] } } - -impl Display for SyncExit -where - CM: CommandManager, - E: EmulatorExitHandler, - QT: QemuHelperTuple, - S: State + HasExecutions, -{ - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.command) - } -} diff --git a/libafl_sugar/Cargo.toml b/libafl_sugar/Cargo.toml index 10b5f33aed..b32a3ff708 100644 --- a/libafl_sugar/Cargo.toml +++ b/libafl_sugar/Cargo.toml @@ -10,7 +10,13 @@ license = "MIT OR Apache-2.0" keywords = ["fuzzing"] edition = "2021" build = "build.rs" -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] [package.metadata.docs.rs] all-features = true @@ -18,7 +24,9 @@ all-features = true [features] default = [] document-features = ["dep:document-features"] -clippy = [ "libafl_qemu/clippy" ] # special feature for clippy, don't use in normal projects +clippy = [ + "libafl_qemu/clippy", +] # special feature for clippy, don't use in normal projects #! # Feature Flags #! 
## General Features @@ -43,25 +51,32 @@ mips = ["libafl_qemu/mips"] ppc = ["libafl_qemu/ppc"] ## build qemu for hexagon hexagon = ["libafl_qemu/hexagon"] +## build qemu for riscv 32bit +riscv32 = ["libafl_qemu/riscv32"] +## build qemu for riscv 64bit +riscv64 = ["libafl_qemu/riscv64"] [build-dependencies] -pyo3-build-config = { version = "0.21", optional = true } +pyo3-build-config = { workspace = true, optional = true } [dependencies] -libafl = { path = "../libafl", version = "0.13.0" } -libafl_bolts = { path = "../libafl_bolts", version = "0.13.0" } -libafl_targets = { path = "../libafl_targets", version = "0.13.0" } +libafl = { workspace = true, default-features = true } +libafl_bolts = { workspace = true, default-features = true } +libafl_targets = { workspace = true, default-features = true } # Document all features of this crate (for `cargo doc`) -document-features = { version = "0.2", optional = true } +document-features = { workspace = true, optional = true } -typed-builder = "0.18" # Implement the builder pattern at compiletime -pyo3 = { version = "0.18", optional = true } -log = "0.4.20" +typed-builder = { workspace = true } # Implement the builder pattern at compiletime +pyo3 = { workspace = true, optional = true } +log = { workspace = true } [target.'cfg(target_os = "linux")'.dependencies] -libafl_qemu = { path = "../libafl_qemu", version = "0.13.0" } +libafl_qemu = { workspace = true, default-features = true } [lib] name = "libafl_sugar" crate-type = ["cdylib", "rlib"] + +[lints] +workspace = true diff --git a/libafl_sugar/src/forkserver.rs b/libafl_sugar/src/forkserver.rs index 9ac5574670..07541cf847 100644 --- a/libafl_sugar/src/forkserver.rs +++ b/libafl_sugar/src/forkserver.rs @@ -12,7 +12,8 @@ use libafl::{ generators::RandBytesGenerator, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::Tokens, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, @@ -23,6 +24,7 @@ use libafl::{ }; use libafl_bolts::{ core_affinity::Cores, + nonzero, rands::StdRand, shmem::{ShMem, ShMemProvider, UnixShMemProvider}, tuples::{tuple_list, Handled, Merge}, @@ -75,7 +77,7 @@ pub struct ForkserverBytesCoverageSugar<'a> { } #[allow(clippy::similar_names)] -impl<'a> ForkserverBytesCoverageSugar<'a> { +impl ForkserverBytesCoverageSugar<'_> { /// Runs the fuzzer. 
#[allow(clippy::too_many_lines, clippy::similar_names)] pub fn run(&mut self) { @@ -212,7 +214,7 @@ impl<'a> ForkserverBytesCoverageSugar<'a> { if state.must_load_initial_inputs() { if self.input_dirs.is_empty() { // Generator of printable bytearrays of max size 32 - let mut generator = RandBytesGenerator::new(32); + let mut generator = RandBytesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state @@ -335,6 +337,16 @@ pub mod pybind { /// Create a new [`ForkserverBytesCoverageSugar`] #[new] #[allow(clippy::too_many_arguments)] + #[pyo3(signature = ( + input_dirs, + output_dir, + broker_port, + cores, + use_cmplog=None, + iterations=None, + tokens_file=None, + timeout=None + ))] fn new( input_dirs: Vec, output_dir: PathBuf, @@ -377,7 +389,7 @@ pub mod pybind { } /// Register the module - pub fn register(_py: Python, m: &PyModule) -> PyResult<()> { + pub fn register(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; Ok(()) } diff --git a/libafl_sugar/src/inmemory.rs b/libafl_sugar/src/inmemory.rs index cc05399f18..dfac4dd2d6 100644 --- a/libafl_sugar/src/inmemory.rs +++ b/libafl_sugar/src/inmemory.rs @@ -15,7 +15,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::{I2SRandReplace, Tokens}, }, observers::{CanTrack, HitcountsMapObserver, StdMapObserver, TimeObserver}, @@ -26,6 +27,7 @@ use libafl::{ }; use libafl_bolts::{ core_affinity::Cores, + nonzero, ownedref::OwnedMutSlice, rands::StdRand, shmem::{ShMemProvider, StdShMemProvider}, @@ -108,7 +110,7 @@ where } #[allow(clippy::similar_names)] -impl<'a, H> InMemoryBytesCoverageSugar<'a, H> +impl InMemoryBytesCoverageSugar<'_, H> where H: FnMut(&[u8]), { @@ -228,7 +230,7 @@ where if state.must_load_initial_inputs() { if self.input_dirs.is_empty() { // Generator of printable bytearrays of max size 32 - let mut generator = RandBytesGenerator::new(32); + let mut generator = RandBytesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state @@ -392,6 +394,16 @@ pub mod pybind { /// Create a new [`InMemoryBytesCoverageSugar`] #[new] #[allow(clippy::too_many_arguments)] + #[pyo3(signature = ( + input_dirs, + output_dir, + broker_port, + cores, + use_cmplog=None, + iterations=None, + tokens_file=None, + timeout=None + ))] fn new( input_dirs: Vec, output_dir: PathBuf, @@ -440,7 +452,7 @@ pub mod pybind { } /// Register the module - pub fn register(_py: Python, m: &PyModule) -> PyResult<()> { + pub fn register(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; Ok(()) } diff --git a/libafl_sugar/src/lib.rs b/libafl_sugar/src/lib.rs index 459562a437..aefb6e0224 100644 --- a/libafl_sugar/src/lib.rs +++ b/libafl_sugar/src/lib.rs @@ -1,22 +1,6 @@ //! Sugar API to simplify the life of users of `LibAFL` that just want to fuzz. /*! 
*/ #![cfg_attr(feature = "document-features", doc = document_features::document_features!())] -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(clippy::all)] -#![deny(clippy::pedantic)] -#![forbid(unexpected_cfgs)] -#![allow( - clippy::unreadable_literal, - clippy::type_repetition_in_bounds, - clippy::missing_errors_doc, - clippy::cast_possible_truncation, - clippy::used_underscore_binding, - clippy::ptr_as_ptr, - clippy::missing_panics_doc, - clippy::missing_docs_in_private_items, - clippy::module_name_repetitions, - clippy::unreadable_literal -)] #![cfg_attr(not(test), warn( missing_debug_implementations, missing_docs, @@ -86,15 +70,15 @@ use pyo3::prelude::*; #[cfg(feature = "python")] #[pymodule] #[pyo3(name = "libafl_sugar")] -pub fn python_module(py: Python, m: &PyModule) -> PyResult<()> { - inmemory::pybind::register(py, m)?; +pub fn python_module(m: &Bound<'_, PyModule>) -> PyResult<()> { + inmemory::pybind::register(m)?; #[cfg(target_os = "linux")] { - qemu::pybind::register(py, m)?; + qemu::pybind::register(m)?; } #[cfg(unix)] { - forkserver::pybind::register(py, m)?; + forkserver::pybind::register(m)?; } Ok(()) } diff --git a/libafl_sugar/src/qemu.rs b/libafl_sugar/src/qemu.rs index aa1ca8839c..e0126c3b01 100644 --- a/libafl_sugar/src/qemu.rs +++ b/libafl_sugar/src/qemu.rs @@ -1,9 +1,6 @@ //! In-memory fuzzer with `QEMU`-based binary-only instrumentation //! -use core::{ - fmt::{self, Debug, Formatter}, - ptr::addr_of_mut, -}; +use core::fmt::{self, Debug, Formatter}; use std::{fs, net::SocketAddr, path::PathBuf, time::Duration}; use libafl::{ @@ -17,7 +14,8 @@ use libafl::{ inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, mutators::{ - scheduled::{havoc_mutations, tokens_mutations, StdScheduledMutator}, + havoc_mutations::havoc_mutations, + scheduled::{tokens_mutations, StdScheduledMutator}, token_mutations::Tokens, I2SRandReplace, }, @@ -29,17 +27,18 @@ use libafl::{ }; use libafl_bolts::{ core_affinity::Cores, + nonzero, ownedref::OwnedMutSlice, rands::StdRand, shmem::{ShMemProvider, StdShMemProvider}, tuples::{tuple_list, Handled, Merge}, AsSlice, }; -pub use libafl_qemu::qemu::Qemu; #[cfg(not(any(feature = "mips", feature = "hexagon")))] -use libafl_qemu::QemuCmpLogHelper; -use libafl_qemu::{edges, QemuEdgeCoverageHelper, QemuExecutor, QemuHooks}; -use libafl_targets::{edges_map_mut_ptr, CmpLogObserver}; +use libafl_qemu::modules::CmpLogModule; +pub use libafl_qemu::qemu::Qemu; +use libafl_qemu::{modules::edges::StdEdgeCoverageModule, Emulator, QemuExecutor}; +use libafl_targets::{edges_map_mut_ptr, CmpLogObserver, EDGES_MAP_DEFAULT_SIZE, MAX_EDGES_FOUND}; use typed_builder::TypedBuilder; use crate::{CORPUS_CACHE_SIZE, DEFAULT_TIMEOUT_SECS}; @@ -85,7 +84,7 @@ where iterations: Option, } -impl<'a, H> Debug for QemuBytesCoverageSugar<'a, H> +impl Debug for QemuBytesCoverageSugar<'_, H> where H: FnMut(&[u8]), { @@ -113,13 +112,13 @@ where } } -impl<'a, H> QemuBytesCoverageSugar<'a, H> +impl QemuBytesCoverageSugar<'_, H> where H: FnMut(&[u8]), { /// Run the fuzzer #[allow(clippy::too_many_lines, clippy::similar_names)] - pub fn run(&mut self, qemu: &Qemu) { + pub fn run(&mut self, qemu: Qemu) { let conf = match self.configuration.as_ref() { Some(name) => EventConfig::from_name(name), None => EventConfig::AlwaysUnique, @@ -156,14 +155,11 @@ where let time_observer = time_observer.clone(); // Create an observation channel using the coverage map - let edges_observer = unsafe { + let mut edges_observer = unsafe { 
HitcountsMapObserver::new(VariableMapObserver::from_mut_slice( "edges", - OwnedMutSlice::from_raw_parts_mut( - edges_map_mut_ptr(), - edges::EDGES_MAP_SIZE_IN_USE, - ), - addr_of_mut!(edges::MAX_EDGES_FOUND), + OwnedMutSlice::from_raw_parts_mut(edges_map_mut_ptr(), EDGES_MAP_DEFAULT_SIZE), + &raw mut MAX_EDGES_FOUND, )) .track_indices() }; @@ -214,27 +210,40 @@ where let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); // The wrapped harness function, calling out to the LLVM-style harness - let mut harness = |input: &BytesInput| { - let target = input.target_bytes(); - let buf = target.as_slice(); - harness_bytes(buf); - ExitKind::Ok - }; - if self.use_cmplog.unwrap_or(false) { - let mut hooks = QemuHooks::new( - *qemu, + let modules = { #[cfg(not(any(feature = "mips", feature = "hexagon")))] - tuple_list!( - QemuEdgeCoverageHelper::default(), - QemuCmpLogHelper::default(), - ), + { + tuple_list!( + StdEdgeCoverageModule::builder() + .map_observer(edges_observer.as_mut()) + .build() + .unwrap(), + CmpLogModule::default(), + ) + } #[cfg(any(feature = "mips", feature = "hexagon"))] - tuple_list!(QemuEdgeCoverageHelper::default()), - ); + { + tuple_list!(StdEdgeCoverageModule::builder() + .map_observer(edges_observer.as_mut()) + .build() + .unwrap()) + } + }; + + let mut harness = |_emulator: &mut Emulator<_, _, _, _, _>, + _state: &mut _, + input: &BytesInput| { + let target = input.target_bytes(); + let buf = target.as_slice(); + harness_bytes(buf); + ExitKind::Ok + }; + + let emulator = Emulator::empty().qemu(qemu).modules(modules).build()?; let executor = QemuExecutor::new( - &mut hooks, + emulator, &mut harness, tuple_list!(edges_observer, time_observer), &mut fuzzer, @@ -248,7 +257,7 @@ where if state.must_load_initial_inputs() { if self.input_dirs.is_empty() { // Generator of printable bytearrays of max size 32 - let mut generator = RandBytesGenerator::new(32); + let mut generator = RandBytesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state @@ -334,11 +343,24 @@ where } } } else { - let mut hooks = - QemuHooks::new(*qemu, tuple_list!(QemuEdgeCoverageHelper::default())); + let modules = tuple_list!(StdEdgeCoverageModule::builder() + .map_observer(edges_observer.as_mut()) + .build() + .unwrap()); + + let mut harness = |_emulator: &mut Emulator<_, _, _, _, _>, + _state: &mut _, + input: &BytesInput| { + let target = input.target_bytes(); + let buf = target.as_slice(); + harness_bytes(buf); + ExitKind::Ok + }; + + let emulator = Emulator::empty().qemu(qemu).modules(modules).build()?; let mut executor = QemuExecutor::new( - &mut hooks, + emulator, &mut harness, tuple_list!(edges_observer, time_observer), &mut fuzzer, @@ -351,7 +373,7 @@ where if state.must_load_initial_inputs() { if self.input_dirs.is_empty() { // Generator of printable bytearrays of max size 32 - let mut generator = RandBytesGenerator::new(32); + let mut generator = RandBytesGenerator::new(nonzero!(32)); // Generate 8 initial inputs state @@ -477,6 +499,16 @@ pub mod pybind { /// Create a new [`QemuBytesCoverageSugar`] #[new] #[allow(clippy::too_many_arguments)] + #[pyo3(signature = ( + input_dirs, + output_dir, + broker_port, + cores, + use_cmplog=None, + iterations=None, + tokens_file=None, + timeout=None + ))] fn new( input_dirs: Vec, output_dir: PathBuf, @@ -520,12 +552,12 @@ pub mod pybind { .tokens_file(self.tokens_file.clone()) .iterations(self.iterations) .build() - .run(&qemu.qemu); + .run(qemu.qemu); } } /// Register this class - pub fn register(_py: Python, m: &PyModule) -> 
PyResult<()> { + pub fn register(m: &Bound<'_, PyModule>) -> PyResult<()> { m.add_class::()?; Ok(()) } diff --git a/libafl_targets/Cargo.toml b/libafl_targets/Cargo.toml index b28eb2e610..8a5f741b90 100644 --- a/libafl_targets/Cargo.toml +++ b/libafl_targets/Cargo.toml @@ -9,12 +9,13 @@ readme = "../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "testing"] edition = "2021" +rust-version = "1.82" categories = [ - "development-tools::testing", - "emulators", - "embedded", - "os", - "no-std", + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", ] [package.metadata.docs.rs] @@ -22,13 +23,13 @@ all-features = true [features] default = [ - "std", - "sanitizers_flags", - "windows_asan", - "forkserver", - "cmplog", - "coverage", - "common", + "std", + "sanitizers_flags", + "windows_asan", + "forkserver", + "cmplog", + "coverage", + "common", ] std = ["libafl/std"] introspection = ["libafl/introspection"] @@ -46,36 +47,44 @@ sancov_8bit = [] sancov_ngram4 = ["coverage"] sancov_ngram8 = ["coverage"] sancov_ctx = ["coverage"] -sancov_cmplog = ["common"] # Defines cmp and __sanitizer_weak_hook functions. Use libfuzzer_interceptors to define interceptors (only compatible with Linux) +sancov_cmplog = [ + "common", +] # Defines cmp and __sanitizer_weak_hook functions. Use libfuzzer_interceptors to define interceptors (only compatible with Linux) sancov_pcguard = ["sancov_pcguard_hitcounts"] sanitizer_interfaces = [] clippy = [] # Ignore compiler warnings during clippy observers = ["meminterval", "ahash"] -common = [] # Compile common C code defining sanitizer options and cross-platform intrinsics +common = [ +] # Compile common C code defining sanitizer options and cross-platform intrinsics coverage = ["common"] # Compile C code definining coverage maps cmplog = ["common"] # Compile C code defining cmp log maps forkserver = ["common"] # Compile C code for forkserver support windows_asan = ["common"] # Compile C code for ASAN on Windows whole_archive = [] # use +whole-archive to ensure the presence of weak symbols -cmplog_extended_instrumentation = [] # support for aflpp cmplog map, we will remove this once aflpp and libafl cmplog shares the same LLVM passes. +cmplog_extended_instrumentation = [ +] # support for aflpp cmplog map, we will remove this once aflpp and libafl cmplog shares the same LLVM passes. 
function-logging = ["common"] track_hit_feedbacks = ["libafl/track_hit_feedbacks"] [build-dependencies] -bindgen = "0.69.4" -cc = { version = "1.0", features = ["parallel"] } -rustversion = "1.0" +bindgen = "0.70.1" +cc = { version = "1.1.21", features = ["parallel"] } +rustversion = "1.0.17" [dependencies] -libafl = { path = "../libafl", version = "0.13.0", default-features = false, features = [] } -libafl_bolts = { path = "../libafl_bolts", version = "0.13.0", default-features = false, features = [] } -libc = "0.2" -hashbrown = "0.14" -once_cell = "1.19" -log = "0.4.20" -rustversion = "1.0" +libafl = { workspace = true, features = [] } +libafl_bolts = { workspace = true, features = [] } +libc = { workspace = true } +hashbrown = { workspace = true, default-features = true } +once_cell = "1.19.0" +log = { workspace = true } +rustversion = { workspace = true } -rangemap = "1.3" -serde = { version = "1.0", default-features = false, features = ["alloc"] } # serialization lib -meminterval = { version = "0.4", features = ["serde"], optional = true } -ahash = { version = "0.8.3", default-features = false, optional = true } -# serde-big-array = "0.3.2" +rangemap = { workspace = true } +serde = { workspace = true, default-features = false, features = [ + "alloc", +] } # serialization lib +meminterval = { workspace = true, features = ["serde"], optional = true } +ahash = { workspace = true, default-features = false, optional = true } + +[lints] +workspace = true diff --git a/libafl_targets/build.rs b/libafl_targets/build.rs index 674851d41e..3f92a0004c 100644 --- a/libafl_targets/build.rs +++ b/libafl_targets/build.rs @@ -26,36 +26,47 @@ fn main() { let dest_path = Path::new(&out_dir).join("constants.rs"); let mut constants_file = File::create(dest_path).expect("Could not create file"); - let edges_map_size_max: usize = option_env!("LIBAFL_EDGES_MAP_SIZE_MAX") + let edges_map_allocated_size: usize = option_env!("LIBAFL_EDGES_MAP_ALLOCATED_SIZE") + .or(option_env!("LIBAFL_EDGES_MAP_ALLOCATED_SIZE")) // keep old env for retrocompatibility .map_or(Ok(TWO_MB), str::parse) - .expect("Could not parse LIBAFL_EDGES_MAP_SIZE_MAX"); - let edges_map_size_in_use: usize = option_env!("LIBAFL_EDGES_MAP_SIZE_IN_USE") + .expect("Could not parse LIBAFL_EDGES_MAP_ALLOCATED_SIZE"); + + let edges_map_default_size: usize = option_env!("LIBAFL_EDGES_MAP_DEFAULT_SIZE") + .or(option_env!("LIBAFL_EDGES_MAP_DEFAULT_SIZE")) // keep old env for retrocompatibility .map_or(Ok(SIXTY_FIVE_KB), str::parse) - .expect("Could not parse LIBAFL_EDGES_MAP_SIZE_IN_USE"); + .expect("Could not parse LIBAFL_EDGES_MAP_DEFAULT_SIZE"); + let cmp_map_size: usize = option_env!("LIBAFL_CMP_MAP_SIZE") .map_or(Ok(SIXTY_FIVE_KB), str::parse) .expect("Could not parse LIBAFL_CMP_MAP_SIZE"); + let cmplog_map_w: usize = option_env!("LIBAFL_CMPLOG_MAP_W") .map_or(Ok(SIXTY_FIVE_KB), str::parse) .expect("Could not parse LIBAFL_CMPLOG_MAP_W"); + let cmplog_map_h: usize = option_env!("LIBAFL_CMPLOG_MAP_H") .map_or(Ok(32), str::parse) .expect("Could not parse LIBAFL_CMPLOG_MAP_H"); + let acc_map_size: usize = option_env!("LIBAFL_ACCOUNTING_MAP_SIZE") .map_or(Ok(SIXTY_FIVE_KB), str::parse) .expect("Could not parse LIBAFL_ACCOUNTING_MAP_SIZE"); + let ddg_map_size: usize = option_env!("LIBAFL_DDG_MAP_SIZE") .map_or(Ok(SIXTY_FIVE_KB), str::parse) .expect("Could not parse LIBAFL_DDG_MAP_SIZE"); + assert!(edges_map_default_size <= edges_map_allocated_size); + assert!(edges_map_default_size.is_power_of_two()); + write!( constants_file, "// These constants are 
autogenerated by build.rs /// The default size of the edges map the fuzzer uses - pub const EDGES_MAP_SIZE_IN_USE: usize = {edges_map_size_in_use}; + pub const EDGES_MAP_DEFAULT_SIZE: usize = {edges_map_default_size}; /// The real allocated size of the edges map - pub const EDGES_MAP_SIZE_MAX: usize = {edges_map_size_max}; + pub const EDGES_MAP_ALLOCATED_SIZE: usize = {edges_map_allocated_size}; /// The size of the cmps map pub const CMP_MAP_SIZE: usize = {cmp_map_size}; /// The width of the `CmpLog` map @@ -70,7 +81,10 @@ fn main() { ) .expect("Could not write file"); - println!("cargo:rerun-if-env-changed=LIBAFL_EDGES_MAP_SIZE_IN_USE"); + println!("cargo:rerun-if-env-changed=LIBAFL_EDGES_MAP_DEFAULT_SIZE"); + println!("cargo:rerun-if-env-changed=LIBAFL_EDGES_MAP_DEFAULT_SIZE"); + println!("cargo:rerun-if-env-changed=LIBAFL_EDGES_MAP_ALLOCATED_SIZE"); + println!("cargo:rerun-if-env-changed=LIBAFL_EDGES_MAP_ALLOCATED_SIZE"); println!("cargo:rerun-if-env-changed=LIBAFL_CMP_MAP_SIZE"); println!("cargo:rerun-if-env-changed=LIBAFL_CMPLOG_MAP_W"); println!("cargo:rerun-if-env-changed=LIBAFL_CMPLOG_MAP_H"); @@ -160,8 +174,8 @@ fn main() { cc::Build::new() .file(src_dir.join("coverage.c")) .define( - "EDGES_MAP_SIZE_MAX", - Some(&*format!("{edges_map_size_max}")), + "EDGES_MAP_ALLOCATED_SIZE", + Some(&*format!("{edges_map_allocated_size}")), ) .define("ACCOUNTING_MAP_SIZE", Some(&*format!("{acc_map_size}"))) .define("DDG_MAP_SIZE", Some(&*format!("{ddg_map_size}"))) diff --git a/libafl_targets/src/call.rs b/libafl_targets/src/call.rs index 581a3f2fcd..c3fcc38892 100644 --- a/libafl_targets/src/call.rs +++ b/libafl_targets/src/call.rs @@ -12,9 +12,12 @@ pub static mut FUNCTION_LIST: Lazy> = Lazy::new(HashMap::n #[no_mangle] /// The runtime code inserted at every callinst invokation (if you used the function-logging.cc) /// # Safety -/// unsafe because it touches pub static mut +/// unsafe because it touches the pub static mut `FUNCTION_LIST`. +/// May not be called concurrently. pub unsafe extern "C" fn __libafl_target_call_hook(id: usize) { - *FUNCTION_LIST.entry(id).or_insert(0) += 1; + let function_list_ptr = &raw mut FUNCTION_LIST; + let function_list = &mut *function_list_ptr; + *function_list.entry(id).or_insert(0) += 1; } /// The empty struct to clear the `FUNCTION_LIST` before the execution @@ -41,7 +44,14 @@ where fn pre_exec(&mut self, _state: &mut S, _input: &::Input) { // clear it before the execution - unsafe { FUNCTION_LIST.clear() } + // # Safety + // This typically happens while no other execution happens. + // In theory there is a race, but we can ignore it _for this use case_. 
+ unsafe { + let function_list_ptr = &raw mut FUNCTION_LIST; + let function_list = &mut *function_list_ptr; + function_list.clear(); + } } fn post_exec(&mut self, _state: &mut S, _input: &::Input) {} diff --git a/libafl_targets/src/cmplog.c b/libafl_targets/src/cmplog.c index ffaec50f3b..db4d51b667 100644 --- a/libafl_targets/src/cmplog.c +++ b/libafl_targets/src/cmplog.c @@ -99,7 +99,7 @@ static inline long area_is_valid(const void *ptr, size_t len) { // Very generic cmplog instructions callback void __libafl_targets_cmplog_instructions(uintptr_t k, uint8_t shape, uint64_t arg1, uint64_t arg2) { - cmplog_instructions_checked(k, shape, arg1, arg2); + cmplog_instructions_checked(k, shape, arg1, arg2, 0); } // Very generic cmplog routines callback @@ -145,7 +145,7 @@ void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2) { k = (k >> 4) ^ (k << 8); k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, 1, arg1, arg2); + cmplog_instructions_checked(k, 1, arg1, arg2, 0); } void __cmplog_ins_hook2_extended(uint16_t arg1, uint16_t arg2, uint8_t attr) { @@ -160,7 +160,7 @@ void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2) { k = (k >> 4) ^ (k << 8); k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, 2, arg1, arg2); + cmplog_instructions_checked(k, 2, arg1, arg2, 0); } void __cmplog_ins_hook4_extended(uint32_t arg1, uint32_t arg2, uint8_t attr) { @@ -175,7 +175,7 @@ void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2) { k = (k >> 4) ^ (k << 8); k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, 4, arg1, arg2); + cmplog_instructions_checked(k, 4, arg1, arg2, 0); } void __cmplog_ins_hook8_extended(uint64_t arg1, uint64_t arg2, uint8_t attr) { @@ -190,7 +190,7 @@ void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2) { k = (k >> 4) ^ (k << 8); k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, 8, arg1, arg2); + cmplog_instructions_checked(k, 8, arg1, arg2, 0); } #if !defined(_WIN32) && defined(__SIZEOF_INT128__) @@ -207,7 +207,7 @@ void __cmplog_ins_hook16(uint128_t arg1, uint128_t arg2) { k = (k >> 4) ^ (k << 8); k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, 16, arg1, arg2); + cmplog_instructions_checked(k, 16, arg1, arg2, 0); } void __cmplog_ins_hookN_extended(uint128_t arg1, uint128_t arg2, uint8_t attr, @@ -223,7 +223,7 @@ void __cmplog_ins_hookN(uint128_t arg1, uint128_t arg2, uint8_t size) { k = (k >> 4) ^ (k << 8); k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, size, arg1, arg2); + cmplog_instructions_checked(k, size, arg1, arg2, 0); } #endif /* diff --git a/libafl_targets/src/cmplog.h b/libafl_targets/src/cmplog.h index 05be988e6d..2750a85fde 100644 --- a/libafl_targets/src/cmplog.h +++ b/libafl_targets/src/cmplog.h @@ -4,6 +4,15 @@ #include "common.h" #include +#ifdef _MSC_VER + #define PACKED(__Declaration__) \ + __pragma(pack(push, 1)) __Declaration__ __pragma(pack(pop)) +#endif + +#ifndef _MSC_VER + #define PACKED(__Declaration__) __Declaration__ __attribute__((__packed__)) +#endif + #ifndef CMPLOG_MAP_W #define CMPLOG_MAP_W 65536 #endif @@ -11,12 +20,7 @@ #define CMPLOG_MAP_H 32 #endif -// difference between aflpp and libafl -#ifdef CMPLOG_EXTENDED - #define CMPLOG_RTN_LEN 31 -#else - #define CMPLOG_RTN_LEN 32 -#endif +#define CMPLOG_RTN_LEN 32 #define CMPLOG_MAP_RTN_H \ ((CMPLOG_MAP_H * sizeof(CmpLogInstruction)) / sizeof(CmpLogRoutine)) @@ -33,46 +37,44 @@ typedef struct CmpLogHeader { uint8_t kind; } CmpLogHeader; -#ifndef _WIN32 -typedef struct CmpLogHeaderExtended { +typedef PACKED(struct CmpLogHeaderExtended { unsigned hits : 6; unsigned shape : 5; 
unsigned type : 1; unsigned attribute : 4; -} __attribute__((packed)) CmpLogHeaderExtended; -#else -__pragma(pack(push, 1)) typedef struct CmpLogHeaderExtended { - unsigned hits : 6; - unsigned shape : 5; - unsigned type : 1; - unsigned attribute : 4; -} CmpLogHeaderExtended; -__pragma(pack(pop)) -#endif +}) CmpLogHeaderExtended; typedef struct CmpLogInstruction { uint64_t v0; uint64_t v1; + uint8_t v0_is_const; } CmpLogInstruction; -typedef struct CmpLogInstructionExtended { +typedef PACKED(struct CmpLogInstructionExtended { uint64_t v0; - uint64_t v1; uint64_t v0_128; + uint64_t + v0_256_0; // u256 is unsupported by any compiler for now, so future use + uint64_t v0_256_1; + uint64_t v1; uint64_t v1_128; -} CmpLogInstructionExtended; + uint64_t v1_256_0; + uint64_t v1_256_1; + uint8_t unused[8]; +}) CmpLogInstructionExtended; typedef struct CmpLogRoutine { uint8_t v0[CMPLOG_RTN_LEN]; uint8_t v1[CMPLOG_RTN_LEN]; } CmpLogRoutine; -typedef struct CmpLogRoutineExtended { +typedef PACKED(struct CmpLogRoutineExtended { uint8_t v0[CMPLOG_RTN_LEN]; - uint8_t v0_len; uint8_t v1[CMPLOG_RTN_LEN]; + uint8_t v0_len; uint8_t v1_len; -} CmpLogRoutineExtended; + uint8_t unused[6]; +}) CmpLogRoutineExtended; typedef struct CmpLogMap { CmpLogHeader headers[CMPLOG_MAP_W]; @@ -105,7 +107,8 @@ extern uint8_t libafl_cmplog_enabled; // cmplog_routines_checked_extended static inline void cmplog_instructions_checked(uintptr_t k, uint8_t shape, - uint64_t arg1, uint64_t arg2) { + uint64_t arg1, uint64_t arg2, + uint8_t arg1_is_const) { if (!libafl_cmplog_enabled) { return; } libafl_cmplog_enabled = false; @@ -125,6 +128,7 @@ static inline void cmplog_instructions_checked(uintptr_t k, uint8_t shape, hits &= CMPLOG_MAP_H - 1; libafl_cmplog_map_ptr->vals.operands[k][hits].v0 = arg1; libafl_cmplog_map_ptr->vals.operands[k][hits].v1 = arg2; + libafl_cmplog_map_ptr->vals.operands[k][hits].v0_is_const = arg1_is_const; libafl_cmplog_enabled = true; } diff --git a/libafl_targets/src/cmps/mod.rs b/libafl_targets/src/cmps/mod.rs index 51ccebce30..5ebe68ccce 100644 --- a/libafl_targets/src/cmps/mod.rs +++ b/libafl_targets/src/cmps/mod.rs @@ -13,9 +13,10 @@ use core::{ }; use libafl::{ - observers::{cmp::AFLppCmpLogHeader, CmpMap, CmpValues}, + observers::{cmp::AFLppCmpLogHeader, CmpMap, CmpValues, CmplogBytes}, Error, }; +use libafl_bolts::HasLen; use serde::{Deserialize, Deserializer, Serialize, Serializer}; pub use stages::*; @@ -62,6 +63,7 @@ pub use libafl_cmplog_map_ptr as CMPLOG_MAP_PTR; /// Value indicating if cmplog is enabled. #[no_mangle] +#[allow(non_upper_case_globals)] pub static mut libafl_cmplog_enabled: u8 = 0; pub use libafl_cmplog_enabled as CMPLOG_ENABLED; @@ -80,17 +82,23 @@ pub struct CmpLogHeader { // VALS /// The AFL++ `cmp_operands` struct -#[derive(Default, Debug, Clone, Copy)] -#[repr(C, packed)] +/// /// Comparison operands, represented as either two (left and right of comparison) u64 values or /// two (left and right of comparison) u128 values, split into two u64 values. If the left and /// right values are smaller than u64, they can be sign or zero extended to 64 bits, as the actual /// comparison size is determined by the `hits` field of the associated `AFLppCmpLogHeader`. 
+#[derive(Default, Debug, Clone, Copy)] +#[repr(C, packed)] pub struct AFLppCmpLogOperands { v0: u64, - v1: u64, v0_128: u64, + v0_256_0: u64, + v0_256_1: u64, + v1: u64, v1_128: u64, + v1_256_0: u64, + v1_256_1: u64, + unused: [u8; 8], } impl AFLppCmpLogOperands { @@ -99,9 +107,14 @@ impl AFLppCmpLogOperands { pub fn new(v0: u64, v1: u64) -> Self { Self { v0, - v1, v0_128: 0, + v0_256_0: 0, + v0_256_1: 0, + v1, v1_128: 0, + v1_256_0: 0, + v1_256_1: 0, + unused: [0; 8], } } @@ -115,9 +128,14 @@ impl AFLppCmpLogOperands { Self { v0, - v1, v0_128, + v0_256_0: 0, + v0_256_1: 0, + v1, v1_128, + v1_256_0: 0, + v1_256_1: 0, + unused: [0; 8], } } @@ -175,10 +193,11 @@ impl AFLppCmpLogOperands { #[repr(C, packed)] /// Comparison function operands, like for strcmp/memcmp, represented as two byte arrays. pub struct AFLppCmpLogFnOperands { - v0: [u8; 31], + v0: [u8; 32], + v1: [u8; 32], v0_len: u8, - v1: [u8; 31], v1_len: u8, + unused: [u8; 6], } impl AFLppCmpLogFnOperands { @@ -188,8 +207,8 @@ impl AFLppCmpLogFnOperands { let v0_len = v0.len() as u8; let v1_len = v1.len() as u8; - let mut v0_arr = [0; 31]; - let mut v1_arr = [0; 31]; + let mut v0_arr = [0; 32]; + let mut v1_arr = [0; 32]; v0_arr.copy_from_slice(v0); v1_arr.copy_from_slice(v1); @@ -199,12 +218,13 @@ impl AFLppCmpLogFnOperands { v0_len, v1: v1_arr, v1_len, + unused: [0; 6], } } #[must_use] /// first rtn operand - pub fn v0(&self) -> &[u8; 31] { + pub fn v0(&self) -> &[u8; 32] { &self.v0 } @@ -216,7 +236,7 @@ impl AFLppCmpLogFnOperands { #[must_use] /// first rtn operand len - pub fn v1(&self) -> &[u8; 31] { + pub fn v1(&self) -> &[u8; 32] { &self.v1 } @@ -242,7 +262,7 @@ impl AFLppCmpLogFnOperands { /// The operands logged during `CmpLog`. #[repr(C)] #[derive(Default, Debug, Clone, Copy)] -pub struct CmpLogInstruction(u64, u64); +pub struct CmpLogInstruction(u64, u64, u8); /// The routine arguments logged during `CmpLog`. #[repr(C)] @@ -353,18 +373,22 @@ impl CmpMap for CmpLogMap { 1 => Some(CmpValues::U8(( self.vals.operands[idx][execution].0 as u8, self.vals.operands[idx][execution].1 as u8, + self.vals.operands[idx][execution].2 == 1, ))), 2 => Some(CmpValues::U16(( self.vals.operands[idx][execution].0 as u16, self.vals.operands[idx][execution].1 as u16, + self.vals.operands[idx][execution].2 == 1, ))), 4 => Some(CmpValues::U32(( self.vals.operands[idx][execution].0 as u32, self.vals.operands[idx][execution].1 as u32, + self.vals.operands[idx][execution].2 == 1, ))), 8 => Some(CmpValues::U64(( self.vals.operands[idx][execution].0, self.vals.operands[idx][execution].1, + self.vals.operands[idx][execution].2 == 1, ))), // other => panic!("Invalid CmpLog shape {}", other), _ => None, @@ -373,8 +397,14 @@ impl CmpMap for CmpLogMap { } else { unsafe { Some(CmpValues::Bytes(( - self.vals.routines[idx][execution].0.to_vec(), - self.vals.routines[idx][execution].1.to_vec(), + CmplogBytes::from_buf_and_len( + self.vals.routines[idx][execution].0, + CMPLOG_RTN_LEN as u8, + ), + CmplogBytes::from_buf_and_len( + self.vals.routines[idx][execution].1, + CMPLOG_RTN_LEN as u8, + ), ))) } } @@ -395,6 +425,7 @@ impl CmpMap for CmpLogMap { /// The global `CmpLog` map for the current `LibAFL` run. 
#[no_mangle] #[allow(clippy::large_stack_arrays)] +#[allow(non_upper_case_globals)] pub static mut libafl_cmplog_map: CmpLogMap = CmpLogMap { headers: [CmpLogHeader { hits: 0, @@ -402,7 +433,7 @@ pub static mut libafl_cmplog_map: CmpLogMap = CmpLogMap { kind: 0, }; CMPLOG_MAP_W], vals: CmpLogVals { - operands: [[CmpLogInstruction(0, 0); CMPLOG_MAP_H]; CMPLOG_MAP_W], + operands: [[CmpLogInstruction(0, 0, 0); CMPLOG_MAP_H]; CMPLOG_MAP_W], }, }; @@ -411,13 +442,18 @@ pub static mut libafl_cmplog_map: CmpLogMap = CmpLogMap { #[cfg(feature = "cmplog_extended_instrumentation")] #[allow(clippy::large_stack_arrays)] pub static mut libafl_cmplog_map_extended: AFLppCmpLogMap = AFLppCmpLogMap { - headers: [AFLppCmpLogHeader { data: [0; 2] }; CMPLOG_MAP_W], + headers: [AFLppCmpLogHeader::new_with_raw_value(0); CMPLOG_MAP_W], vals: AFLppCmpLogVals { operands: [[AFLppCmpLogOperands { v0: 0, - v1: 0, v0_128: 0, + v0_256_0: 0, + v0_256_1: 0, + v1: 0, v1_128: 0, + v1_256_0: 0, + v1_256_1: 0, + unused: [0; 8], }; CMPLOG_MAP_H]; CMPLOG_MAP_W], }, }; @@ -427,15 +463,22 @@ pub use libafl_cmplog_map as CMPLOG_MAP; pub use libafl_cmplog_map_extended as CMPLOG_MAP_EXTENDED; #[derive(Debug, Clone)] -#[repr(C, packed)] +#[repr(C)] /// Comparison map compatible with AFL++ cmplog instrumentation pub struct AFLppCmpLogMap { headers: [AFLppCmpLogHeader; CMPLOG_MAP_W], vals: AFLppCmpLogVals, } +impl HasLen for AFLppCmpLogMap { + fn len(&self) -> usize { + CMPLOG_MAP_W + } +} + impl AFLppCmpLogMap { #[must_use] + #[allow(clippy::cast_ptr_alignment)] /// Instantiate a new boxed zeroed `AFLppCmpLogMap`. This should be used to create a new /// map, because it is so large it cannot be allocated on the stack with default /// runtime configuration. @@ -482,6 +525,7 @@ impl Serialize for AFLppCmpLogMap { } impl<'de> Deserialize<'de> for AFLppCmpLogMap { + #[allow(clippy::cast_ptr_alignment)] fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, @@ -498,11 +542,11 @@ impl CmpMap for AFLppCmpLogMap { } fn executions_for(&self, idx: usize) -> usize { - self.headers[idx].hits() as usize + self.headers[idx].hits().value() as usize } fn usable_executions_for(&self, idx: usize) -> usize { - if self.headers[idx]._type() == CMPLOG_KIND_INS { + if self.headers[idx].type_().value() == CMPLOG_KIND_INS { if self.executions_for(idx) < CMPLOG_MAP_H { self.executions_for(idx) } else { @@ -516,26 +560,31 @@ impl CmpMap for AFLppCmpLogMap { } fn values_of(&self, idx: usize, execution: usize) -> Option { - if self.headers[idx]._type() == CMPLOG_KIND_INS { + let header = self.headers[idx]; + if header.type_().value() == CMPLOG_KIND_INS { unsafe { - match self.headers[idx].shape() { + match self.headers[idx].shape().value() { 0 => Some(CmpValues::U8(( self.vals.operands[idx][execution].v0 as u8, self.vals.operands[idx][execution].v1 as u8, + false, ))), 1 => Some(CmpValues::U16(( self.vals.operands[idx][execution].v0 as u16, self.vals.operands[idx][execution].v1 as u16, + false, ))), 3 => Some(CmpValues::U32(( self.vals.operands[idx][execution].v0 as u32, self.vals.operands[idx][execution].v1 as u32, + false, ))), 7 => Some(CmpValues::U64(( self.vals.operands[idx][execution].v0, self.vals.operands[idx][execution].v1, + false, ))), - // TODO handle 128 bits cmps + // TODO handle 128 bits & 256 bits cmps // other => panic!("Invalid CmpLog shape {}", other), _ => None, } @@ -545,8 +594,8 @@ impl CmpMap for AFLppCmpLogMap { let v0_len = self.vals.fn_operands[idx][execution].v0_len & (0x80 - 1); let v1_len = 
self.vals.fn_operands[idx][execution].v1_len & (0x80 - 1); Some(CmpValues::Bytes(( - self.vals.fn_operands[idx][execution].v0[..(v0_len as usize)].to_vec(), - self.vals.fn_operands[idx][execution].v1[..(v1_len as usize)].to_vec(), + CmplogBytes::from_buf_and_len(self.vals.fn_operands[idx][execution].v0, v0_len), + CmplogBytes::from_buf_and_len(self.vals.fn_operands[idx][execution].v1, v1_len), ))) } } @@ -554,7 +603,7 @@ impl CmpMap for AFLppCmpLogMap { fn reset(&mut self) -> Result<(), Error> { // For performance, we reset just the headers - self.headers.fill(AFLppCmpLogHeader { data: [0; 2] }); + self.headers.fill(AFLppCmpLogHeader::new_with_raw_value(0)); Ok(()) } diff --git a/libafl_targets/src/cmps/observers/aflpp.rs b/libafl_targets/src/cmps/observers/aflpp.rs index 4405072c3b..c07df26968 100644 --- a/libafl_targets/src/cmps/observers/aflpp.rs +++ b/libafl_targets/src/cmps/observers/aflpp.rs @@ -1,11 +1,10 @@ use alloc::{borrow::Cow, vec::Vec}; -use core::{fmt::Debug, marker::PhantomData}; +use core::fmt::Debug; use libafl::{ executors::ExitKind, - inputs::UsesInput, observers::{ - cmp::{AFLppCmpValuesMetadata, CmpMap, CmpObserver, CmpObserverMetadata, CmpValues}, + cmp::{AFLppCmpValuesMetadata, CmpMap, CmpObserver, CmpValues}, Observer, }, Error, HasMetadata, @@ -65,20 +64,17 @@ struct cmp_map { /// A [`CmpObserver`] observer for AFL++ redqueen #[derive(Serialize, Deserialize, Debug)] -pub struct AFLppCmpLogObserver<'a, S> { +pub struct AFLppCmpLogObserver<'a> { cmp_map: OwnedRefMut<'a, AFLppCmpLogMap>, size: Option>, name: Cow<'static, str>, add_meta: bool, - original: >::Data, - phantom: PhantomData, + original: bool, } -impl<'a, S> CmpObserver<'a, AFLppCmpLogMap, S, AFLppCmpValuesMetadata> - for AFLppCmpLogObserver<'a, S> -where - S: UsesInput + HasMetadata, -{ +impl CmpObserver for AFLppCmpLogObserver<'_> { + type Map = AFLppCmpLogMap; + /// Get the number of usable cmps (all by default) fn usable_count(&self) -> usize { match &self.size { @@ -94,16 +90,87 @@ where fn cmp_map_mut(&mut self) -> &mut AFLppCmpLogMap { self.cmp_map.as_mut() } +} - fn cmp_observer_data( - &self, - ) -> >::Data { - self.original +impl Observer for AFLppCmpLogObserver<'_> +where + S: HasMetadata, +{ + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { + #[cfg(feature = "cmplog_extended_instrumentation")] + unsafe { + // if the target is compiled with aflpp and you are running forkserver then this is not needed + // because with forkserver, you have two executors (processes), one is dedicated for edge-cov + // the other dedicated for cmplog. + // however if it is in-process, then cmplog instrumentation is in the same binary as the edge-cov binary + // (so we only have one executable) + // therefore we need to turn this thing on and off to change this according to what executors we are using + CMPLOG_ENABLED = 1; + } + self.cmp_map.as_mut().reset()?; + Ok(()) + } + + fn post_exec(&mut self, state: &mut S, _input: &I, _exit_kind: &ExitKind) -> Result<(), Error> { + #[cfg(feature = "cmplog_extended_instrumentation")] + unsafe { + CMPLOG_ENABLED = 0; + } + if self.add_meta { + self.add_cmpvalues_meta(state); + } + Ok(()) + } +} + +impl Named for AFLppCmpLogObserver<'_> { + fn name(&self) -> &Cow<'static, str> { + &self.name + } +} + +impl<'a> AFLppCmpLogObserver<'a> { + /// Creates a new [`AFLppCmpLogObserver`] with the given name and map. 
+ #[must_use] + pub fn new( + name: &'static str, + cmp_map: OwnedRefMut<'a, AFLppCmpLogMap>, + add_meta: bool, + ) -> Self { + Self { + name: Cow::from(name), + size: None, + cmp_map, + add_meta, + original: false, + } + } + /// Setter for the flag if the executed input is a mutated one or the original one + pub fn set_original(&mut self, v: bool) { + self.original = v; + } + + /// Creates a new [`AFLppCmpLogObserver`] with the given name, map and reference to variable size. + #[must_use] + pub fn with_size( + name: &'static str, + cmp_map: OwnedRefMut<'a, AFLppCmpLogMap>, + add_meta: bool, + original: bool, + size: OwnedRefMut<'a, usize>, + ) -> Self { + Self { + name: Cow::from(name), + size: Some(size), + cmp_map, + add_meta, + original, + } } /// Add `AFLppCmpValuesMetadata` to the State including the logged values. /// This routine does a basic loop filtering because loop index cmps are not interesting. - fn add_cmpvalues_meta(&mut self, state: &mut S) + fn add_cmpvalues_meta(&mut self, state: &mut S) where S: HasMetadata, { @@ -131,193 +198,101 @@ where } let usable_count = self.usable_count(); - let cmp_observer_data = self.cmp_observer_data(); - - meta.add_from(usable_count, self.cmp_map_mut(), cmp_observer_data); + let original = self.original; + add_to_aflpp_cmp_metadata(meta, usable_count, self.cmp_map_mut(), original); } } -impl<'a, S> Observer for AFLppCmpLogObserver<'a, S> -where - S: UsesInput + HasMetadata, -{ - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { - #[cfg(feature = "cmplog_extended_instrumentation")] - unsafe { - // if the target is compiled with aflpp and you are running forkserver then this is not needed - // because with forkserver, you have two executors (processes), one is dedicated for edge-cov - // the other dedicated for cmplog. - // however if it is in-process, then cmplog instrumentation is in the same binary as the edge-cov binary - // (so we only have one executable) - // therefore we need to turn this thing on and off to change this according to what executors we are using - CMPLOG_ENABLED = 1; - } - self.cmp_map.as_mut().reset()?; - Ok(()) - } +/// Add the metadata +pub fn add_to_aflpp_cmp_metadata( + meta: &mut AFLppCmpValuesMetadata, + usable_count: usize, + cmp_map: &mut AFLppCmpLogMap, + original: bool, +) { + let count = usable_count; + for i in 0..count { + let execs = cmp_map.usable_executions_for(i); + if execs > 0 { + if original { + // Update header + meta.headers.push((i, cmp_map.headers[i])); + } - fn post_exec( - &mut self, - state: &mut S, - _input: &S::Input, - _exit_kind: &ExitKind, - ) -> Result<(), Error> { - #[cfg(feature = "cmplog_extended_instrumentation")] - unsafe { - CMPLOG_ENABLED = 0; - } - if self.add_meta { - self.add_cmpvalues_meta(state); - } - Ok(()) - } -} + // Recongize loops and discard if needed + if execs > 4 { + let mut increasing_v0 = 0; + let mut increasing_v1 = 0; + let mut decreasing_v0 = 0; + let mut decreasing_v1 = 0; -impl<'a, S> Named for AFLppCmpLogObserver<'a, S> { - fn name(&self) -> &Cow<'static, str> { - &self.name - } -} - -impl<'a, S> AFLppCmpLogObserver<'a, S> { - /// Creates a new [`AFLppCmpLogObserver`] with the given name and map. 
- #[must_use] - pub fn new( - name: &'static str, - cmp_map: OwnedRefMut<'a, AFLppCmpLogMap>, - add_meta: bool, - ) -> Self { - Self { - name: Cow::from(name), - size: None, - cmp_map, - add_meta, - original: false, - phantom: PhantomData, - } - } - /// Setter for the flag if the executed input is a mutated one or the original one - pub fn set_original(&mut self, v: bool) { - self.original = v; - } - - /// Creates a new [`AFLppCmpLogObserver`] with the given name, map and reference to variable size. - #[must_use] - pub fn with_size( - name: &'static str, - cmp_map: OwnedRefMut<'a, AFLppCmpLogMap>, - add_meta: bool, - original: bool, - size: OwnedRefMut<'a, usize>, - ) -> Self { - Self { - name: Cow::from(name), - size: Some(size), - cmp_map, - add_meta, - original, - phantom: PhantomData, - } - } -} - -impl<'a> CmpObserverMetadata<'a, AFLppCmpLogMap> for AFLppCmpValuesMetadata { - type Data = bool; - - fn new_metadata() -> Self { - Self::new() - } - - fn add_from( - &mut self, - usable_count: usize, - cmp_map: &mut AFLppCmpLogMap, - cmp_observer_data: Self::Data, - ) { - let count = usable_count; - for i in 0..count { - let execs = cmp_map.usable_executions_for(i); - if execs > 0 { - if cmp_observer_data { - // Update header - self.headers.push((i, cmp_map.headers[i])); - } - - // Recongize loops and discard if needed - if execs > 4 { - let mut increasing_v0 = 0; - let mut increasing_v1 = 0; - let mut decreasing_v0 = 0; - let mut decreasing_v1 = 0; - - let mut last: Option = None; - for j in 0..execs { - if let Some(val) = cmp_map.values_of(i, j) { - if let Some(l) = last.and_then(|x| x.to_u64_tuple()) { - if let Some(v) = val.to_u64_tuple() { - if l.0.wrapping_add(1) == v.0 { - increasing_v0 += 1; - } - if l.1.wrapping_add(1) == v.1 { - increasing_v1 += 1; - } - if l.0.wrapping_sub(1) == v.0 { - decreasing_v0 += 1; - } - if l.1.wrapping_sub(1) == v.1 { - decreasing_v1 += 1; - } + let mut last: Option = None; + for j in 0..execs { + if let Some(val) = cmp_map.values_of(i, j) { + if let Some(l) = last.and_then(|x| x.to_u64_tuple()) { + if let Some(v) = val.to_u64_tuple() { + if l.0.wrapping_add(1) == v.0 { + increasing_v0 += 1; + } + if l.1.wrapping_add(1) == v.1 { + increasing_v1 += 1; + } + if l.0.wrapping_sub(1) == v.0 { + decreasing_v0 += 1; + } + if l.1.wrapping_sub(1) == v.1 { + decreasing_v1 += 1; } } - last = Some(val); } - } - // We check for execs-2 because the logged execs may wrap and have something like - // 8 9 10 3 4 5 6 7 - if increasing_v0 >= execs - 2 - || increasing_v1 >= execs - 2 - || decreasing_v0 >= execs - 2 - || decreasing_v1 >= execs - 2 - { - continue; + last = Some(val); } } + // We check for execs-2 because the logged execs may wrap and have something like + // 8 9 10 3 4 5 6 7 + if increasing_v0 >= execs - 2 + || increasing_v1 >= execs - 2 + || decreasing_v0 >= execs - 2 + || decreasing_v1 >= execs - 2 + { + continue; + } + } - let cmpmap_idx = i; - let mut cmp_values = Vec::new(); - if cmp_observer_data { - // push into orig_cmpvals - // println!("Adding to orig_cmpvals"); - for j in 0..execs { - if let Some(val) = cmp_map.values_of(i, j) { - cmp_values.push(val); - } + let cmpmap_idx = i; + let mut cmp_values = Vec::new(); + if original { + // push into orig_cmpvals + // println!("Adding to orig_cmpvals"); + for j in 0..execs { + if let Some(val) = cmp_map.values_of(i, j) { + cmp_values.push(val); } - // println!("idx: {cmpmap_idx} cmp_values: {:#?}", cmp_values); - self.orig_cmpvals.insert(cmpmap_idx, cmp_values); - } else { - // push into new_cmpvals - // 
println!("Adding to new_cmpvals"); - /* - unsafe { - println!( - "idx {:#?} type {:#?} sz {:#?} ptr1 {:p} val1 {:x}", - i, - cmp_map.headers()[i]._type(), - cmp_map.headers()[i].shape(), - &cmp_map.vals.operands[i][0], - cmp_map.vals.operands[i][0].v0(), - ); - } - */ - for j in 0..execs { - if let Some(val) = cmp_map.values_of(i, j) { - cmp_values.push(val); - } - } - // println!("idx: {cmpmap_idx} cmp_values: {:#?}", cmp_values); - self.new_cmpvals.insert(cmpmap_idx, cmp_values); } + // println!("idx: {cmpmap_idx} cmp_values: {:#?}", cmp_values); + meta.orig_cmpvals.insert(cmpmap_idx, cmp_values); + } else { + // push into new_cmpvals + // println!("Adding to new_cmpvals"); + /* + unsafe { + println!( + "idx {:#?} type {:#?} sz {:#?} ptr1 {:p} val1 {:x}", + i, + cmp_map.headers()[i]._type(), + cmp_map.headers()[i].shape(), + &cmp_map.vals.operands[i][0], + cmp_map.vals.operands[i][0].v0(), + ); + } + */ + for j in 0..execs { + if let Some(val) = cmp_map.values_of(i, j) { + cmp_values.push(val); + } + } + // println!("idx: {cmpmap_idx} cmp_values: {:#?}", cmp_values); + meta.new_cmpvals.insert(cmpmap_idx, cmp_values); } } } diff --git a/libafl_targets/src/cmps/observers/cmplog.rs b/libafl_targets/src/cmps/observers/cmplog.rs index f8583e8c23..3b90edcbdd 100644 --- a/libafl_targets/src/cmps/observers/cmplog.rs +++ b/libafl_targets/src/cmps/observers/cmplog.rs @@ -7,7 +7,6 @@ use core::fmt::Debug; use libafl::{ executors::ExitKind, - inputs::UsesInput, observers::{cmp::CmpValuesMetadata, CmpMap, CmpObserver, Observer}, Error, HasMetadata, }; @@ -25,10 +24,9 @@ pub struct CmpLogObserver { name: Cow<'static, str>, } -impl<'a, S> CmpObserver<'a, CmpLogMap, S, CmpValuesMetadata> for CmpLogObserver -where - S: UsesInput + HasMetadata, -{ +// Is the only difference here between this and StdCmpObserver that CMPLOG_ENABLED = 1?? 
+impl CmpObserver for CmpLogObserver { + type Map = CmpLogMap; /// Get the number of usable cmps (all by default) fn usable_count(&self) -> usize { match &self.size { @@ -46,12 +44,11 @@ where } } -impl<'a, S> Observer for CmpLogObserver +impl Observer for CmpLogObserver where - S: UsesInput + HasMetadata, - Self: CmpObserver<'a, CmpLogMap, S, CmpValuesMetadata>, + S: HasMetadata, { - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.map.as_mut().reset()?; unsafe { CMPLOG_ENABLED = 1; @@ -59,18 +56,17 @@ where Ok(()) } - fn post_exec( - &mut self, - state: &mut S, - _input: &S::Input, - _exit_kind: &ExitKind, - ) -> Result<(), Error> { + fn post_exec(&mut self, state: &mut S, _input: &I, _exit_kind: &ExitKind) -> Result<(), Error> { unsafe { CMPLOG_ENABLED = 0; } if self.add_meta { - self.add_cmpvalues_meta(state); + let meta = state.metadata_or_insert_with(CmpValuesMetadata::new); + + let usable_count = self.usable_count(); + + meta.add_from(usable_count, self.cmp_map_mut()); } Ok(()) diff --git a/libafl_targets/src/cmps/stages/aflpptracing.rs b/libafl_targets/src/cmps/stages/aflpptracing.rs index 790fb4bbcd..d87600d56d 100644 --- a/libafl_targets/src/cmps/stages/aflpptracing.rs +++ b/libafl_targets/src/cmps/stages/aflpptracing.rs @@ -1,11 +1,12 @@ -use alloc::borrow::Cow; +use alloc::borrow::{Cow, ToOwned}; use core::marker::PhantomData; use libafl::{ + corpus::Corpus, executors::{Executor, HasObservers}, inputs::{BytesInput, UsesInput}, observers::ObserversTuple, - stages::{colorization::TaintMetadata, RetryRestartHelper, Stage}, + stages::{colorization::TaintMetadata, RetryCountRestartHelper, Stage}, state::{HasCorpus, HasCurrentTestcase, HasExecutions, UsesState}, Error, HasMetadata, HasNamedMetadata, }; @@ -22,11 +23,14 @@ pub struct AFLppCmplogTracingStage<'a, EM, TE, Z> where TE: UsesState, { + name: Cow<'static, str>, tracer_executor: TE, - cmplog_observer_handle: Option::State>>>, + cmplog_observer_handle: Handle>, #[allow(clippy::type_complexity)] phantom: PhantomData<(EM, TE, Z)>, } +/// The name for aflpp tracing stage +pub static AFLPP_CMPLOG_TRACING_STAGE_NAME: &str = "aflpptracing"; impl UsesState for AFLppCmplogTracingStage<'_, EM, TE, Z> where @@ -40,8 +44,7 @@ where TE: UsesState, { fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("AFLppCmplogTracingStage"); - &NAME + &self.name } } @@ -49,10 +52,16 @@ impl Stage for AFLppCmplogTracingStage<'_, EM, TE, Z> where E: UsesState, TE: Executor + HasObservers, - Self::State: - HasExecutions + HasCorpus + HasMetadata + UsesInput + HasNamedMetadata, + TE::State: HasExecutions + + HasCorpus + + HasMetadata + + UsesInput + + HasNamedMetadata + + HasCurrentTestcase, + TE::Observers: MatchNameRef + ObserversTuple, EM: UsesState, Z: UsesState, + ::Corpus: Corpus, //delete me { #[inline] fn perform( @@ -65,19 +74,17 @@ where // First run with the un-mutated input let unmutated_input = state.current_input_cloned()?; - if let Some(observer_handle) = &self.cmplog_observer_handle { - if let Some(ob) = self - .tracer_executor - .observers_mut() - .get_mut(observer_handle) - { - // This is not the original input, - // Set it to false - ob.set_original(true); - } - // I can't think of any use of this stage if you don't use AFLppCmpLogObserver - // but do nothing ofcourse + if let Some(ob) = self + .tracer_executor + .observers_mut() + .get_mut(&self.cmplog_observer_handle) + { + // This is not 
the original input, + // Set it to false + ob.set_original(true); } + // I can't think of any use of this stage if you don't use AFLppCmpLogObserver + // but do nothing ofcourse self.tracer_executor .observers_mut() @@ -87,8 +94,6 @@ where self.tracer_executor .run_target(fuzzer, state, manager, &unmutated_input)?; - *state.executions_mut() += 1; - self.tracer_executor .observers_mut() .post_exec_all(state, &unmutated_input, &exit_kind)?; @@ -99,19 +104,17 @@ where None => return Err(Error::unknown("No metadata found")), }; - if let Some(observer_handle) = &self.cmplog_observer_handle { - if let Some(ob) = self - .tracer_executor - .observers_mut() - .get_mut(observer_handle) - { - // This is not the original input, - // Set it to false - ob.set_original(false); - } - // I can't think of any use of this stage if you don't use AFLppCmpLogObserver - // but do nothing ofcourse + if let Some(ob) = self + .tracer_executor + .observers_mut() + .get_mut(&self.cmplog_observer_handle) + { + // This is not the original input, + // Set it to false + ob.set_original(false); } + // I can't think of any use of this stage if you don't use AFLppCmpLogObserver + // but do nothing ofcourse self.tracer_executor .observers_mut() @@ -121,8 +124,6 @@ where .tracer_executor .run_target(fuzzer, state, manager, &mutated_input)?; - *state.executions_mut() += 1; - self.tracer_executor .observers_mut() .post_exec_all(state, &mutated_input, &exit_kind)?; @@ -130,14 +131,15 @@ where Ok(()) } - fn restart_progress_should_run(&mut self, state: &mut Self::State) -> Result { - // TODO: this may need better resumption? (Or is it always used with a forkserver?) - RetryRestartHelper::restart_progress_should_run(state, self, 3) + fn should_restart(&mut self, state: &mut Self::State) -> Result { + // Tracing stage is always deterministic + // don't restart + RetryCountRestartHelper::no_retry(state, &self.name) } - fn clear_restart_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { + fn clear_progress(&mut self, state: &mut Self::State) -> Result<(), Error> { // TODO: this may need better resumption? (Or is it always used with a forkserver?) 
- RetryRestartHelper::clear_restart_progress(state, self) + RetryCountRestartHelper::clear_progress(state, &self.name) } } @@ -145,22 +147,16 @@ impl<'a, EM, TE, Z> AFLppCmplogTracingStage<'a, EM, TE, Z> where TE: UsesState, { - /// Creates a new default stage - pub fn new(tracer_executor: TE) -> Self { - Self { - cmplog_observer_handle: None, - tracer_executor, - phantom: PhantomData, - } - } - /// With cmplog observer - pub fn with_cmplog_observer( - tracer_executor: TE, - observer_handle: Handle>, - ) -> Self { + pub fn new(tracer_executor: TE, observer_handle: Handle>) -> Self { + let observer_name = observer_handle.name().clone(); Self { - cmplog_observer_handle: Some(observer_handle), + name: Cow::Owned( + AFLPP_CMPLOG_TRACING_STAGE_NAME.to_owned() + + ":" + + observer_name.into_owned().as_str(), + ), + cmplog_observer_handle: observer_handle, tracer_executor, phantom: PhantomData, } diff --git a/libafl_targets/src/coverage.c b/libafl_targets/src/coverage.c index 727d5e1567..2ddba3963e 100644 --- a/libafl_targets/src/coverage.c +++ b/libafl_targets/src/coverage.c @@ -8,7 +8,7 @@ typedef uint32_t prev_loc_t; /* Maximum K for top-K context sensitivity */ #define CTX_MAX_K 32U -extern uint8_t __afl_area_ptr_local[EDGES_MAP_SIZE_MAX]; +extern uint8_t __afl_area_ptr_local[EDGES_MAP_ALLOCATED_SIZE]; uint8_t *__afl_area_ptr = __afl_area_ptr_local; extern uint8_t __ddg_area_ptr_local[DDG_MAP_SIZE]; diff --git a/libafl_targets/src/coverage.rs b/libafl_targets/src/coverage.rs index 1d7b88f0e0..c93e5f3a4e 100644 --- a/libafl_targets/src/coverage.rs +++ b/libafl_targets/src/coverage.rs @@ -4,6 +4,7 @@ feature = "sancov_pcguard_edges", feature = "sancov_pcguard_hitcounts", feature = "sancov_ngram4", + feature = "sancov_ngram8", feature = "sancov_ctx" ))] use alloc::borrow::Cow; @@ -11,24 +12,28 @@ use alloc::borrow::Cow; #[cfg(any(target_os = "linux", target_vendor = "apple"))] use libafl::{mutators::Tokens, Error}; -use crate::{ACCOUNTING_MAP_SIZE, DDG_MAP_SIZE, EDGES_MAP_SIZE_IN_USE, EDGES_MAP_SIZE_MAX}; +use crate::{ACCOUNTING_MAP_SIZE, DDG_MAP_SIZE, EDGES_MAP_ALLOCATED_SIZE, EDGES_MAP_DEFAULT_SIZE}; /// The map for edges. #[no_mangle] -pub static mut __afl_area_ptr_local: [u8; EDGES_MAP_SIZE_MAX] = [0; EDGES_MAP_SIZE_MAX]; +#[allow(non_upper_case_globals)] +pub static mut __afl_area_ptr_local: [u8; EDGES_MAP_ALLOCATED_SIZE] = [0; EDGES_MAP_ALLOCATED_SIZE]; pub use __afl_area_ptr_local as EDGES_MAP; /// The map for data dependency #[no_mangle] +#[allow(non_upper_case_globals)] pub static mut __ddg_area_ptr_local: [u8; DDG_MAP_SIZE] = [0; DDG_MAP_SIZE]; pub use __ddg_area_ptr_local as DDG_MAP; /// The map for accounting mem writes. #[no_mangle] +#[allow(non_upper_case_globals)] pub static mut __afl_acc_memop_ptr_local: [u32; ACCOUNTING_MAP_SIZE] = [0; ACCOUNTING_MAP_SIZE]; pub use __afl_acc_memop_ptr_local as ACCOUNTING_MEMOP_MAP; /// The max count of edges found. +/// /// This is either computed during the compilation time or at runtime (in this case this is used to shrink the map). /// You can use this for the initial map size for the observer only if you compute this time at compilation time. pub static mut MAX_EDGES_FOUND: usize = 0; @@ -58,6 +63,9 @@ pub use __ddg_area_ptr as DDG_MAP_PTR; /// Return Tokens from the compile-time token section #[cfg(any(target_os = "linux", target_vendor = "apple"))] pub fn autotokens() -> Result { + // # Safety + // All values are checked before dereferencing. 
+ unsafe { if __token_start.is_null() || __token_stop.is_null() { Ok(Tokens::default()) @@ -70,13 +78,15 @@ pub fn autotokens() -> Result { /// The actual size we use for the map of edges. /// This is used for forkserver backend +#[allow(non_upper_case_globals)] #[no_mangle] -pub static mut __afl_map_size: usize = EDGES_MAP_SIZE_IN_USE; +pub static mut __afl_map_size: usize = EDGES_MAP_DEFAULT_SIZE; #[cfg(any( feature = "sancov_pcguard_edges", feature = "sancov_pcguard_hitcounts", feature = "sancov_ngram4", + feature = "sancov_ngram8", feature = "sancov_ctx" ))] use libafl::observers::StdMapObserver; @@ -84,6 +94,7 @@ use libafl::observers::StdMapObserver; feature = "sancov_pcguard_edges", feature = "sancov_pcguard_hitcounts", feature = "sancov_ngram4", + feature = "sancov_ngram8", feature = "sancov_ctx" ))] use libafl_bolts::ownedref::OwnedMutSlice; @@ -100,6 +111,7 @@ use libafl_bolts::ownedref::OwnedMutSlice; feature = "sancov_pcguard_edges", feature = "sancov_pcguard_hitcounts", feature = "sancov_ngram4", + feature = "sancov_ngram8", feature = "sancov_ctx" ))] pub unsafe fn edges_map_mut_slice<'a>() -> OwnedMutSlice<'a, u8> { @@ -111,11 +123,11 @@ pub unsafe fn edges_map_mut_slice<'a>() -> OwnedMutSlice<'a, u8> { /// /// ```rust,ignore /// use libafl::observers::StdMapObserver; -/// use libafl_targets::{EDGES_MAP, EDGES_MAP_SIZE_IN_USE}; +/// use libafl_targets::{EDGES_MAP, EDGES_MAP_DEFAULT_SIZE}; /// /// #[cfg(not(feature = "pointer_maps"))] /// let observer = unsafe { -/// StdMapObserver::from_mut_ptr("edges", EDGES_MAP.as_mut_ptr(), EDGES_MAP_SIZE_IN_USE) +/// StdMapObserver::from_mut_ptr("edges", EDGES_MAP.as_mut_ptr(), EDGES_MAP_DEFAULT_SIZE) /// }; /// ``` /// @@ -137,6 +149,7 @@ pub unsafe fn edges_map_mut_slice<'a>() -> OwnedMutSlice<'a, u8> { feature = "sancov_pcguard_edges", feature = "sancov_pcguard_hitcounts", feature = "sancov_ngram4", + feature = "sancov_ngram8", feature = "sancov_ctx" ))] pub unsafe fn std_edges_map_observer<'a, S>(name: S) -> StdMapObserver<'a, u8, false> @@ -156,7 +169,7 @@ pub fn edges_map_mut_ptr() -> *mut u8 { assert!(!EDGES_MAP_PTR.is_null()); EDGES_MAP_PTR } else { - EDGES_MAP.as_mut_ptr() + &raw mut EDGES_MAP as *mut u8 } } } @@ -166,6 +179,7 @@ pub fn edges_map_mut_ptr() -> *mut u8 { feature = "sancov_pcguard_edges", feature = "sancov_pcguard_hitcounts", feature = "sancov_ngram4", + feature = "sancov_ngram8", feature = "sancov_ctx" ))] #[must_use] @@ -176,11 +190,12 @@ pub fn edges_max_num() -> usize { } else { #[cfg(feature = "pointer_maps")] { - EDGES_MAP_SIZE_MAX // the upper bound + EDGES_MAP_ALLOCATED_SIZE // the upper bound } #[cfg(not(feature = "pointer_maps"))] { - EDGES_MAP.len() + let edges_map_ptr = &raw const EDGES_MAP; + (*edges_map_ptr).len() } } } @@ -195,8 +210,7 @@ mod swap { use core::fmt::Debug; use libafl::{ - inputs::UsesInput, - observers::{DifferentialObserver, Observer, ObserversTuple, StdMapObserver}, + observers::{DifferentialObserver, Observer, StdMapObserver}, Error, }; use libafl_bolts::{ownedref::OwnedMutSlice, AsSliceMut, Named}; @@ -263,20 +277,16 @@ mod swap { } } - impl<'a, 'b> Named for DifferentialAFLMapSwapObserver<'a, 'b> { + impl Named for DifferentialAFLMapSwapObserver<'_, '_> { fn name(&self) -> &Cow<'static, str> { &self.name } } - impl<'a, 'b, S> Observer for DifferentialAFLMapSwapObserver<'a, 'b> where S: UsesInput {} + impl Observer for DifferentialAFLMapSwapObserver<'_, '_> {} - impl<'a, 'b, OTA, OTB, S> DifferentialObserver - for DifferentialAFLMapSwapObserver<'a, 'b> - where - OTA: ObserversTuple, 
- OTB: ObserversTuple, - S: UsesInput, + impl DifferentialObserver + for DifferentialAFLMapSwapObserver<'_, '_> { fn pre_observe_first(&mut self, _: &mut OTA) -> Result<(), Error> { let slice = self.first_map.as_slice_mut(); diff --git a/libafl_targets/src/drcov.rs b/libafl_targets/src/drcov.rs index 7998f661a9..9f8b448718 100644 --- a/libafl_targets/src/drcov.rs +++ b/libafl_targets/src/drcov.rs @@ -1,59 +1,112 @@ -//! [`DrCov`](https://dynamorio.org/page_drcov.html) support for `LibAFL` frida mode, -//! writing basic-block trace files to be read by coverage analysis tools, such as [Lighthouse](https://github.com/gaasedelen/lighthouse), -//! [bncov](https://github.com/ForAllSecure/bncov), [dragondance](https://github.com/0ffffffffh/dragondance), etc. +//! [`DrCov`](https://dynamorio.org/page_drcov.html) support for `LibAFL` `FRIDA` mode. +//! +//! It's writing basic-block trace files to be read by coverage analysis tools, such as [Lighthouse](https://github.com/gaasedelen/lighthouse), +//! [bncov](https://github.com/ForAllSecure/bncov), [cartographer](https://github.com/nccgroup/Cartographer), etc. use alloc::{string::String, vec::Vec}; -use core::ptr::addr_of; +use core::{fmt::Debug, num::ParseIntError, ptr}; use std::{ fs::File, - io::{BufWriter, Write}, - path::Path, + io::{BufRead, BufReader, BufWriter, Read, Write}, + path::{Path, PathBuf}, }; +use hashbrown::HashSet; use libafl::Error; use rangemap::RangeMap; /// A basic block struct +/// This can be used to keep track of new addresses. #[derive(Clone, Copy, Debug, PartialEq, Eq)] pub struct DrCovBasicBlock { /// Start of this basic block - pub start: usize, + pub start: u64, /// End of this basic block - pub end: usize, + pub end: u64, } -#[derive(Clone, Copy, Debug, PartialEq, Eq)] +/// A (Raw) Basic Block List Entry. +/// This is only relevant in combination with a [`DrCovReader`] or a [`DrCovWriter`]. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] #[repr(C)] -struct DrCovBasicBlockEntry { - start: u32, +pub struct DrCovBasicBlockEntry { + /// Start of this basic block + pub start: u32, + /// Size of this basic block size: u16, + /// The id of the `DrCov` module this block is in mod_id: u16, } +impl From<&[u8; 8]> for DrCovBasicBlockEntry { + fn from(value: &[u8; 8]) -> Self { + // # Safety + // The value is a valid u8 pointer. + // There's a chance that the value is not aligned to 32 bit, so we use `read_unaligned`. + assert_eq!( + size_of::(), + size_of::<[u8; 8]>(), + "`DrCovBasicBlockEntry` size changed!" + ); + unsafe { ptr::read_unaligned(ptr::from_ref(value) as *const DrCovBasicBlockEntry) } + } +} + +impl From for [u8; 8] { + fn from(value: DrCovBasicBlockEntry) -> Self { + // # Safety + // The value is a c struct. + // Casting its pointer to bytes should be safe. + // The resulting pointer needs to be less aligned. + assert_eq!( + size_of::(), + size_of::<[u8; 8]>(), + "`DrCovBasicBlockEntry` size changed!" + ); + unsafe { std::slice::from_raw_parts(ptr::from_ref(&value).cast::(), 8) } + .try_into() + .unwrap() + } +} + +impl From<&DrCovBasicBlockEntry> for &[u8] { + fn from(value: &DrCovBasicBlockEntry) -> Self { + // # Safety + // The value is a c struct. + // Casting its pointer to bytes should be safe. 
+ unsafe { + std::slice::from_raw_parts( + ptr::from_ref(value).cast::(), + size_of::(), + ) + } + } +} + /// A writer for `DrCov` files #[derive(Debug)] pub struct DrCovWriter<'a> { - module_mapping: &'a RangeMap, + module_mapping: &'a RangeMap, } impl DrCovBasicBlock { /// Create a new [`DrCovBasicBlock`] with the given `start` and `end` addresses. #[must_use] - pub fn new(start: usize, end: usize) -> Self { + pub fn new(start: u64, end: u64) -> Self { Self { start, end } } /// Create a new [`DrCovBasicBlock`] with a given `start` address and a block size. #[must_use] - pub fn with_size(start: usize, size: usize) -> Self { - Self::new(start, start + size) + pub fn with_size(start: u64, size: usize) -> Self { + Self::new(start, start + u64::try_from(size).unwrap()) } } impl<'a> DrCovWriter<'a> { /// Create a new [`DrCovWriter`] #[must_use] - pub fn new(module_mapping: &'a RangeMap) -> Self { + pub fn new(module_mapping: &'a RangeMap) -> Self { Self { module_mapping } } @@ -63,49 +116,496 @@ impl<'a> DrCovWriter<'a> { P: AsRef, { let mut writer = BufWriter::new(File::create(path)?); + let modules = self.module_entries(); + writer.write_all(b"DRCOV VERSION: 2\nDRCOV FLAVOR: libafl\n")?; writer - .write_all(b"DRCOV VERSION: 2\nDRCOV FLAVOR: libafl\n") - .unwrap(); - - let modules: Vec<(&std::ops::Range, &(u16, String))> = - self.module_mapping.iter().collect(); - writer - .write_all(format!("Module Table: version 2, count {}\n", modules.len()).as_bytes()) - .unwrap(); - writer - .write_all(b"Columns: id, base, end, entry, checksum, timestamp, path\n") - .unwrap(); + .write_all(format!("Module Table: version 2, count {}\n", modules.len()).as_bytes())?; + writer.write_all(b"Columns: id, base, end, entry, checksum, timestamp, path\n")?; for module in modules { - let (range, (id, path)) = module; - writer - .write_all( - format!( - "{:03}, 0x{:x}, 0x{:x}, 0x00000000, 0x00000000, 0x00000000, {}\n", - id, range.start, range.end, path - ) - .as_bytes(), - ) - .unwrap(); + writer.write_all(module.to_module_line().as_bytes())?; + writer.write_all(b"\n")?; } - writer - .write_all(format!("BB Table: {} bbs\n", basic_blocks.len()).as_bytes()) - .unwrap(); - for block in basic_blocks { - let (range, (id, _)) = self.module_mapping.get_key_value(&block.start).unwrap(); - let basic_block = DrCovBasicBlockEntry { - start: (block.start - range.start) as u32, - size: (block.end - block.start) as u16, - mod_id: *id, - }; - writer - .write_all(unsafe { - std::slice::from_raw_parts(addr_of!(basic_block) as *const u8, 8) - }) - .unwrap(); + + writer.write_all(format!("BB Table: {} bbs\n", basic_blocks.len()).as_bytes())?; + for block in self.basic_block_entries(basic_blocks) { + writer.write_all((&block).into()).unwrap(); } writer.flush()?; Ok(()) } + + /// Gets a [`Vec`] of all [`DrCovModuleEntry`] elements in this [`DrCovWriter`]. + #[must_use] + pub fn module_entries(&self) -> Vec { + self.module_mapping + .iter() + .map(|x| { + let (range, (id, path)) = x; + DrCovModuleEntry { + id: *id, + base: range.start, + end: range.end, + entry: 0, + checksum: 0, + timestamp: 0, + path: PathBuf::from(path), + } + }) + .collect() + } + + /// Gets a [`Vec`] of all [`DrCovBasicBlockEntry`] elements from a list of [`DrCovBasicBlock`] entries using the modules from this [`DrCovWriter`]. 
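+    ///
+    /// A minimal, hedged usage sketch; the module range and block addresses below
+    /// are illustrative assumptions, not values taken from this crate:
+    /// ```rust,ignore
+    /// let mut ranges = RangeMap::new();
+    /// ranges.insert(0x1000_u64..0x2000, (0_u16, "target".to_string()));
+    /// let writer = DrCovWriter::new(&ranges);
+    /// // Every block must fall inside a mapped module, otherwise this method panics.
+    /// let entries = writer.basic_block_entries(&[DrCovBasicBlock::new(0x1000, 0x1010)]);
+    /// assert_eq!(entries.len(), 1);
+    /// ```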
+ #[must_use] + pub fn basic_block_entries( + &self, + basic_blocks: &[DrCovBasicBlock], + ) -> Vec { + let mut ret = Vec::with_capacity(basic_blocks.len()); + for block in basic_blocks { + let (range, (id, _)) = self + .module_mapping + .get_key_value(&block.start) + .unwrap_or_else(|| { + panic!( + "Could not read module at addr {:?}. Module list: {:?}.", + block.start, self.module_mapping + ) + }); + let basic_block = DrCovBasicBlockEntry { + start: (block.start - range.start) as u32, + size: (block.end - block.start) as u16, + mod_id: *id, + }; + ret.push(basic_block); + } + ret + } + + /// Creates a [`DrCovReader`] module out of this [`DrCovWriter`] + #[must_use] + pub fn to_reader(&self, basic_blocks: &[DrCovBasicBlock]) -> DrCovReader { + let modules = self.module_entries(); + let basic_blocks = self.basic_block_entries(basic_blocks); + + DrCovReader::from_data(modules, basic_blocks) + } +} + +/// An entry in the `DrCov` module list. +#[derive(Debug, Clone)] +pub struct DrCovModuleEntry { + /// The index of this module + pub id: u16, + /// Base of this module + pub base: u64, + /// End address of this module + pub end: u64, + /// Entry (can be zero) + pub entry: usize, + /// Checksum (can be zero) + pub checksum: usize, + /// Timestamp (can be zero) + pub timestamp: usize, + /// The path of this module + pub path: PathBuf, +} + +impl DrCovModuleEntry { + /// Gets the module line from this [`DrCovModuleEntry`] + #[must_use] + pub fn to_module_line(&self) -> String { + format!( + "{:03}, 0x{:x}, 0x{:x}, 0x{:x}, 0x{:x}, 0x{:x}, {:?}", + self.id, self.base, self.end, self.entry, self.checksum, self.timestamp, self.path + ) + } +} + +/// Read `DrCov` (v2) files created with [`DrCovWriter`] or other tools +pub struct DrCovReader { + /// The modules in this `DrCov` file + pub module_entries: Vec, + /// The list of basic blocks as [`DrCovBasicBlockEntry`]. + /// To get the blocks as [`DrCovBasicBlock`], call [`Self::basic_blocks`] instead. + pub basic_block_entries: Vec, +} + +impl Debug for DrCovReader { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("DrCovReader") + .field("modules", &self.module_entries) + .field("basic_blocks", &self.basic_block_entries.len()) + .finish() + } +} + +fn parse_hex_to_usize(str: &str) -> Result { + // Cut off the first 0x + usize::from_str_radix(&str[2..], 16) +} + +fn parse_hex_to_u64(str: &str) -> Result { + // Cut off the first 0x + u64::from_str_radix(&str[2..], 16) +} + +impl DrCovReader { + /// Parse a `drcov` file to memory. + pub fn read + ?Sized>(file: &P) -> Result { + let f = File::open(file)?; + let mut reader = BufReader::new(f); + + let mut header = String::new(); + reader.read_line(&mut header)?; + + let drcov_version = "DRCOV VERSION: 2"; + if header.to_uppercase().trim() != drcov_version { + return Err(Error::illegal_state(format!( + "No valid header. Expected {drcov_version} but got {header}" + ))); + } + + header.clear(); + reader.read_line(&mut header)?; + + let drcov_flavor = "DRCOV FLAVOR:"; + if header.to_uppercase().starts_with(drcov_flavor) { + // Ignore flavor line if it's not present. 
+ log::info!("Got drcov flavor {drcov_flavor}"); + + header.clear(); + reader.read_line(&mut header)?; + } + + let Some(Ok(module_count)) = header + .split("Module Table: version 2, count ") + .nth(1) + .map(|x| x.trim().parse::()) + else { + return Err(Error::illegal_state(format!( + "Expected module table but got: {header}" + ))); + }; + + header.clear(); + reader.read_line(&mut header)?; + + if !header.starts_with("Columns: id, base, end, entry, checksum, timestamp, path") { + return Err(Error::illegal_state(format!( + "Module table has unknown or illegal columns: {header}" + ))); + } + + let mut modules = Vec::with_capacity(module_count); + + for _ in 0..module_count { + header.clear(); + reader.read_line(&mut header)?; + + let err = |x| { + Error::illegal_argument(format!( + "Unexpected module entry while parsing {x} in header: {header}" + )) + }; + + let mut split = header.split(", "); + + let Some(Ok(id)) = split.next().map(str::parse) else { + return Err(err("id")); + }; + + let Some(Ok(base)) = split.next().map(parse_hex_to_u64) else { + return Err(err("base")); + }; + + let Some(Ok(end)) = split.next().map(parse_hex_to_u64) else { + return Err(err("end")); + }; + + let Some(Ok(entry)) = split.next().map(parse_hex_to_usize) else { + return Err(err("entry")); + }; + + let Some(Ok(checksum)) = split.next().map(parse_hex_to_usize) else { + return Err(err("checksum")); + }; + + let Some(Ok(timestamp)) = split.next().map(parse_hex_to_usize) else { + return Err(err("timestamp")); + }; + + let Some(path) = split.next().map(|s| PathBuf::from(s.trim())) else { + return Err(err("path")); + }; + + modules.push(DrCovModuleEntry { + id, + base, + end, + entry, + checksum, + timestamp, + path, + }); + } + + header.clear(); + reader.read_line(&mut header)?; + + //"BB Table: {} bbs\n" + if !header.starts_with("BB Table: ") { + return Err(Error::illegal_state(format!( + "Error reading BB Table header. Got: {header}" + ))); + } + let mut bb = header.split(' '); + let Some(Ok(bb_count)) = bb.nth(2).map(str::parse) else { + return Err(Error::illegal_state(format!( + "Error parsing BB Table header count. Got: {header}" + ))); + }; + + let mut basic_blocks = Vec::with_capacity(bb_count); + + for _ in 0..bb_count { + let mut bb_entry = [0_u8; 8]; + reader.read_exact(&mut bb_entry)?; + basic_blocks.push((&bb_entry).into()); + } + + Ok(DrCovReader { + module_entries: modules, + basic_block_entries: basic_blocks, + }) + } + + /// Creates a [`DrCovReader`] pre-filled with data. + /// Rather pointless, use [`Self::read`] to actually read a file from disk. + #[must_use] + pub fn from_data( + modules: Vec, + basic_blocks: Vec, + ) -> Self { + Self { + module_entries: modules, + basic_block_entries: basic_blocks, + } + } + + /// Get a list of traversed [`DrCovBasicBlock`] nodes + #[must_use] + pub fn basic_blocks(&self) -> Vec { + let mut ret = Vec::with_capacity(self.basic_block_entries.len()); + + for basic_block in &self.basic_block_entries { + let bb_id = basic_block.mod_id; + if let Some(module) = self.module_by_id(bb_id) { + let start = module.base + u64::from(basic_block.start); + let end = start + u64::from(basic_block.size); + ret.push(DrCovBasicBlock::new(start, end)); + } else { + log::error!("Skipping basic block outside of any modules: {basic_block:?}"); + } + } + ret + } + + /// Get the module (range) map. This can be used to create a new [`DrCovWriter`]. 
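+    ///
+    /// A hedged round-trip sketch (assuming `reader` is a parsed [`DrCovReader`];
+    /// the output file name is an arbitrary example), mirroring what [`Self::write`] does:
+    /// ```rust,ignore
+    /// let ranges = reader.module_map();
+    /// let mut writer = DrCovWriter::new(&ranges);
+    /// writer.write("roundtrip.drcov", &reader.basic_blocks())?;
+    /// ```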
+ #[must_use] + pub fn module_map(&self) -> RangeMap { + let mut ret = RangeMap::new(); + for module in &self.module_entries { + ret.insert( + module.base..module.end, + ( + module.id, + module.path.clone().into_os_string().into_string().unwrap(), + ), + ); + } + ret + } + + /// Writes this data out to disk (again). + pub fn write>(&self, path: P) -> Result<(), Error> { + let ranges = self.module_map(); + let mut writer = DrCovWriter::new(&ranges); + writer.write(path, &self.basic_blocks()) + } + + /// Gets a list of all basic blocks, as absolute addresses, for u64 targets. + /// Useful for example for [`JmpScare`](https://github.com/fgsect/JMPscare) and other analyses. + #[must_use] + pub fn basic_block_addresses_u64(&self) -> Vec { + self.basic_blocks().iter().map(|x| x.start).collect() + } + + /// Gets a list of all basic blocks, as absolute addresses, for u32 targets. + /// Will return an [`Error`] if addresses are larger than 32 bit. + pub fn basic_block_addresses_u32(&self) -> Result, Error> { + let blocks = self.basic_blocks(); + let mut ret = Vec::with_capacity(blocks.len()); + for block in self.basic_blocks() { + ret.push(u32::try_from(block.start)?); + } + Ok(ret) + } + + /// Merges the contents of another [`DrCovReader`] instance into this one. + /// Useful to merge multiple coverage files of a fuzzing run into one drcov file. + /// Similar to [drcov-merge](https://github.com/vanhauser-thc/drcov-merge). + /// + /// If `unique` is set to 1, each block will end up in the resulting [`DrCovReader`] at most once. + /// + /// Will return an `Error` if the individual modules are not mergable. + /// In this case, the module list may already have been changed. + pub fn merge(&mut self, other: &DrCovReader, unique: bool) -> Result<(), Error> { + for module in &other.module_entries { + if let Some(own_module) = self.module_by_id(module.id) { + // Module exists, make sure it's the same. + if own_module.base != module.base || own_module.end != module.end { + return Err(Error::illegal_argument(format!("Module id of file to merge doesn't fit! Own modules: {:#x?}, other modules: {:#x?}", self.module_entries, other.module_entries))); + } + } else { + // We don't know the module. Insert as new module. + self.module_entries.push(module.clone()); + } + } + + if unique { + self.make_unique(); + } + let mut blocks = HashSet::new(); + + for block in &self.basic_block_entries { + blocks.insert(*block); + } + + for block in &other.basic_block_entries { + if !blocks.contains(block) { + blocks.insert(*block); + self.basic_block_entries.push(*block); + } + } + + Ok(()) + } + + /// Remove blocks that exist more than once in the trace, in-place. + pub fn make_unique(&mut self) { + let mut blocks = HashSet::new(); + let new_vec = self + .basic_block_entries + .iter() + .filter(|x| { + if blocks.contains(x) { + false + } else { + blocks.insert(*x); + true + } + }) + .copied() + .collect(); + drop(blocks); + + self.basic_block_entries = new_vec; + } + + /// Returns the module for a given `id`, or [`None`]. 
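+    ///
+    /// A small lookup sketch (assuming `reader` is a parsed [`DrCovReader`]):
+    /// ```rust,ignore
+    /// if let Some(module) = reader.module_by_id(0) {
+    ///     println!("module 0: {:#x}..{:#x} ({})", module.base, module.end, module.path.display());
+    /// }
+    /// ```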
+ #[must_use] + pub fn module_by_id(&self, id: u16) -> Option<&DrCovModuleEntry> { + self.module_entries.iter().find(|module| module.id == id) + } +} + +#[cfg(test)] +mod test { + use std::{ + env::temp_dir, + fs, + path::PathBuf, + string::{String, ToString}, + }; + + use rangemap::RangeMap; + + use super::{DrCovModuleEntry, DrCovReader, DrCovWriter}; + use crate::drcov::{DrCovBasicBlock, DrCovBasicBlockEntry}; + + #[test] + fn test_write_read_drcov() { + let mut ranges = RangeMap::::new(); + + ranges.insert(0x00..0x4242, (0xffff, "fuzzer".to_string())); + + ranges.insert(0x4242..0xFFFF, (0, "Entry0".to_string())); + ranges.insert(0xFFFF..0x424242, (1, "Entry1".to_string())); + + let mut writer = DrCovWriter::new(&ranges); + + let tmpdir = temp_dir(); + + let drcov_tmp_file = tmpdir.join("drcov_test.drcov"); + writer + .write( + &drcov_tmp_file, + &[ + DrCovBasicBlock::new(0x4242, 0x4250), + DrCovBasicBlock::new(0x10, 0x100), + DrCovBasicBlock::new(0x424200, 0x424240), + DrCovBasicBlock::new(0x10, 0x100), + ], + ) + .unwrap(); + + let reader = DrCovReader::read(&drcov_tmp_file).unwrap(); + + assert_eq!(reader.basic_block_entries.len(), 4); + assert_eq!(reader.module_map().len(), 3); + assert_eq!(reader.basic_blocks().len(), 4); + + // Let's do one more round :) + reader.write(&drcov_tmp_file).unwrap(); + let reader = DrCovReader::read(&drcov_tmp_file).unwrap(); + + assert_eq!(reader.basic_block_entries.len(), 4); + assert_eq!(reader.module_map().len(), 3); + assert_eq!(reader.basic_blocks().len(), 4); + + fs::remove_file(&drcov_tmp_file).unwrap(); + } + + #[test] + fn test_merge() { + let modules = vec![DrCovModuleEntry { + id: 0, + base: 0, + end: 0x4242, + entry: 0, + checksum: 0, + timestamp: 0, + path: PathBuf::new(), + }]; + let basic_blocks1 = vec![DrCovBasicBlockEntry { + mod_id: 0, + start: 0, + size: 42, + }]; + + let mut basic_blocks2 = basic_blocks1.clone(); + basic_blocks2.push(DrCovBasicBlockEntry { + mod_id: 0, + start: 4200, + size: 42, + }); + + let mut first = DrCovReader::from_data(modules.clone(), basic_blocks1); + let second = DrCovReader::from_data(modules, basic_blocks2); + + first.merge(&second, true).unwrap(); + assert_eq!(first.basic_block_entries.len(), 2); + } } diff --git a/libafl_targets/src/forkserver.c b/libafl_targets/src/forkserver.c index 94ebc00e22..e3f8f831b1 100644 --- a/libafl_targets/src/forkserver.c +++ b/libafl_targets/src/forkserver.c @@ -239,7 +239,7 @@ void __afl_start_forkserver(void) { void (*old_sigchld_handler)(int) = signal(SIGCHLD, SIG_DFL); - int autodict_on = __token_start != NULL && __token_stop != NULL; + int autotokens_on = __token_start != NULL && __token_stop != NULL; /* Phone home and tell the parent that we're OK. If parent isn't there, assume we're not running in forkserver mode and just execute program. 
*/ @@ -256,7 +256,7 @@ void __afl_start_forkserver(void) { status = FS_NEW_OPT_MAPSIZE; if (__afl_sharedmem_fuzzing) { status |= FS_NEW_OPT_SHDMEM_FUZZ; } - if (autodict_on) { status |= FS_NEW_OPT_AUTODICT; } + if (autotokens_on) { status |= FS_NEW_OPT_AUTODICT; } if (write(FORKSRV_FD + 1, msg, 4) != 4) { _exit(1); } @@ -266,14 +266,14 @@ void __afl_start_forkserver(void) { status = __afl_map_size; if (write(FORKSRV_FD + 1, msg, 4) != 4) { _exit(1); } - // FS_NEW_OPT_AUTODICT - send autodictionary - if (autodict_on) { - // pass the dictionary through the forkserver FD + // FS_NEW_OPT_AUTODICT - send autotokens + if (autotokens_on) { + // pass the autotokens through the forkserver FD uint32_t len = (__token_stop - __token_start), offset = 0; if (write(FORKSRV_FD + 1, &len, 4) != 4) { - write(2, "Error: could not send dictionary len\n", - strlen("Error: could not send dictionary len\n")); + write(2, "Error: could not send autotokens len\n", + strlen("Error: could not send autotokens len\n")); _exit(1); } @@ -282,7 +282,7 @@ void __afl_start_forkserver(void) { ret = write(FORKSRV_FD + 1, __token_start + offset, len); if (ret < 1) { - write_error("could not send dictionary"); + write_error("could not send autotokens"); _exit(1); } diff --git a/libafl_targets/src/lib.rs b/libafl_targets/src/lib.rs index 93c94b229e..74a6c5fce4 100644 --- a/libafl_targets/src/lib.rs +++ b/libafl_targets/src/lib.rs @@ -1,24 +1,7 @@ //! `libafl_targets` contains runtime code, injected in the target itself during compilation. #![no_std] -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(clippy::all)] -#![deny(clippy::pedantic)] -#![forbid(unexpected_cfgs)] // For `std::simd` #![cfg_attr(nightly, feature(portable_simd))] -#![allow( - clippy::unreadable_literal, - clippy::type_repetition_in_bounds, - clippy::missing_errors_doc, - clippy::cast_possible_truncation, - clippy::used_underscore_binding, - clippy::ptr_as_ptr, - clippy::missing_panics_doc, - clippy::missing_docs_in_private_items, - clippy::module_name_repetitions, - clippy::pub_underscore_fields, - clippy::into_iter_without_iter, // broken -)] #![cfg_attr(not(test), warn( missing_debug_implementations, missing_docs, @@ -72,6 +55,7 @@ include!(concat!(env!("OUT_DIR"), "/constants.rs")); feature = "sancov_pcguard_edges", feature = "sancov_pcguard_hitcounts", feature = "sancov_ngram4", + feature = "sancov_ngram8", feature = "sancov_ctx" ))] pub mod sancov_pcguard; @@ -79,6 +63,7 @@ pub mod sancov_pcguard; feature = "sancov_pcguard_edges", feature = "sancov_pcguard_hitcounts", feature = "sancov_ngram4", + feature = "sancov_ngram8", feature = "sancov_ctx" ))] pub use sancov_pcguard::*; @@ -101,6 +86,8 @@ pub mod sanitizer_ifaces { #![allow(missing_docs)] #![allow(missing_debug_implementations)] #![allow(unused_qualifications)] + #![allow(clippy::pub_underscore_fields)] + include!(concat!(env!("OUT_DIR"), "/sanitizer_interfaces.rs")); } diff --git a/libafl_targets/src/libfuzzer.c b/libafl_targets/src/libfuzzer.c index 70df4f1132..172e4496a0 100644 --- a/libafl_targets/src/libfuzzer.c +++ b/libafl_targets/src/libfuzzer.c @@ -21,7 +21,9 @@ EXT_FUNC(LLVMFuzzerCustomCrossOver, size_t, false); EXT_FUNC_IMPL(LLVMFuzzerTestOneInput, int, (const uint8_t *Data, size_t Size), false) { - fprintf(stderr, "Weakly defined \"LLVMFuzzerTestOneInput\" is linked. Did you add extern \"C\" to your harness?\n"); + fprintf(stderr, + "Weakly defined \"LLVMFuzzerTestOneInput\" is linked. 
Did you add " + "extern \"C\" to your harness?\n"); abort(); return 0; } diff --git a/libafl_targets/src/libfuzzer/mod.rs b/libafl_targets/src/libfuzzer/mod.rs index 9ff5abca65..31bae56d35 100644 --- a/libafl_targets/src/libfuzzer/mod.rs +++ b/libafl_targets/src/libfuzzer/mod.rs @@ -1,4 +1,5 @@ //! [`Libfuzzer`](https://www.llvm.org/docs/LibFuzzer.html)-style runtime wrapper for `LibAFL`. +//! //! This makes `LibAFL` interoperable with harnesses written for other fuzzers like `Libfuzzer` and [`AFLplusplus`](aflplus.plus). //! We will interact with a C++ target, so use external c functionality @@ -22,11 +23,12 @@ extern "C" { /// Calls the (native) libfuzzer initialize function. /// Returns the value returned by the init function. -/// # Note +/// +/// # Safety /// Calls the libfuzzer-style init function which is native code. #[allow(clippy::similar_names)] #[allow(clippy::must_use_candidate)] // nobody uses that return code... -pub fn libfuzzer_initialize(args: &[String]) -> i32 { +pub unsafe fn libfuzzer_initialize(args: &[String]) -> i32 { let args: Vec = args.iter().map(|x| x.clone() + "\0").collect(); let argv: Vec<*const u8> = args.iter().map(|x| x.as_bytes().as_ptr()).collect(); assert!(argv.len() < i32::MAX as usize); @@ -34,14 +36,15 @@ pub fn libfuzzer_initialize(args: &[String]) -> i32 { let argc = argv.len() as i32; unsafe { let argv_ptr = argv.as_ptr(); - libafl_targets_libfuzzer_init(core::ptr::addr_of!(argc), core::ptr::addr_of!(argv_ptr)) + libafl_targets_libfuzzer_init(&raw const argc, &raw const argv_ptr) } } /// Call a single input of a libfuzzer-style cpp-harness -/// # Note +/// +/// # Safety /// Calls the libfuzzer harness. We actually think the target is unsafe and crashes eventually, that's why we do all this fuzzing. #[allow(clippy::must_use_candidate)] -pub fn libfuzzer_test_one_input(buf: &[u8]) -> i32 { +pub unsafe fn libfuzzer_test_one_input(buf: &[u8]) -> i32 { unsafe { LLVMFuzzerTestOneInput(buf.as_ptr(), buf.len()) } } diff --git a/libafl_targets/src/libfuzzer/mutators.rs b/libafl_targets/src/libfuzzer/mutators.rs index 74384804ae..95f01443e8 100644 --- a/libafl_targets/src/libfuzzer/mutators.rs +++ b/libafl_targets/src/libfuzzer/mutators.rs @@ -19,7 +19,7 @@ use libafl::{ state::{HasCorpus, HasMaxSize, HasRand}, Error, }; -use libafl_bolts::{rands::Rand, AsSlice, Named}; +use libafl_bolts::{rands::Rand, AsSlice, HasLen, Named}; extern "C" { fn libafl_targets_has_libfuzzer_custom_mutator() -> bool; @@ -82,7 +82,7 @@ pub extern "C" fn LLVMFuzzerMutate(data: *mut u8, size: usize, max_size: usize) /// A proxy which wraps a targeted mutator. This is used to provide dynamic access to a global /// mutator without knowing the concrete type, which is necessary for custom mutators. 
-struct MutatorProxy<'a, M, MT, S> { +struct MutatorProxy<'a, M, S> { /// Pointer to the state of the fuzzer state: Rc>, // refcell to prevent double-mutability over the pointer /// A weak reference to the mutator to provide to the custom mutator @@ -90,10 +90,10 @@ struct MutatorProxy<'a, M, MT, S> { /// The result of mutation, to be propagated to the mutational stage result: Rc>>, /// Stage index, which is used by libafl mutator implementations - phantom: PhantomData<(&'a mut (), MT)>, + phantom: PhantomData<&'a mut ()>, } -impl<'a, M, MT, S> MutatorProxy<'a, M, MT, S> { +impl<'a, M, S> MutatorProxy<'a, M, S> { /// Crate a new mutator proxy for the given state and mutator fn new( state: &'a mut S, @@ -110,9 +110,8 @@ impl<'a, M, MT, S> MutatorProxy<'a, M, MT, S> { /// Create a weak version of the proxy, which will become unusable when the custom mutator /// is no longer permitted to be executed. - fn weak( - &self, - ) -> WeakMutatorProxy FnMut(&'b mut S)) -> bool, M, MT, S> { + #[allow(clippy::type_complexity)] + fn weak(&self) -> WeakMutatorProxy FnMut(&'b mut S)) -> bool, M, S> { let state = Rc::downgrade(&self.state); WeakMutatorProxy { accessor: move |f: &mut dyn for<'b> FnMut(&'b mut S)| { @@ -136,23 +135,23 @@ impl<'a, M, MT, S> MutatorProxy<'a, M, MT, S> { /// that once a libafl mutator exits scope (e.g., once the mutational stage is over) that the /// mutator is no longer accessible by the custom mutator. #[derive(Clone)] -struct WeakMutatorProxy { +struct WeakMutatorProxy { /// Function which will perform the access to the state. accessor: F, + /// A weak reference to the mutator mutator: Weak>, - /// The stage index to provide to the mutator, when executed. /// The result of mutation, to be propagated to the mutational stage result: Rc>>, - phantom: PhantomData<(MT, S)>, + phantom: PhantomData, } -impl ErasedLLVMFuzzerMutator for WeakMutatorProxy +impl ErasedLLVMFuzzerMutator for WeakMutatorProxy where F: Fn(&mut dyn for<'b> FnMut(&'b mut S)) -> bool, - M: ScheduledMutator, - MT: MutatorsTuple, + M: ScheduledMutator, + M::Mutations: MutatorsTuple, S: HasMaxSize + UsesInput, { fn mutate(&self, data: *mut u8, size: usize, max_size: usize) -> usize { @@ -197,17 +196,19 @@ where } } -/// A mutator which invokes a libFuzzer-like custom mutator or crossover. The `CROSSOVER` constant +/// A mutator which invokes a libFuzzer-like custom mutator or crossover. +/// +/// The `CROSSOVER` constant /// controls whether this mutator invokes `LLVMFuzzerCustomMutate` and `LLVMFuzzerCustomCrossover`. /// You should avoid using crossover-like mutators with custom mutators as this may lead to the /// injection of some input portions to another in ways which violate structure. 
#[derive(Debug)] -pub struct LLVMCustomMutator { +pub struct LLVMCustomMutator { mutator: Rc>, - phantom: PhantomData, + phantom: PhantomData, } -impl LLVMCustomMutator { +impl LLVMCustomMutator { /// Create the mutator which will invoke the custom mutator, emitting an error if the custom mutator is not present /// /// # Safety @@ -236,7 +237,7 @@ impl LLVMCustomMutator { } } -impl LLVMCustomMutator { +impl LLVMCustomMutator { /// Create the mutator which will invoke the custom crossover, emitting an error if the custom crossover is not present /// /// # Safety @@ -265,34 +266,33 @@ impl LLVMCustomMutator { } } -impl ComposedByMutations - for LLVMCustomMutator +impl ComposedByMutations for LLVMCustomMutator where - MT: MutatorsTuple, - S: UsesInput + HasRand + HasMaxSize, - SM: ScheduledMutator, + SM: ScheduledMutator, + SM::Mutations: MutatorsTuple, { - fn mutations(&self) -> &MT { + type Mutations = SM::Mutations; + fn mutations(&self) -> &Self::Mutations { unimplemented!("It is unsafe to provide reference-based access to the mutators as they are behind a RefCell.") } - fn mutations_mut(&mut self) -> &mut MT { + fn mutations_mut(&mut self) -> &mut Self::Mutations { unimplemented!("It is unsafe to provide reference-based access to the mutators as they are behind a RefCell.") } } -impl Named for LLVMCustomMutator { +impl Named for LLVMCustomMutator { fn name(&self) -> &Cow<'static, str> { static NAME: Cow<'static, str> = Cow::Borrowed("LLVMCustomMutator"); &NAME } } -impl Mutator for LLVMCustomMutator +impl Mutator for LLVMCustomMutator where - MT: MutatorsTuple + 'static, S: UsesInput + HasRand + HasMaxSize + 'static, - SM: ScheduledMutator + 'static, + SM: ScheduledMutator + 'static, + SM::Mutations: MutatorsTuple, { #[inline] fn mutate(&mut self, state: &mut S, input: &mut S::Input) -> Result { @@ -300,11 +300,11 @@ where } } -impl ScheduledMutator for LLVMCustomMutator +impl ScheduledMutator for LLVMCustomMutator where - SM: ScheduledMutator + 'static, - MT: MutatorsTuple + 'static, + SM: ScheduledMutator + 'static, S: UsesInput + HasRand + HasMaxSize + 'static, + SM::Mutations: MutatorsTuple, { fn iterations(&self, state: &mut S, input: &S::Input) -> u64 { let mutator = self.mutator.deref().borrow(); @@ -322,10 +322,9 @@ where input: &mut S::Input, ) -> Result { let seed = state.rand_mut().next(); - let target = input.bytes(); - let mut bytes = Vec::with_capacity(state.max_size()); - bytes.extend_from_slice(target.as_slice()); - bytes.resize(state.max_size(), 0); + let len_orig = input.bytes().len(); + let len_max = state.max_size(); + input.resize(len_max, 0); // we assume that the fuzzer did not use this mutator, but instead utilised their own let result = Rc::new(RefCell::new(Ok(MutationResult::Mutated))); @@ -334,11 +333,11 @@ where let mut mutator = mutator.borrow_mut(); mutator.replace(Box::new(proxy.weak())) }); - let new_size = unsafe { + let new_len = unsafe { libafl_targets_libfuzzer_custom_mutator( - bytes.as_mut_ptr(), - target.as_slice().len(), - bytes.len(), + input.bytes_mut().as_mut_ptr(), + len_orig, + len_max, seed as u32, ) }; @@ -350,24 +349,27 @@ where if result.deref().borrow().is_err() { return result.replace(Ok(MutationResult::Skipped)); } - bytes.truncate(new_size); - input.bytes_mut().copy_from_slice(&bytes); + if new_len > len_max { + return Err(Error::illegal_state("LLVMFuzzerCustomMutator returned more bytes than allowed. 
Expected up to {max_len} but got {new_len}")); + } + input.resize(new_len, 0); Ok(MutationResult::Mutated) } } -impl Named for LLVMCustomMutator { +impl Named for LLVMCustomMutator { fn name(&self) -> &Cow<'static, str> { - static NAME: Cow<'static, str> = Cow::Borrowed("LLVMCustomCrossover"); + static NAME: Cow<'static, str> = Cow::Borrowed("LLVMCustomMutator"); &NAME } } -impl Mutator for LLVMCustomMutator +impl Mutator for LLVMCustomMutator where - MT: MutatorsTuple + 'static, S: UsesInput + HasRand + HasMaxSize + HasCorpus + 'static, - SM: ScheduledMutator + 'static, + SM: ScheduledMutator + 'static, + S::Corpus: Corpus, + SM::Mutations: MutatorsTuple, { #[inline] fn mutate(&mut self, state: &mut S, input: &mut S::Input) -> Result { @@ -375,11 +377,12 @@ where } } -impl ScheduledMutator for LLVMCustomMutator +impl ScheduledMutator for LLVMCustomMutator where - SM: ScheduledMutator + 'static, - MT: MutatorsTuple + 'static, + SM: ScheduledMutator + 'static, S: UsesInput + HasRand + HasMaxSize + HasCorpus + 'static, + S::Corpus: Corpus, + SM::Mutations: MutatorsTuple, { fn iterations(&self, state: &mut S, input: &S::Input) -> u64 { let mutator = self.mutator.deref().borrow(); @@ -396,22 +399,26 @@ where state: &mut S, input: &mut S::Input, ) -> Result { + let id = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); // We don't want to use the testcase we're already using for splicing - let idx = random_corpus_id_with_disabled!(state.corpus(), state.rand_mut()); if let Some(cur) = state.corpus().current() { - if idx == *cur { + if id == *cur { return Ok(MutationResult::Skipped); } } - let mut other_testcase = state.corpus().get_from_all(idx)?.borrow_mut(); + let mut other_testcase = state.corpus().get_from_all(id)?.borrow_mut(); let other = other_testcase.load_input(state.corpus())?; let data2 = Vec::from(other.bytes()); drop(other_testcase); let seed = state.rand_mut().next(); let mut out = vec![0u8; state.max_size()]; - let data1 = input.bytes(); + + let len_max = state.max_size(); + let len_orig = input.len(); + + input.resize(len_max, 0); // we assume that the fuzzer did not use this mutator, but instead utilised their own let result = Rc::new(RefCell::new(Ok(MutationResult::Mutated))); @@ -420,14 +427,14 @@ where let mut mutator = mutator.borrow_mut(); mutator.replace(Box::new(proxy.weak())) }); - let new_size = unsafe { + let new_len = unsafe { libafl_targets_libfuzzer_custom_crossover( - data1.as_ptr(), - data1.len(), + input.bytes_mut().as_mut_ptr(), + len_orig, data2.as_ptr(), data2.len(), out.as_mut_ptr(), - out.len(), + len_max, seed as u32, ) }; @@ -439,8 +446,12 @@ where if result.deref().borrow().is_err() { return result.replace(Ok(MutationResult::Skipped)); } - out.truncate(new_size); - input.bytes_mut().copy_from_slice(&out); + + if new_len > len_max { + return Err(Error::illegal_state("LLVMFuzzerCustomCrossOver returned more bytes than allowed. 
Expected up to {max_len} but got {new_len}")); + } + + input.resize(new_len, 0); Ok(MutationResult::Mutated) } } diff --git a/libafl_targets/src/libfuzzer/observers/oom.rs b/libafl_targets/src/libfuzzer/observers/oom.rs index 8fb1dea3a6..35d59d511d 100644 --- a/libafl_targets/src/libfuzzer/observers/oom.rs +++ b/libafl_targets/src/libfuzzer/observers/oom.rs @@ -3,12 +3,9 @@ use core::{ffi::c_void, fmt::Debug}; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; use libafl::{ - events::EventFirer, executors::ExitKind, - feedbacks::Feedback, - inputs::UsesInput, - observers::{Observer, ObserversTuple}, - state::State, + feedbacks::{Feedback, StateInitializer}, + observers::Observer, Error, }; use libafl_bolts::Named; @@ -94,11 +91,8 @@ impl Named for OomObserver { } } -impl Observer for OomObserver -where - S: UsesInput, -{ - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { +impl Observer for OomObserver { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { OOMED.store(false, Ordering::Relaxed); // must reset for platforms which do not offer malloc tracking MALLOC_SIZE.store(0, Ordering::Relaxed); @@ -109,7 +103,7 @@ where fn post_exec( &mut self, _state: &mut S, - _input: &S::Input, + _input: &I, _exit_kind: &ExitKind, ) -> Result<(), Error> { RUNNING.store(false, Ordering::Relaxed); @@ -117,14 +111,14 @@ where Ok(()) } - fn pre_exec_child(&mut self, state: &mut S, input: &S::Input) -> Result<(), Error> { + fn pre_exec_child(&mut self, state: &mut S, input: &I) -> Result<(), Error> { self.pre_exec(state, input) } fn post_exec_child( &mut self, state: &mut S, - input: &S::Input, + input: &I, exit_kind: &ExitKind, ) -> Result<(), Error> { self.post_exec(state, input, exit_kind) @@ -149,22 +143,17 @@ impl Named for OomFeedback { } } -impl Feedback for OomFeedback -where - S: State, -{ - fn is_interesting( +impl StateInitializer for OomFeedback {} + +impl Feedback for OomFeedback { + fn is_interesting( &mut self, _state: &mut S, _manager: &mut EM, - _input: &S::Input, + _input: &I, _observers: &OT, _exit_kind: &ExitKind, - ) -> Result - where - EM: EventFirer, - OT: ObserversTuple, - { + ) -> Result { Ok(Self::oomed()) } diff --git a/libafl_targets/src/sancov_8bit.rs b/libafl_targets/src/sancov_8bit.rs index 4f2d3ed8d2..b4ef07bcde 100644 --- a/libafl_targets/src/sancov_8bit.rs +++ b/libafl_targets/src/sancov_8bit.rs @@ -1,6 +1,5 @@ //! [`LLVM` `8-bit-counters`](https://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards) runtime for `LibAFL`. use alloc::vec::Vec; -use core::ptr::addr_of_mut; use libafl_bolts::{ownedref::OwnedMutSlice, AsSlice, AsSliceMut}; @@ -8,13 +7,24 @@ use libafl_bolts::{ownedref::OwnedMutSlice, AsSlice, AsSliceMut}; /// They are initialized by calling [`__sanitizer_cov_8bit_counters_init`]( pub static mut COUNTERS_MAPS: Vec> = Vec::new(); +/// Gets a pointer to [`COUNTER_MAPS`] +fn counter_maps_ptr() -> *const Vec> { + &raw const COUNTERS_MAPS +} + +/// Gets a pointer to [`COUNTER_MAPS`], mut +fn counter_maps_ptr_mut() -> *mut Vec> { + &raw mut COUNTERS_MAPS +} + /// Create more copies of the counters maps /// /// # Safety /// You are responsible for ensuring there is no multi-mutability! 
#[must_use] pub unsafe fn extra_counters() -> Vec> { - COUNTERS_MAPS + let counter_maps = &*counter_maps_ptr(); + counter_maps .iter() .map(|counters| { OwnedMutSlice::from_raw_parts_mut( @@ -26,12 +36,16 @@ pub unsafe fn extra_counters() -> Vec> { } /// Initialize the sancov `8-bit-counters` - usually called by `llvm`. +/// +/// # Safety +/// Start and stop are being dereferenced. #[no_mangle] #[allow(clippy::cast_sign_loss)] #[allow(clippy::not_unsafe_ptr_arg_deref)] -pub extern "C" fn __sanitizer_cov_8bit_counters_init(start: *mut u8, stop: *mut u8) { +pub unsafe extern "C" fn __sanitizer_cov_8bit_counters_init(start: *mut u8, stop: *mut u8) { unsafe { - for existing in &mut *addr_of_mut!(COUNTERS_MAPS) { + let counter_maps = &mut *counter_maps_ptr_mut(); + for existing in counter_maps { let range = existing.as_slice_mut().as_mut_ptr() ..=existing .as_slice_mut() @@ -46,8 +60,10 @@ pub extern "C" fn __sanitizer_cov_8bit_counters_init(start: *mut u8, stop: *mut return; } } + + let counter_maps = &mut *counter_maps_ptr_mut(); // we didn't overlap; keep going - COUNTERS_MAPS.push(OwnedMutSlice::from_raw_parts_mut( + counter_maps.push(OwnedMutSlice::from_raw_parts_mut( start, stop.offset_from(start) as usize, )); @@ -65,14 +81,12 @@ mod observers { hash::{Hash, Hasher}, iter::Flatten, mem::size_of, - ptr::{addr_of, addr_of_mut}, slice::{from_raw_parts, Iter, IterMut}, }; use ahash::RandomState; use libafl::{ - inputs::UsesInput, - observers::{DifferentialObserver, MapObserver, Observer, ObserversTuple}, + observers::{DifferentialObserver, MapObserver, Observer}, Error, }; use libafl_bolts::{ @@ -81,13 +95,13 @@ mod observers { use meminterval::IntervalTree; use serde::{Deserialize, Serialize}; - use super::COUNTERS_MAPS; + use super::{counter_maps_ptr, counter_maps_ptr_mut}; #[must_use] #[export_name = "counters_maps_observer"] - /// Create a new [`CountersMultiMapObserver`] of the [`COUNTERS_MAPS`]. + /// Create a new [`CountersMultiMapObserver`] of the [`super::COUNTERS_MAPS`]. /// - /// This is a special [`libafl::observers::MultiMapObserver`] for the [`COUNTERS_MAPS`] and may be used when + /// This is a special [`libafl::observers::MultiMapObserver`] for the [`super::COUNTERS_MAPS`] and may be used when /// 8-bit counters are used for `SanitizerCoverage`. You can utilize this observer in a /// [`libafl::observers::HitcountsIterableMapObserver`] like so: /// @@ -113,7 +127,7 @@ mod observers { } /// The [`CountersMultiMapObserver`] observes all the counters that may be set by - /// `SanitizerCoverage` in [`COUNTERS_MAPS`] + /// `SanitizerCoverage` in [`super::COUNTERS_MAPS`] #[derive(Serialize, Deserialize, Debug)] #[allow(clippy::unsafe_derive_deserialize)] pub struct CountersMultiMapObserver { @@ -124,20 +138,18 @@ mod observers { iter_idx: usize, } - impl Observer for CountersMultiMapObserver + impl Observer for CountersMultiMapObserver where - S: UsesInput, Self: MapObserver, { #[inline] - fn pre_exec(&mut self, _state: &mut S, _input: &S::Input) -> Result<(), Error> { + fn pre_exec(&mut self, _state: &mut S, _input: &I) -> Result<(), Error> { self.reset_map() } } - impl Observer for CountersMultiMapObserver + impl Observer for CountersMultiMapObserver where - S: UsesInput, Self: MapObserver, { // in differential mode, we are *not* responsible for resetting the map! 
@@ -159,7 +171,7 @@ mod observers { impl Hash for CountersMultiMapObserver { fn hash(&self, hasher: &mut H) { - for map in unsafe { &*addr_of!(COUNTERS_MAPS) } { + for map in unsafe { &*counter_maps_ptr() } { let slice = map.as_slice(); let ptr = slice.as_ptr(); let map_size = slice.len() / size_of::(); @@ -190,7 +202,7 @@ mod observers { let elem = self.intervals.query(idx..=idx).next().unwrap(); let i = elem.value; let j = idx - elem.interval.start; - unsafe { (*addr_of!(COUNTERS_MAPS[*i])).as_slice()[j] } + unsafe { (*counter_maps_ptr())[*i].as_slice()[j] } } #[inline] @@ -198,7 +210,7 @@ mod observers { let elem = self.intervals.query_mut(idx..=idx).next().unwrap(); let i = elem.value; let j = idx - elem.interval.start; - unsafe { (*addr_of_mut!(COUNTERS_MAPS[*i])).as_slice_mut()[j] = val }; + unsafe { (*counter_maps_ptr_mut())[*i].as_slice_mut()[j] = val }; } #[inline] @@ -209,7 +221,7 @@ mod observers { fn count_bytes(&self) -> u64 { let initial = self.initial(); let mut res = 0; - for map in unsafe { &*addr_of!(COUNTERS_MAPS) } { + for map in unsafe { &*counter_maps_ptr() } { for x in map.as_slice() { if *x != initial { res += 1; @@ -226,7 +238,7 @@ mod observers { fn reset_map(&mut self) -> Result<(), Error> { let initial = self.initial(); - for map in unsafe { &mut *addr_of_mut!(COUNTERS_MAPS) } { + for map in unsafe { &mut *counter_maps_ptr_mut() } { for x in map.as_slice_mut() { *x = initial; } @@ -267,7 +279,7 @@ mod observers { fn maybe_differential(name: &'static str) -> Self { let mut idx = 0; let mut intervals = IntervalTree::new(); - for (v, x) in unsafe { &*addr_of!(COUNTERS_MAPS) }.iter().enumerate() { + for (v, x) in unsafe { &*counter_maps_ptr() }.iter().enumerate() { let l = x.as_slice().len(); intervals.insert(idx..(idx + l), v); idx += l; @@ -303,7 +315,7 @@ mod observers { let mut idx = 0; let mut v = 0; let mut intervals = IntervalTree::new(); - unsafe { &mut *addr_of_mut!(COUNTERS_MAPS) } + unsafe { &mut *counter_maps_ptr_mut() } .iter_mut() .for_each(|m| { let l = m.as_slice_mut().len(); @@ -327,7 +339,10 @@ mod observers { type IntoIter = Flatten>>; fn as_iter(&'it self) -> Self::IntoIter { - unsafe { COUNTERS_MAPS.iter().flatten() } + unsafe { + let counters_maps = &*counter_maps_ptr(); + counters_maps.iter().flatten() + } } } @@ -336,7 +351,10 @@ mod observers { type IntoIterMut = Flatten>>; fn as_iter_mut(&'it mut self) -> Self::IntoIterMut { - unsafe { COUNTERS_MAPS.iter_mut().flatten() } + unsafe { + let counters_maps = &mut *counter_maps_ptr_mut(); + counters_maps.iter_mut().flatten() + } } } @@ -345,7 +363,7 @@ mod observers { type IntoIter = Flatten>>; fn into_iter(self) -> Self::IntoIter { - unsafe { &*addr_of!(COUNTERS_MAPS) }.iter().flatten() + unsafe { &*counter_maps_ptr() }.iter().flatten() } } @@ -356,9 +374,7 @@ mod observers { type IntoIter = Flatten>>; fn into_iter(self) -> Self::IntoIter { - unsafe { &mut *addr_of_mut!(COUNTERS_MAPS) } - .iter_mut() - .flatten() + unsafe { &mut *counter_maps_ptr_mut() }.iter_mut().flatten() } } @@ -376,12 +392,5 @@ mod observers { } } - impl DifferentialObserver for CountersMultiMapObserver - where - Self: MapObserver, - OTA: ObserversTuple, - OTB: ObserversTuple, - S: UsesInput, - { - } + impl DifferentialObserver for CountersMultiMapObserver {} } diff --git a/libafl_targets/src/sancov_cmp.c b/libafl_targets/src/sancov_cmp.c index 42eb6aa3ee..4720894a1a 100644 --- a/libafl_targets/src/sancov_cmp.c +++ b/libafl_targets/src/sancov_cmp.c @@ -8,60 +8,46 @@ #include "cmplog.h" #endif -void 
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2) { - uintptr_t k = RETADDR; - k = (k >> 4) ^ (k << 8); - +// Note: for RETADDR to give us the fuzz target caller address we need +// to guarantee that this code is inlined. `inline` keyword provides +// no such guarantees, but a macro does. #ifdef SANCOV_VALUE_PROFILE - k &= CMP_MAP_SIZE - 1; - __libafl_targets_value_profile1(k, arg1, arg2); + #define SANCOV_VALUE_PROFILE_CALL(k, arg_size, arg1, arg2, arg1_is_const) \ + k &= CMP_MAP_SIZE - 1; \ + __libafl_targets_value_profile1(k, arg1, arg2); +#else + #define SANCOV_VALUE_PROFILE_CALL(k, arg_size, arg1, arg2, arg1_is_const) #endif + #ifdef SANCOV_CMPLOG - k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, 1, (uint64_t)arg1, (uint64_t)arg2); + #define SANCOV_CMPLOG_CALL(k, arg_size, arg1, arg2, arg1_is_const) \ + k &= CMPLOG_MAP_W - 1; \ + cmplog_instructions_checked(k, arg_size, (uint64_t)arg1, (uint64_t)arg2, arg1_is_const); +#else + #define SANCOV_CMPLOG_CALL(k, arg_size, arg1, arg2, arg1_is_const) #endif + +#define HANDLE_SANCOV_TRACE_CMP(arg_size, arg1, arg2, arg1_is_const) { \ + uintptr_t k = RETADDR; \ + k = (k >> 4) ^ (k << 8); \ + SANCOV_VALUE_PROFILE_CALL(k, arg_size, arg1, arg2, arg1_is_const) \ + SANCOV_CMPLOG_CALL(k, arg_size, arg1, arg2, arg1_is_const) \ +} + +void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2) { + HANDLE_SANCOV_TRACE_CMP(1, arg1, arg2, 0); } void __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2) { - uintptr_t k = RETADDR; - k = (k >> 4) ^ (k << 8); - -#ifdef SANCOV_VALUE_PROFILE - k &= CMP_MAP_SIZE - 1; - __libafl_targets_value_profile2(k, arg1, arg2); -#endif -#ifdef SANCOV_CMPLOG - k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, 2, (uint64_t)arg1, (uint64_t)arg2); -#endif + HANDLE_SANCOV_TRACE_CMP(2, arg1, arg2, 0); } void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2) { - uintptr_t k = RETADDR; - k = (k >> 4) ^ (k << 8); - -#ifdef SANCOV_VALUE_PROFILE - k &= CMP_MAP_SIZE - 1; - __libafl_targets_value_profile4(k, arg1, arg2); -#endif -#ifdef SANCOV_CMPLOG - k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, 4, (uint64_t)arg1, (uint64_t)arg2); -#endif + HANDLE_SANCOV_TRACE_CMP(4, arg1, arg2, 0); } void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2) { - uintptr_t k = RETADDR; - k = (k >> 4) ^ (k << 8); - -#ifdef SANCOV_VALUE_PROFILE - k &= CMP_MAP_SIZE - 1; - __libafl_targets_value_profile8(k, arg1, arg2); -#endif -#ifdef SANCOV_CMPLOG - k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, 8, (uint64_t)arg1, (uint64_t)arg2); -#endif + HANDLE_SANCOV_TRACE_CMP(8, arg1, arg2, 0); } void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases) { @@ -94,25 +80,26 @@ void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases) { #endif #ifdef SANCOV_CMPLOG k &= CMPLOG_MAP_W - 1; - cmplog_instructions_checked(k, cases[1] / 8, val, cases[i + 2]); + // Note: cases[i + 2] are the constant values, so keep them in arg1 and indicate that it's const + cmplog_instructions_checked(k, cases[1] / 8, cases[i + 2], val, 1); #endif } } void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2) { - __sanitizer_cov_trace_cmp1(arg1, arg2); + HANDLE_SANCOV_TRACE_CMP(1, arg1, arg2, 1); } void __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2) { - __sanitizer_cov_trace_cmp2(arg1, arg2); + HANDLE_SANCOV_TRACE_CMP(2, arg1, arg2, 1); } void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2) { - __sanitizer_cov_trace_cmp4(arg1, arg2); + HANDLE_SANCOV_TRACE_CMP(4, arg1, arg2, 1); } void 
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2) { - __sanitizer_cov_trace_cmp8(arg1, arg2); + HANDLE_SANCOV_TRACE_CMP(8, arg1, arg2, 1); } #pragma GCC diagnostic push diff --git a/libafl_targets/src/sancov_pcguard.rs b/libafl_targets/src/sancov_pcguard.rs index 0bd584d3f6..3fc2e63adc 100644 --- a/libafl_targets/src/sancov_pcguard.rs +++ b/libafl_targets/src/sancov_pcguard.rs @@ -1,11 +1,15 @@ //! [`LLVM` `PcGuard`](https://clang.llvm.org/docs/SanitizerCoverage.html#tracing-pcs-with-guards) runtime for `LibAFL`. #[rustversion::nightly] -#[cfg(feature = "sancov_ngram4")] +#[cfg(any(feature = "sancov_ngram4", feature = "sancov_ngram8"))] use core::simd::num::SimdUint; -use core::{mem::align_of, ptr, slice}; +use core::{mem::align_of, slice}; -#[cfg(any(feature = "sancov_ngram4", feature = "sancov_ctx"))] +#[cfg(any( + feature = "sancov_ngram4", + feature = "sancov_ctx", + feature = "sancov_ngram8" +))] use libafl::executors::{hooks::ExecutorHook, HasObservers}; #[cfg(any( @@ -14,14 +18,15 @@ use libafl::executors::{hooks::ExecutorHook, HasObservers}; feature = "sancov_pcguard_hitcounts", feature = "sancov_ctx", feature = "sancov_ngram4", + feature = "sancov_ngram8", ))] use crate::coverage::EDGES_MAP; use crate::coverage::MAX_EDGES_FOUND; -#[cfg(feature = "sancov_ngram4")] +#[cfg(any(feature = "sancov_ngram4", feature = "sancov_ngram8"))] #[allow(unused)] -use crate::EDGES_MAP_SIZE_IN_USE; +use crate::EDGES_MAP_DEFAULT_SIZE; #[cfg(feature = "pointer_maps")] -use crate::{coverage::EDGES_MAP_PTR, EDGES_MAP_SIZE_MAX}; +use crate::{coverage::EDGES_MAP_PTR, EDGES_MAP_ALLOCATED_SIZE}; #[cfg(all(feature = "sancov_pcguard_edges", feature = "sancov_pcguard_hitcounts"))] #[cfg(not(any(doc, feature = "clippy")))] @@ -61,6 +66,9 @@ pub static SHR_4: Ngram4 = Ngram4::from_array([1, 1, 1, 1]); #[rustversion::nightly] pub static SHR_8: Ngram8 = Ngram8::from_array([1, 1, 1, 1, 1, 1, 1, 1]); +static mut PC_TABLES: Vec<&'static [PcTableEntry]> = Vec::new(); + +use alloc::vec::Vec; #[cfg(any( feature = "sancov_ngram4", feature = "sancov_ngram8", @@ -179,19 +187,23 @@ unsafe fn update_ngram(pos: usize) -> usize { let mut reduced = pos; #[cfg(feature = "sancov_ngram4")] { - PREV_ARRAY_4 = PREV_ARRAY_4.rotate_elements_right::<1>(); - PREV_ARRAY_4.shl_assign(SHR_4); - PREV_ARRAY_4.as_mut_array()[0] = pos as u32; - reduced = PREV_ARRAY_4.reduce_xor() as usize; + let prev_array_4_ptr = &raw mut PREV_ARRAY_4; + let prev_array_4 = &mut *prev_array_4_ptr; + *prev_array_4 = prev_array_4.rotate_elements_right::<1>(); + prev_array_4.shl_assign(SHR_4); + prev_array_4.as_mut_array()[0] = pos as u32; + reduced = prev_array_4.reduce_xor() as usize; } #[cfg(feature = "sancov_ngram8")] { - PREV_ARRAY_8 = PREV_ARRAY_8.rotate_elements_right::<1>(); - PREV_ARRAY_8.shl_assign(SHR_8); - PREV_ARRAY_8.as_mut_array()[0] = pos as u32; - reduced = PREV_ARRAY_8.reduce_xor() as usize; + let prev_array_8_ptr = &raw mut PREV_ARRAY_8; + let prev_array_8 = &mut *prev_array_8_ptr; + *prev_array_8 = prev_array_8.rotate_elements_right::<1>(); + prev_array_8.shl_assign(SHR_8); + prev_array_8.as_mut_array()[0] = pos as u32; + reduced = prev_array_8.reduce_xor() as usize; } - reduced %= EDGES_MAP_SIZE_IN_USE; + reduced %= EDGES_MAP_DEFAULT_SIZE; reduced } @@ -220,13 +232,13 @@ pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard(guard: *mut u32) { #[cfg(any(feature = "sancov_ngram4", feature = "sancov_ngram8"))] { pos = update_ngram(pos); - // println!("Wrinting to {} {}", pos, EDGES_MAP_SIZE_IN_USE); + // println!("Wrinting to {} 
{}", pos, EDGES_MAP_DEFAULT_SIZE); } #[cfg(feature = "sancov_ctx")] { pos ^= __afl_prev_ctx as usize; - // println!("Wrinting to {} {}", pos, EDGES_MAP_SIZE_IN_USE); + // println!("Wrinting to {} {}", pos, EDGES_MAP_DEFAULT_SIZE); } #[cfg(feature = "pointer_maps")] @@ -243,15 +255,18 @@ pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard(guard: *mut u32) { } } #[cfg(not(feature = "pointer_maps"))] + #[cfg(any(feature = "sancov_pcguard_hitcounts", feature = "sancov_pcguard_edges"))] { + let edges_map_ptr = &raw mut EDGES_MAP; + let edges_map = &mut *edges_map_ptr; #[cfg(feature = "sancov_pcguard_edges")] { - *EDGES_MAP.get_unchecked_mut(pos) = 1; + *(edges_map).get_unchecked_mut(pos) = 1; } #[cfg(feature = "sancov_pcguard_hitcounts")] { - let val = (*EDGES_MAP.get_unchecked(pos)).wrapping_add(1); - *EDGES_MAP.get_unchecked_mut(pos) = val; + let val = (*edges_map.get_unchecked(pos)).wrapping_add(1); + *edges_map.get_unchecked_mut(pos) = val; } } } @@ -264,7 +279,7 @@ pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard(guard: *mut u32) { pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard_init(mut start: *mut u32, stop: *mut u32) { #[cfg(feature = "pointer_maps")] if EDGES_MAP_PTR.is_null() { - EDGES_MAP_PTR = EDGES_MAP.as_mut_ptr(); + EDGES_MAP_PTR = &raw mut EDGES_MAP as *mut u8; } if start == stop || *start != 0 { @@ -277,33 +292,39 @@ pub unsafe extern "C" fn __sanitizer_cov_trace_pc_guard_init(mut start: *mut u32 #[cfg(feature = "pointer_maps")] { - MAX_EDGES_FOUND = MAX_EDGES_FOUND.wrapping_add(1) % EDGES_MAP_SIZE_MAX; + MAX_EDGES_FOUND = MAX_EDGES_FOUND.wrapping_add(1) % EDGES_MAP_ALLOCATED_SIZE; } #[cfg(not(feature = "pointer_maps"))] { + let edges_map_ptr = &raw const EDGES_MAP; + let edges_map_len = (*edges_map_ptr).len(); MAX_EDGES_FOUND = MAX_EDGES_FOUND.wrapping_add(1); - assert!((MAX_EDGES_FOUND <= EDGES_MAP.len()), "The number of edges reported by SanitizerCoverage exceed the size of the edges map ({}). Use the LIBAFL_EDGES_MAP_SIZE_IN_USE env to increase it at compile time.", EDGES_MAP.len()); + assert!((MAX_EDGES_FOUND <= edges_map_len), "The number of edges reported by SanitizerCoverage exceed the size of the edges map ({edges_map_len}). Use the LIBAFL_EDGES_MAP_DEFAULT_SIZE env to increase it at compile time."); } } } -static mut PCS_BEG: *const usize = ptr::null(); -static mut PCS_END: *const usize = ptr::null(); - #[no_mangle] unsafe extern "C" fn __sanitizer_cov_pcs_init(pcs_beg: *const usize, pcs_end: *const usize) { // "The Unsafe Code Guidelines also notably defines that usize and isize are respectively compatible with uintptr_t and intptr_t defined in C." - assert!( - pcs_beg == PCS_BEG || PCS_BEG.is_null(), - "__sanitizer_cov_pcs_init can be called only once." + let len = pcs_end.offset_from(pcs_beg); + let Ok(len) = usize::try_from(len) else { + panic!("Invalid PC Table bounds - start: {pcs_beg:x?} end: {pcs_end:x?}") + }; + assert_eq!( + len % 2, + 0, + "PC Table size is not evens - start: {pcs_beg:x?} end: {pcs_end:x?}" ); - assert!( - pcs_end == PCS_END || PCS_END.is_null(), - "__sanitizer_cov_pcs_init can be called only once." 
+ assert_eq!( + (pcs_beg as usize) % align_of::(), + 0, + "Unaligned PC Table - start: {pcs_beg:x?} end: {pcs_end:x?}" ); - PCS_BEG = pcs_beg; - PCS_END = pcs_end; + let pc_tables_ptr = &raw mut PC_TABLES; + let pc_tables = &mut *pc_tables_ptr; + pc_tables.push(slice::from_raw_parts(pcs_beg as *const PcTableEntry, len)); } /// An entry to the `sanitizer_cov` `pc_table` @@ -328,33 +349,13 @@ impl PcTableEntry { } } -/// Returns a slice containing the PC table. -#[must_use] -pub fn sanitizer_cov_pc_table() -> Option<&'static [PcTableEntry]> { +/// Returns an iterator over the PC tables. If no tables were registered, this will be empty. +pub fn sanitizer_cov_pc_table<'a>() -> impl Iterator { // SAFETY: Once PCS_BEG and PCS_END have been initialized, will not be written to again. So // there's no TOCTOU issue. unsafe { - if PCS_BEG.is_null() || PCS_END.is_null() { - return None; - } - let len = PCS_END.offset_from(PCS_BEG); - assert!( - len > 0, - "Invalid PC Table bounds - start: {PCS_BEG:x?} end: {PCS_END:x?}" - ); - assert_eq!( - len % 2, - 0, - "PC Table size is not evens - start: {PCS_BEG:x?} end: {PCS_END:x?}" - ); - assert_eq!( - (PCS_BEG as usize) % align_of::(), - 0, - "Unaligned PC Table - start: {PCS_BEG:x?} end: {PCS_END:x?}" - ); - Some(slice::from_raw_parts( - PCS_BEG as *const PcTableEntry, - (len / 2).try_into().unwrap(), - )) + let pc_tables_ptr = &raw const PC_TABLES; + let pc_tables = &*pc_tables_ptr; + pc_tables.iter().copied() } } diff --git a/libafl_targets/src/windows_asan.rs b/libafl_targets/src/windows_asan.rs index 28244d9b89..3ba804dd3e 100644 --- a/libafl_targets/src/windows_asan.rs +++ b/libafl_targets/src/windows_asan.rs @@ -1,10 +1,13 @@ //! Setup asan death callbback use libafl::{ + corpus::Corpus, events::{EventFirer, EventRestarter}, executors::{hooks::windows::windows_asan_handler::asan_death_handler, Executor, HasObservers}, feedbacks::Feedback, - state::{HasCorpus, HasExecutions, HasSolutions}, + inputs::UsesInput, + observers::ObserversTuple, + state::{HasCorpus, HasExecutions, HasSolutions, UsesState}, HasObjective, }; @@ -31,9 +34,12 @@ pub unsafe fn setup_asan_callback(_executor: &E, _event_mgr: &EM, where E: Executor + HasObservers, EM: EventFirer + EventRestarter, - OF: Feedback, + OF: Feedback, E::State: HasSolutions + HasCorpus + HasExecutions, + E::Observers: ObserversTuple<::Input, E::State>, Z: HasObjective, + <::State as HasSolutions>::Solutions: Corpus, //delete me + <<::State as HasCorpus>::Corpus as Corpus>::Input: Clone, //delete me { __sanitizer_set_death_callback(Some(asan_death_handler::)); } diff --git a/libafl_tinyinst/Cargo.toml b/libafl_tinyinst/Cargo.toml index dff5d7f86f..86565f6bd7 100644 --- a/libafl_tinyinst/Cargo.toml +++ b/libafl_tinyinst/Cargo.toml @@ -2,9 +2,18 @@ name = "libafl_tinyinst" version.workspace = true edition = "2021" -authors = ["elbiazo ", "Dongjia Zhang "] +authors = [ + "elbiazo ", + "Dongjia Zhang ", +] repository = "https://github.com/AFLplusplus/LibAFL/" -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] license = "MIT OR Apache-2.0" keywords = ["fuzzing", "testing", "security"] description = "TinyInst backend for libafl" @@ -12,17 +21,20 @@ description = "TinyInst backend for libafl" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -libafl = { path = "../libafl", version = "0.13.0", features = [ - "std", - 
"libafl_derive", +libafl = { workspace = true, default-features = true, features = [ + "std", + "libafl_derive", ] } -libafl_bolts = { path = "../libafl_bolts", version = "0.13.0", features = [ - "std", - "libafl_derive", +libafl_bolts = { workspace = true, default-features = true, features = [ + "std", + "libafl_derive", ] } tinyinst = { git = "https://github.com/AFLplusplus/tinyinst-rs" } # tinyinst-rs = { path = "../../tinyinst-rs" } -log = "0.4.20" +log = { workspace = true } [build-dependencies] -cmake = "0.1" +cmake = { workspace = true } + +[lints] +workspace = true diff --git a/libafl_tinyinst/src/executor.rs b/libafl_tinyinst/src/executor.rs index d293156329..6dca42c5ae 100644 --- a/libafl_tinyinst/src/executor.rs +++ b/libafl_tinyinst/src/executor.rs @@ -3,7 +3,7 @@ use core::{marker::PhantomData, ptr, time::Duration}; use libafl::{ executors::{Executor, ExitKind, HasObservers}, inputs::HasTargetBytes, - observers::{ObserversTuple, UsesObservers}, + observers::ObserversTuple, state::{HasExecutions, State, UsesState}, Error, }; @@ -29,10 +29,10 @@ where map: Option<::ShMem>, } -impl<'a> TinyInstExecutor<(), NopShMemProvider, ()> { +impl TinyInstExecutor<(), NopShMemProvider, ()> { /// Create a builder for [`TinyInstExecutor`] #[must_use] - pub fn builder() -> TinyInstExecutorBuilder<'a, NopShMemProvider> { + pub fn builder<'a>() -> TinyInstExecutorBuilder<'a, NopShMemProvider> { TinyInstExecutorBuilder::new() } } @@ -115,7 +115,7 @@ pub struct TinyInstExecutorBuilder<'a, SP> { const MAX_FILE: usize = 1024 * 1024; const SHMEM_FUZZ_HDR_SIZE: usize = 4; -impl<'a> Default for TinyInstExecutorBuilder<'a, NopShMemProvider> { +impl Default for TinyInstExecutorBuilder<'_, NopShMemProvider> { fn default() -> Self { Self::new() } @@ -150,7 +150,7 @@ impl<'a> TinyInstExecutorBuilder<'a, NopShMemProvider> { } } -impl<'a, SP> TinyInstExecutorBuilder<'a, SP> +impl TinyInstExecutorBuilder<'_, SP> where SP: ShMemProvider, { @@ -320,8 +320,10 @@ impl HasObservers for TinyInstExecutor where S: State, SP: ShMemProvider, - OT: ObserversTuple, + OT: ObserversTuple, { + type Observers = OT; + fn observers(&self) -> RefIndexable<&Self::Observers, Self::Observers> { RefIndexable::from(&self.observers) } @@ -337,11 +339,3 @@ where { type State = S; } -impl UsesObservers for TinyInstExecutor -where - OT: ObserversTuple, - S: State, - SP: ShMemProvider, -{ - type Observers = OT; -} diff --git a/libafl_tinyinst/src/lib.rs b/libafl_tinyinst/src/lib.rs index d2620db3f9..dc22e7c31a 100644 --- a/libafl_tinyinst/src/lib.rs +++ b/libafl_tinyinst/src/lib.rs @@ -2,25 +2,6 @@ The tinyinst module for `LibAFL`. */ -#![warn(clippy::cargo)] -#![deny(clippy::cargo_common_metadata)] -#![deny(rustdoc::broken_intra_doc_links)] -#![deny(clippy::all)] -#![deny(clippy::pedantic)] -#![forbid(unexpected_cfgs)] -#![allow( - clippy::unreadable_literal, - clippy::type_repetition_in_bounds, - clippy::missing_errors_doc, - clippy::cast_possible_truncation, - clippy::used_underscore_binding, - clippy::ptr_as_ptr, - clippy::missing_panics_doc, - clippy::missing_docs_in_private_items, - clippy::module_name_repetitions, - clippy::unreadable_literal, - clippy::negative_feature_names -)] #![cfg_attr(not(test), warn( missing_debug_implementations, missing_docs, @@ -60,9 +41,6 @@ The tinyinst module for `LibAFL`. 
while_true ) )] -// Till they fix this buggy lint in clippy -#![allow(clippy::borrow_as_ptr)] -#![allow(clippy::borrow_deref_ref)] /// Tinyinst executor pub mod executor; diff --git a/scripts/build_all_fuzzers.sh b/scripts/build_all_fuzzers.sh index 08c59bfa10..4d8d5b1fb1 100755 --- a/scripts/build_all_fuzzers.sh +++ b/scripts/build_all_fuzzers.sh @@ -5,8 +5,8 @@ cd "$SCRIPT_DIR/.." || exit 1 # TODO: This should be rewritten in rust, a Makefile, or some platform-independent language if [[ -z "${RUN_ON_CI}" ]]; then - fuzzers=$(find ./fuzzers -mindepth 1 -maxdepth 1 -type d) - backtrace_fuzzers=$(find ./fuzzers/backtrace_baby_fuzzers -mindepth 1 -maxdepth 1 -type d) + fuzzers=$(find ./fuzzers -mindepth 2 -maxdepth 2 -type d) + backtrace_fuzzers=$(find ./fuzzers/baby/backtrace_baby_fuzzers -mindepth 1 -maxdepth 1 -type d) else cargo build -p build_and_test_fuzzers fuzzers=$(cargo run -p build_and_test_fuzzers -- "remotes/origin/main" "HEAD^") diff --git a/scripts/check_md_links.sh b/scripts/check_md_links.sh new file mode 100755 index 0000000000..78b19cf0da --- /dev/null +++ b/scripts/check_md_links.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +LIBAFL_DIR=$(realpath "$SCRIPT_DIR/..") + +echo "[*] Checking MD links..." + +cd "$LIBAFL" || exit 1 + +if ! command -v linkspector > /dev/null; then + echo "Error: install linkspector to check MD file links." + exit 1 +fi + +linkspector check -c "${LIBAFL_DIR}/.github/.linkspector.yml" || exit 1 + +echo "[*] Done :)" diff --git a/scripts/check_tested_fuzzers.sh b/scripts/check_tested_fuzzers.sh index 8b33c42931..a191f01d9f 100755 --- a/scripts/check_tested_fuzzers.sh +++ b/scripts/check_tested_fuzzers.sh @@ -11,8 +11,8 @@ while read -r fuzzdir; do echo "Fuzzer ${fuzzdir} is explicitly ignored" fi done < <( - find ./fuzzers -mindepth 1 -maxdepth 1 -type d - find ./fuzzers/backtrace_baby_fuzzers -mindepth 1 -maxdepth 1 -type d + find ./fuzzers -mindepth 2 -maxdepth 2 -type d + find ./fuzzers/baby/backtrace_baby_fuzzers -mindepth 1 -maxdepth 1 -type d ) exit $ret \ No newline at end of file diff --git a/scripts/clippy.ps1 b/scripts/clippy.ps1 index 76be0592a5..880cc5f075 100644 --- a/scripts/clippy.ps1 +++ b/scripts/clippy.ps1 @@ -1,14 +1,76 @@ -cargo clippy --all --all-features --exclude libafl_nyx --exclude symcc_runtime --exclude runtime_test --exclude libafl_qemu --exclude libafl_libfuzzer --exclude libafl_qemu_sys --no-deps --tests --benches --examples -- ` - -D clippy::all ` - -D clippy::pedantic ` - -W clippy::similar_names ` - -A clippy::type_repetition_in_bounds ` - -A clippy::missing-errors-doc ` - -A clippy::cast-possible-truncation ` - -A clippy::used-underscore-binding ` - -A clippy::ptr-as-ptr ` - -A clippy::missing-panics-doc ` - -A clippy::missing-docs-in-private-items ` - -A clippy::unseparated-literal-suffix ` - -A clippy::module-name-repetitions ` - -A clippy::unreadable-literal +# Clippy Runner Script for PowerShell (Windows) + +$ErrorActionPreference = "Stop" # This is similar to set -e in Bash +$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path +Set-Location (Split-Path -Parent $ScriptDir) + +# Function to run Clippy on a single directory +function Run-Clippy { + param ( + [string]$dir + ) + Write-Host "Running Clippy on $dir" + Push-Location $dir + + $currentdir = $PWD.Path + Write-Host "Running Clippy in $currentdir" + + try { + $env:RUST_BACKTRACE = "full" + cargo +nightly clippy --all-features --no-deps --tests --examples --benches -- -Z 
macro-backtrace + + # Exit unsuccessfully on clippy error + if (!$?) { + exit 1 + } + } + finally { + Pop-Location + } +} + +# Define projects for Windows +$AllProjects = @( + "libafl_concolic/test/dump_constraints", + "libafl_concolic/test/runtime_test", + "libafl_libfuzzer", + "libafl_nyx", + "libafl_sugar", + "libafl_tinyinst" + "utils/build_and_test_fuzzers", + "utils/deexit", + "utils/libafl_benches", + "utils/gramatron/construct_automata" +) + +# Check if arguments were provided +if ($args.Count -eq 0) { + # No arguments provided, run on all projects + $Projects = $AllProjects +} +else { + # Arguments provided, split the input string into an array + $Projects = $args[0] -split ',' +} + +# First run it on all default members +$env:RUST_BACKTRACE = "full" +cargo +nightly clippy --all-features --no-deps --tests --examples --benches -- -Z macro-backtrace + +# Exit unsuccessfully on clippy error +if (!$?) { + exit 1 +} + +# Loop through each project and run Clippy +foreach ($project in $Projects) { + $project = $project.Trim() + if (Test-Path $project -PathType Container) { + Run-Clippy $project + } + else { + Write-Host "Warning: Directory $project does not exist. Skipping." + } +} + +Write-Host "Clippy run completed for all specified projects." \ No newline at end of file diff --git a/scripts/clippy.sh b/scripts/clippy.sh index e37554a8f5..44bac06fc8 100755 --- a/scripts/clippy.sh +++ b/scripts/clippy.sh @@ -1,38 +1,68 @@ #!/bin/bash -SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" cd "$SCRIPT_DIR/.." || exit 1 +CLIPPY_CMD="RUST_BACKTRACE=full cargo +nightly clippy --no-deps --tests --examples --benches" +RUSTC_FLAGS="-Z macro-backtrace" + set -e +# Function to run Clippy on a single directory +run_clippy() { + local dir="$1" + local features="$2" + echo "Running Clippy on $dir" + pushd "$dir" || return 1 -RUST_BACKTRACE=full cargo +nightly clippy --all --all-features --exclude libafl_nyx --exclude symcc_runtime --exclude runtime_test --no-deps --tests --examples --benches -- -Z macro-backtrace \ - -D clippy::all \ - -D clippy::pedantic \ - -W clippy::similar_names \ - -A clippy::type_repetition_in_bounds \ - -A clippy::missing-errors-doc \ - -A clippy::cast-possible-truncation \ - -A clippy::used-underscore-binding \ - -A clippy::ptr-as-ptr \ - -A clippy::missing-panics-doc \ - -A clippy::missing-docs-in-private-items \ - -A clippy::unseparated-literal-suffix \ - -A clippy::module-name-repetitions \ - -A clippy::unreadable-literal + eval "$CLIPPY_CMD ${features:+"$features"} -- $RUSTC_FLAGS" + popd || return 1 +} + +# Define projects based on the operating system if [[ "$OSTYPE" == "linux-gnu"* ]]; then - cd libafl_libfuzzer/libafl_libfuzzer_runtime - RUST_BACKTRACE=full cargo +nightly clippy --all --all-features --exclude libafl_nyx --exclude symcc_runtime --exclude runtime_test --no-deps --tests --examples --benches -- -Z macro-backtrace \ - -D clippy::all \ - -D clippy::pedantic \ - -W clippy::similar_names \ - -A clippy::type_repetition_in_bounds \ - -A clippy::missing-errors-doc \ - -A clippy::cast-possible-truncation \ - -A clippy::used-underscore-binding \ - -A clippy::ptr-as-ptr \ - -A clippy::missing-panics-doc \ - -A clippy::missing-docs-in-private-items \ - -A clippy::unseparated-literal-suffix \ - -A clippy::module-name-repetitions \ - -A clippy::unreadable-literal + ALL_PROJECTS=( + "libafl_concolic/symcc_runtime" + "libafl_concolic/symcc_libafl" + "libafl_frida" + 
"libafl_libfuzzer" + "libafl_nyx" + "libafl_qemu" + "libafl_tinyinst" + "libafl_qemu/libafl_qemu_build" + "libafl_qemu/libafl_qemu_sys" + ) fi + +# Do not use --all-features for the following projects +NO_ALL_FEATURES=( + "libafl_qemu" +) + +if [ "$#" -eq 0 ]; then + # No arguments provided, run on all projects + PROJECTS=("${ALL_PROJECTS[@]}") +else + # Arguments provided, split the input string into an array + IFS=',' read -ra PROJECTS <<<"$1" +fi + +# First run it on all +eval "$CLIPPY_CMD --workspace -- $RUSTC_FLAGS" + +# Loop through each project and run Clippy +for project in "${PROJECTS[@]}"; do + # Trim leading and trailing whitespace + project=$(echo "$project" | sed 's/^[[:space:]]*//;s/[[:space:]]*$//') + features="--all-features" + if [[ " ${NO_ALL_FEATURES[*]} " =~ ${project} ]]; then + features="--features=clippy" + fi + if [ -d "$project" ]; then + run_clippy "$project" $features + else + echo "Warning: Directory $project does not exist. Skipping." + fi +done + +echo "Clippy run completed for all specified projects." diff --git a/scripts/createAliases.sh b/scripts/createAliases.sh new file mode 100755 index 0000000000..1e89dae157 --- /dev/null +++ b/scripts/createAliases.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# creates a symbolic link from bin-x.x to bin +# This just strips off last 3 characters when creating a link + +LLVMFILES="/usr/bin/llvm*" +CLANGFILES="/usr/bin/clang*" +LLC=/usr/bin/llc-$1 +OPT=/usr/bin/opt-$1 +LLD=/usr/bin/lld-$1 + +for f in $LLVMFILES $CLANGFILES $LLC $OPT $LLD +do + link=${f::-3} + echo "linking" "$f" "to" "$link" + ln -s "$f" "$link" + if [ -e "$f" ] + then cp "$link" /usr/local/bin/ + fi +done diff --git a/scripts/fmt_all.sh b/scripts/fmt_all.sh index c311443700..963f15c4ce 100755 --- a/scripts/fmt_all.sh +++ b/scripts/fmt_all.sh @@ -3,22 +3,36 @@ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" LIBAFL_DIR=$(realpath "$SCRIPT_DIR/..") +cd "${LIBAFL_DIR}" || exit 1 + if [ "$1" = "check" ]; then - cargo run --manifest-path "$LIBAFL_DIR/utils/libafl_fmt/Cargo.toml" --release -- -c --verbose + cargo run --manifest-path "$LIBAFL_DIR/utils/libafl_fmt/Cargo.toml" --release -- -c --verbose || exit 1 else - cargo run --manifest-path "$LIBAFL_DIR/utils/libafl_fmt/Cargo.toml" --release -- --verbose + cargo run --manifest-path "$LIBAFL_DIR/utils/libafl_fmt/Cargo.toml" --release -- --verbose || exit 1 fi -if command -v black > /dev/null; then - echo "[*] Formatting python files" - if ! black "$SCRIPT_DIR" - then - echo "Python format failed." - exit 1 - fi +if python3 -m black --version > /dev/null; then + BLACK_COMMAND="python3 -m black" +elif command -v black > /dev/null; then + BLACK_COMMAND="black" +fi +if [ -n "$BLACK_COMMAND" ]; then + echo "[*] Formatting python files" + if [ "$1" = "check" ]; then + $BLACK_COMMAND --check --diff "$LIBAFL_DIR" || exit 1 + else + $BLACK_COMMAND "$LIBAFL_DIR" || exit 1 + fi else - echo "Warning: python black not found. Formatting skipped for python." + echo -e "\n\033[1;33mWarning\033[0m: python black not found. 
Formatting skipped for python.\n" +fi + +if [ "$1" != "check" ]; then + if command -v taplo > /dev/null; then + echo "[*] Formatting TOML files" + taplo format + fi fi echo "[*] Done :)" diff --git a/scripts/parallellize_cargo_check.py b/scripts/parallellize_cargo_check.py index c70473e97c..f736c20a0c 100755 --- a/scripts/parallellize_cargo_check.py +++ b/scripts/parallellize_cargo_check.py @@ -4,6 +4,8 @@ import os import sys import math +LLVM_VERSION = "18" + # Current CI Runner ci_instances = 18 @@ -12,13 +14,17 @@ if len(sys.argv) != 2: instance_idx = int(sys.argv[1]) -# Set llvm config -os.environ["LLVM_CONFIG"] = "llvm-config" +# Set llvm config if it's not already set +if "LLVM_CONFIG" not in os.environ: + os.environ["LLVM_CONFIG"] = f"llvm-config-{LLVM_VERSION}" command = ( "DOCS_RS=1 cargo hack check --workspace --each-feature --clean-per-run " "--exclude-features=prelude,python,sancov_pcguard_edges,arm,aarch64,i386,be,systemmode,whole_archive " - "--no-dev-deps --exclude libafl_libfuzzer --print-command-list" + "--no-dev-deps --exclude libafl_libfuzzer --exclude libafl_qemu --exclude libafl_qemu_sys --print-command-list;" + "DOCS_RS=1 cargo hack check -p libafl_qemu -p libafl_qemu_sys --each-feature --clean-per-run " + "--exclude-features=prelude,python,sancov_pcguard_edges,arm,aarch64,i386,be,systemmode,whole_archive,slirp " + "--no-dev-deps --features usermode --print-command-list" ) # Run the command and capture the output @@ -33,8 +39,19 @@ for task in output[ ]: print("Running ", task) print(os.environ) + + if ( + "utils/libafl_jumper/Cargo.toml" in task + and "--no-default-features" in task + and "--features" not in task + ): + # ignore libafl_jumper no std + continue + if "libafl_frida" in task: # DOCS_RS is needed for libafl_frida to build without auto-download feature - cargo_check = subprocess.check_output(task, shell=True, text=True, env=dict(os.environ, DOCS_RS="1")) + cargo_check = subprocess.check_output( + task, shell=True, text=True, env=dict(os.environ, DOCS_RS="1") + ) else: - cargo_check = subprocess.check_output(task, shell=True, text=True) \ No newline at end of file + cargo_check = subprocess.check_output(task, shell=True, text=True) diff --git a/scripts/publish.sh b/scripts/publish.sh index fdd0fbbdbb..25b9e75d0c 100755 --- a/scripts/publish.sh +++ b/scripts/publish.sh @@ -22,6 +22,12 @@ cd .. || exit 1 sleep 20 +cd libafl_intelpt +cargo publish "$@" +cd .. || exit 1 + +sleep 20 + cd libafl cargo publish "$@" cd .. || exit 1 @@ -74,9 +80,9 @@ if git submodule status | grep "^-">/dev/null ; then \ fi cd libafl_concolic/symcc_runtime -cargo publish "$@" +cargo publish "$@" --allow-dirty cd ../.. || exit 1 cd libafl_libfuzzer -./publish.sh "$@" +cargo publish "$@" cd .. || exit 1 diff --git a/scripts/update_bindings.sh b/scripts/update_bindings.sh new file mode 100755 index 0000000000..911467e827 --- /dev/null +++ b/scripts/update_bindings.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" +cd "$SCRIPT_DIR/.." 
|| exit 1 + +# Update LibAFL QEMU bindings +pushd libafl_qemu + LIBAFL_QEMU_GEN_STUBS=1 cargo +nightly build || exit 1 +popd \ No newline at end of file diff --git a/scripts/update_versions.py b/scripts/update_versions.py index 4483d3b46d..53f1b2edd2 100755 --- a/scripts/update_versions.py +++ b/scripts/update_versions.py @@ -30,7 +30,7 @@ for subdir, dirs, files in os.walk(os.getcwd()): continue for file in files: - if file != "Cargo.toml": + if file not in ["Cargo.toml", "pyproject.toml"]: continue fname = os.path.join(subdir, file) print(fname) diff --git a/utils/build_and_test_fuzzers/Cargo.toml b/utils/build_and_test_fuzzers/Cargo.toml index 38cf334956..d94698c089 100644 --- a/utils/build_and_test_fuzzers/Cargo.toml +++ b/utils/build_and_test_fuzzers/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "build_and_test_fuzzers" -version = "0.1.0" +version.workspace = true edition = "2021" description = "Get diffing fuzzers from the last commit" repository = "https://github.com/AFLplusplus/LibAFL/" @@ -10,5 +10,8 @@ keywords = ["ci"] categories = ["development-tools::testing"] [dependencies] -cargo_toml = "0.20" -walkdir = "2" +cargo_toml = "0.20.5" +walkdir = "2.5.0" + +[lints] +workspace = true diff --git a/utils/cfg_builder/build.py b/utils/cfg_builder/build.py index 070f15e30b..e4664549e4 100644 --- a/utils/cfg_builder/build.py +++ b/utils/cfg_builder/build.py @@ -7,7 +7,7 @@ import sys cfg = dict() -if 'CFG_OUTPUT_PATH' not in os.environ: +if "CFG_OUTPUT_PATH" not in os.environ: sys.exit("CFG_OUTPUT_PATH not set") input_path = os.environ["CFG_OUTPUT_PATH"] @@ -31,8 +31,7 @@ for mname, module in cfg.items(): fnname2SG = dict() # First, add all the intra-procedural edges - for (fname, v) in module['edges'].items(): - + for fname, v in module["edges"].items(): if fname not in fname2id: GG.add_node(f_ids, label=fname) fname2id[fname] = f_ids @@ -41,8 +40,7 @@ for mname, module in cfg.items(): sz = len(v) for idx in range(node_ids, node_ids + sz): G.add_node(idx) - G.nodes[idx]['label'] = mname + ' ' + \ - fname + ' ' + str(idx - node_ids) + G.nodes[idx]["label"] = mname + " " + fname + " " + str(idx - node_ids) node_id_list = list(range(node_ids, node_ids + sz)) node_ids += sz SG = G.subgraph(node_id_list) @@ -52,14 +50,14 @@ for mname, module in cfg.items(): G.add_edge(node_id_list[src], node_id_list[item]) # Next, build inter-procedural edges - for (fname, calls) in module['calls'].items(): - for (idx, target_fns) in calls.items(): + for fname, calls in module["calls"].items(): + for idx, target_fns in calls.items(): # G.nodes isn't sorted src = sorted(fnname2SG[fname].nodes())[0] + int(idx) for target_fn in target_fns: if target_fn in fnname2SG: - offset = module['entries'][target_fn] + offset = module["entries"][target_fn] dst = sorted(fnname2SG[target_fn].nodes)[0] + offset diff --git a/utils/deexit/Cargo.toml b/utils/deexit/Cargo.toml index d763cbf5a0..092fd52467 100644 --- a/utils/deexit/Cargo.toml +++ b/utils/deexit/Cargo.toml @@ -1,7 +1,10 @@ [package] -authors = ["Andrea Fioraldi ", "Dominik Maier "] +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] name = "deexit" -version = "0.1.0" +version.workspace = true edition = "2021" description = "DeExit: Replace exits with aborts to catch them during in-process fuzzing" documentation = "https://docs.rs/libafl" @@ -9,13 +12,22 @@ repository = "https://github.com/AFLplusplus/LibAFL/" readme = "../../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "libafl", "ldpreload"] -categories = ["development-tools::testing", 
"emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -log = "0.4.20" +log = { workspace = true } [lib] name = "deexit" -crate-type = ["cdylib"] \ No newline at end of file +crate-type = ["cdylib"] + +[lints] +workspace = true diff --git a/utils/desyscall/Cargo.toml b/utils/desyscall/Cargo.toml index 45d1855226..8bdf398142 100644 --- a/utils/desyscall/Cargo.toml +++ b/utils/desyscall/Cargo.toml @@ -1,10 +1,21 @@ [package] name = "desyscall" -version = "0.1.0" +version = "0.14.1" edition = "2021" +description = "DeSyscall: Hooks syscalls for reduces overhead during in-process fuzzing" +repository = "https://github.com/AFLplusplus/LibAFL/" +license = "MIT OR Apache-2.0" +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] +keywords = ["fuzzing", "libafl", "ldpreload"] [dependencies] -meminterval = "0.3" +meminterval = "0.4" libc = "0.2" [dev-dependencies] diff --git a/utils/desyscall/build.rs b/utils/desyscall/build.rs index 5ded512a8f..2c4cd7dc0d 100644 --- a/utils/desyscall/build.rs +++ b/utils/desyscall/build.rs @@ -3,14 +3,19 @@ use std::env; fn main() { + if cfg!(not(target_os = "linux")) { + println!("cargo:warning=Not supported!"); + return; + } + let out_dir = env::var_os("OUT_DIR").unwrap(); let out_dir = out_dir.to_string_lossy().to_string(); println!("cargo:rerun-if-changed=src/syscalls.c"); // Enforce clang for its -fsanitize-coverage support. - std::env::set_var("CC", "clang"); - std::env::set_var("CXX", "clang++"); + env::set_var("CC", "clang"); + env::set_var("CXX", "clang++"); cc::Build::new().file("src/syscalls.c").compile("syscalls"); println!("cargo:rerun-if-changed=src/syscalls.c"); diff --git a/utils/desyscall/src/file.rs b/utils/desyscall/src/file.rs index 5202e9efe5..0e146d9396 100644 --- a/utils/desyscall/src/file.rs +++ b/utils/desyscall/src/file.rs @@ -9,8 +9,11 @@ extern "C" { fn __libafl_raw_read(fd: c_int, buf: Pointer, count: size_t) -> ssize_t; } +/// # Safety +/// Call to functions using syscalls +#[allow(clippy::cast_possible_wrap)] #[no_mangle] -pub unsafe fn write(fd: c_int, buf: Pointer, count: size_t) -> ssize_t { +pub unsafe extern "C" fn write(fd: c_int, buf: Pointer, count: size_t) -> ssize_t { let ctx = Context::get(); if ctx.enabled && (fd == 1 || fd == 2) { @@ -20,11 +23,13 @@ pub unsafe fn write(fd: c_int, buf: Pointer, count: size_t) -> ssize_t { } } +/// # Safety +/// Call to functions using syscalls #[no_mangle] -pub unsafe fn read(fd: c_int, buf: Pointer, count: size_t) -> ssize_t { +pub unsafe extern "C" fn read(fd: c_int, buf: Pointer, count: size_t) -> ssize_t { let ctx = Context::get(); - if ctx.enabled && fd >= 0 && fd <= 2 { + if ctx.enabled && (0..=2).contains(&fd) { 0 } else { __libafl_raw_read(fd, buf, count) diff --git a/utils/desyscall/src/lib.rs b/utils/desyscall/src/lib.rs index 12531dcb41..d8ba74e37a 100644 --- a/utils/desyscall/src/lib.rs +++ b/utils/desyscall/src/lib.rs @@ -1,12 +1,20 @@ -use libc::{c_int, c_void}; -use meminterval::IntervalTree; +#[cfg(target_os = "linux")] use std::{mem::MaybeUninit, sync::Once}; +#[cfg(target_os = "linux")] +use libc::{c_int, c_void}; +#[cfg(target_os = "linux")] +use meminterval::IntervalTree; + +#[cfg(target_os = "linux")] pub mod file; +#[cfg(target_os = "linux")] pub mod mmap; +#[cfg(target_os = "linux")] pub type Pointer = *mut c_void; 
+#[cfg(target_os = "linux")] #[derive(Debug, Clone)] pub struct Mapping { prot: c_int, @@ -14,13 +22,23 @@ pub struct Mapping { mapped: bool, } +#[cfg(target_os = "linux")] pub struct Context { enabled: bool, mappings: IntervalTree, exit_hook: Option>, } +#[cfg(target_os = "linux")] +impl Default for Context { + fn default() -> Self { + Self::new() + } +} + +#[cfg(target_os = "linux")] impl Context { + #[must_use] pub fn new() -> Self { Self { enabled: false, @@ -68,18 +86,22 @@ impl Context { } } +#[cfg(target_os = "linux")] extern "C" { fn __libafl_raw_exit_group(status: c_int); } // void _exit(int status); +/// # Safety +/// Call to function using syscalls #[no_mangle] -pub unsafe fn _exit(status: c_int) { +#[cfg(target_os = "linux")] +pub unsafe extern "C" fn _exit(status: c_int) { let ctx = Context::get(); if ctx.enabled { if let Some(hook) = &mut ctx.exit_hook { - (hook)(status as i32); + (hook)(status); } } diff --git a/utils/desyscall/src/mmap.rs b/utils/desyscall/src/mmap.rs index caed5d8680..948d425cf0 100644 --- a/utils/desyscall/src/mmap.rs +++ b/utils/desyscall/src/mmap.rs @@ -1,6 +1,9 @@ +//! Stub out syscalls. Linux only. + +use std::ptr; + use libc::{c_int, c_void, off_t, size_t}; use meminterval::Interval; -use std::ptr; use crate::{Context, Mapping, Pointer}; @@ -36,8 +39,12 @@ extern "C" { fn __libafl_raw_madvise(addr: *mut c_void, length: size_t, advice: c_int) -> c_int; } +/// # Safety +/// Call to functions using syscalls #[no_mangle] -pub unsafe fn mmap( +#[allow(clippy::too_many_lines)] +#[cfg(not(windows))] +pub unsafe extern "C" fn mmap( addr: Pointer, length: size_t, prot: c_int, @@ -53,7 +60,10 @@ pub unsafe fn mmap( // validity checks if length == 0 || length % PAGE_SIZE != 0 || (addr as usize) % PAGE_SIZE != 0 { - *libc::__errno_location() = libc::EINVAL; + #[cfg(target_os = "linux")] + { + *libc::__errno_location() = libc::EINVAL; + } return libc::MAP_FAILED as Pointer; } @@ -66,7 +76,7 @@ pub unsafe fn mmap( continue; } if length <= entry.interval.end as usize - entry.interval.start as usize { - candidate = Some((entry.interval.clone(), entry.value.clone())); + candidate = Some((*entry.interval, entry.value.clone())); break; } } @@ -76,7 +86,7 @@ pub unsafe fn mmap( if length < size { ctx.mappings.delete(cand.0); - let end = cand.0.start.offset(length as isize); + let end = cand.0.start.add(length); ctx.mappings.insert( cand.0.start..end, Mapping { @@ -100,7 +110,7 @@ pub unsafe fn mmap( let ret = __libafl_raw_mmap(addr, length, prot, flags, fd, offset) as Pointer; if ret != libc::MAP_FAILED as Pointer { - let end = ret.offset(length as isize); + let end = ret.add(length); ctx.mappings.insert( ret..end, Mapping { @@ -119,7 +129,7 @@ pub unsafe fn mmap( return ret; } - let end = addr.offset(length as isize); + let end = addr.add(length); let mut prev: Option<(_, _)> = None; let mut fail = false; @@ -133,12 +143,10 @@ pub unsafe fn mmap( if entry.interval.start != p.0 { fail = true; } - } else { - if entry.interval.start > addr { - fail = true; - } else if entry.interval.start < addr { - reminder = Some((entry.interval.start, entry.value.clone())); - } + } else if entry.interval.start > addr { + fail = true; + } else if entry.interval.start < addr { + reminder = Some((entry.interval.start, entry.value.clone())); } if entry.value.prot != prot { fail = true; @@ -148,12 +156,13 @@ pub unsafe fn mmap( already_mapped = true; } - intervals.push(entry.interval.clone()); + intervals.push(*entry.interval); prev = Some((entry.interval.end, entry.value)); } let mut 
reminder_next = None; + #[allow(clippy::comparison_chain)] if let Some(p) = prev.take() { if p.0 < end { fail = true; @@ -216,8 +225,10 @@ pub unsafe fn mmap( ret } +/// # Safety +/// Call to functions using syscalls #[no_mangle] -pub unsafe fn munmap(addr: *mut c_void, length: size_t) -> c_int { +pub unsafe extern "C" fn munmap(addr: *mut c_void, length: size_t) -> c_int { let ctx = Context::get(); if !ctx.enabled { @@ -226,7 +237,10 @@ pub unsafe fn munmap(addr: *mut c_void, length: size_t) -> c_int { // validity checks if length == 0 || (addr as usize) % PAGE_SIZE != 0 { - *libc::__errno_location() = libc::EINVAL; + #[cfg(target_os = "linux")] + { + *libc::__errno_location() = libc::EINVAL; + } return -1; } let aligned_length = if length % PAGE_SIZE != 0 { @@ -234,7 +248,7 @@ pub unsafe fn munmap(addr: *mut c_void, length: size_t) -> c_int { } else { length }; - let end = addr.offset(aligned_length as isize); + let end = addr.add(aligned_length); ctx.disable(); @@ -286,7 +300,7 @@ pub unsafe fn munmap(addr: *mut c_void, length: size_t) -> c_int { new_entries.push((Interval::new(end, entry.interval.end), entry.value.clone())); } - intervals.push(entry.interval.clone()); + intervals.push(*entry.interval); } for interval in intervals { @@ -302,8 +316,10 @@ pub unsafe fn munmap(addr: *mut c_void, length: size_t) -> c_int { 0 } +/// # Safety +/// Calling to functions using syscalls #[no_mangle] -pub unsafe fn mprotect(addr: *mut c_void, length: size_t, prot: c_int) -> c_int { +pub unsafe extern "C" fn mprotect(addr: *mut c_void, length: size_t, prot: c_int) -> c_int { let ctx = Context::get(); if !ctx.enabled { @@ -316,7 +332,7 @@ pub unsafe fn mprotect(addr: *mut c_void, length: size_t, prot: c_int) -> c_int } else { length }; - let end = addr.offset(aligned_length as isize); + let end = addr.add(aligned_length); ctx.disable(); @@ -324,11 +340,9 @@ pub unsafe fn mprotect(addr: *mut c_void, length: size_t, prot: c_int) -> c_int if let Some(mut entry) = query_iter.next() { // cache the repeated mprotects on the same region - if entry.interval.start == addr && entry.interval.end == end { - if entry.value.prot == prot { - ctx.enable(); - return 0; - } + if entry.interval.start == addr && entry.interval.end == end && entry.value.prot == prot { + ctx.enable(); + return 0; } let ret = __libafl_raw_mprotect(addr, length, prot); @@ -385,7 +399,7 @@ pub unsafe fn mprotect(addr: *mut c_void, length: size_t, prot: c_int) -> c_int new_entries.push((Interval::new(end, entry.interval.end), entry.value.clone())); } - intervals.push(entry.interval.clone()); + intervals.push(*entry.interval); if let Some(next) = query_iter.next() { entry = next; @@ -427,10 +441,12 @@ pub unsafe fn mprotect(addr: *mut c_void, length: size_t, prot: c_int) -> c_int } } +/// # Safety +/// Call to functions using syscalls #[no_mangle] -pub unsafe fn madvise(addr: *mut c_void, length: size_t, advice: c_int) -> c_int { +#[cfg(not(windows))] +pub unsafe extern "C" fn madvise(addr: *mut c_void, length: size_t, advice: c_int) -> c_int { let ctx = Context::get(); - if ctx.enabled && advice == libc::MADV_DONTNEED { 0 } else { @@ -438,10 +454,11 @@ pub unsafe fn madvise(addr: *mut c_void, length: size_t, advice: c_int) -> c_int } } -#[cfg(test)] +#[cfg(all(test, target_os = "linux"))] mod tests { - use super::*; use rusty_fork::rusty_fork_test; + + use super::*; // cargo test -- --nocapture --test-threads=1 rusty_fork_test! 
{ @@ -453,7 +470,7 @@ mod tests { let p = mmap(0x7ffff9f9e000usize as Pointer, 4096, 0x7, 0x22, 0, 0); assert!(p as isize != -1); - println!("Pre {:?}", p); + println!("Pre {p:?}", ); Context::get().print_mappings(); let r = munmap(p, 1); @@ -474,10 +491,10 @@ mod tests { let p = mmap(0x7ffff9f9e000usize as Pointer, PAGE_SIZE*4, 0x7, 0x22, 0, 0); assert!(p as isize != -1); - println!("Pre {:?}", p); + println!("Pre {p:?}",); Context::get().print_mappings(); - let r = munmap(p.offset(PAGE_SIZE as isize), PAGE_SIZE*2); + let r = munmap(p.add(PAGE_SIZE), PAGE_SIZE*2); assert!(r == 0); println!("Post"); @@ -495,19 +512,19 @@ mod tests { let p = mmap(0x7ffff9f9e000usize as Pointer, PAGE_SIZE*4, 0x7, 0x22, 0, 0); assert!(p as isize != -1); - println!("Pre {:?}", p); + println!("Pre {p:?}"); Context::get().print_mappings(); - let r = munmap(p.offset(PAGE_SIZE as isize), PAGE_SIZE*2); + let r = munmap(p.add(PAGE_SIZE), PAGE_SIZE*2); assert!(r == 0); println!("Post"); Context::get().print_mappings(); - let p = mmap(p.offset(PAGE_SIZE as isize), PAGE_SIZE, 0x1, 0x22, 0, 0); + let p = mmap(p.add(PAGE_SIZE), PAGE_SIZE, 0x1, 0x22, 0, 0); assert!(p as isize != -1); - println!("Remap {:?}", p); + println!("Remap {p:?}"); Context::get().print_mappings(); } } @@ -522,10 +539,10 @@ mod tests { let p = mmap(0 as Pointer, PAGE_SIZE*4, 0x7, 0x22, 0, 0); assert!(p as isize != -1); - println!("Pre {:?}", p); + println!("Pre {p:?}"); Context::get().print_mappings(); - let r = munmap(p.offset(PAGE_SIZE as isize), PAGE_SIZE*2); + let r = munmap(p.add(PAGE_SIZE), PAGE_SIZE*2); assert!(r == 0); println!("Post"); @@ -534,7 +551,7 @@ mod tests { let p = mmap(0 as Pointer, PAGE_SIZE, 0x7, 0x22, 0, 0); assert!(p as isize != -1); - println!("Remap {:?}", p); + println!("Remap {p:?}"); Context::get().print_mappings(); } } diff --git a/utils/desyscall/src/patch.c b/utils/desyscall/src/patch.c index 0e3fccafe2..08815added 100644 --- a/utils/desyscall/src/patch.c +++ b/utils/desyscall/src/patch.c @@ -35,10 +35,12 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. int __libafl_raw_mprotect(void *addr, size_t len, int prot); -void* mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset); -int munmap(void *addr, size_t length); -void *mremap(void *old_address, size_t old_size, size_t new_size, int flags, ... /* void *new_address */); -int mprotect(void *addr, size_t len, int prot); +void *mmap(void *addr, size_t length, int prot, int flags, int fd, + off_t offset); +int munmap(void *addr, size_t length); +void *mremap(void *old_address, size_t old_size, size_t new_size, int flags, + ... 
/* void *new_address */); +int mprotect(void *addr, size_t len, int prot); #ifdef __x86_64__ @@ -197,9 +199,9 @@ __attribute__((constructor)) void __libafl_hotpatch(void) { HOTPATCH(mmap) HOTPATCH(munmap) HOTPATCH(mprotect) - + HOTPATCH(write) - + HOTPATCH(_exit) #undef HOTPATCH diff --git a/utils/desyscall/src/syscalls.c b/utils/desyscall/src/syscalls.c index 1f52c05d0f..d0add7d925 100644 --- a/utils/desyscall/src/syscalls.c +++ b/utils/desyscall/src/syscalls.c @@ -3,34 +3,37 @@ #include #include -void* __libafl_raw_mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset) { - return (void*)syscall(SYS_mmap, addr, length, prot, flags, fd, offset); +void *__libafl_raw_mmap(void *addr, size_t length, int prot, int flags, int fd, + off_t offset) { + return (void *)syscall(SYS_mmap, addr, length, prot, flags, fd, offset); } int __libafl_raw_munmap(void *addr, size_t length) { - return syscall(SYS_munmap, addr, length); + return syscall(SYS_munmap, addr, length); } -void *__libafl_raw_mremap(void *old_address, size_t old_size, size_t new_size, int flags, void *new_address) { - return (void*)syscall(SYS_mremap, old_address, old_size, new_size, flags, new_address); +void *__libafl_raw_mremap(void *old_address, size_t old_size, size_t new_size, + int flags, void *new_address) { + return (void *)syscall(SYS_mremap, old_address, old_size, new_size, flags, + new_address); } int __libafl_raw_mprotect(void *addr, size_t len, int prot) { - return syscall(SYS_mprotect, addr, len, prot); + return syscall(SYS_mprotect, addr, len, prot); } int __libafl_raw_madvise(void *addr, size_t length, int advice) { - return syscall(SYS_madvise, addr, length, advice); + return syscall(SYS_madvise, addr, length, advice); } ssize_t __libafl_raw_write(int fd, const void *buf, size_t count) { - return syscall(SYS_write, fd, buf, count); + return syscall(SYS_write, fd, buf, count); } ssize_t __libafl_raw_read(int fd, void *buf, size_t count) { - return syscall(SYS_read, fd, buf, count); + return syscall(SYS_read, fd, buf, count); } void __libafl_raw_exit_group(int status) { - syscall(SYS_exit_group, status); + syscall(SYS_exit_group, status); } diff --git a/utils/drcov_utils/Cargo.toml b/utils/drcov_utils/Cargo.toml new file mode 100644 index 0000000000..b74f894a70 --- /dev/null +++ b/utils/drcov_utils/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "drcov_utils" +edition = "2021" +version.workspace = true +description = "Utility functions to work with DrCov coverage files" +repository = "https://github.com/AFLplusplus/LibAFL/" +license = "MIT OR Apache-2.0" +categories = ["development-tools"] +keywords = ["fuzzing", "libafl", "drcov"] + +[dependencies] +libafl_targets = { workspace = true, default-features = true } +clap = { workspace = true, features = ["derive", "wrap_help"] } + +[lints] +workspace = true diff --git a/utils/drcov_utils/README.md b/utils/drcov_utils/README.md new file mode 100644 index 0000000000..fd16cbd539 --- /dev/null +++ b/utils/drcov_utils/README.md @@ -0,0 +1,16 @@ +# LibAFL DrCov Utilities + +## Dump-DrCov_Addrs + +Simple commandline tool to display a list of all basic block addresses in a program. +This information can, for example, be used for further processing such as in [JmpScare](https://github.com/fgsect/JMPscare) or similar. +At the same time this tools shows how easily LibAFL's `DrCov` module can be used to parse coverage files. 
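As a taste of that API, here is a trimmed-down sketch of what the dump tool does; the `DrCovReader::read` and `basic_block_addresses_u64` calls are the same ones used in `drcov_dump_addrs.rs` further down, with paths and error handling simplified:

```rust
use std::path::PathBuf;

use libafl_targets::drcov::DrCovReader;

fn main() {
    // Parse one DrCov trace and print every covered basic-block address.
    let input = PathBuf::from("trace.drcov");
    let drcov = DrCovReader::read(&input).expect("failed to parse DrCov file");
    for addr in drcov.basic_block_addresses_u64() {
        println!("{addr:#x}");
    }
}
```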
+ +Run with `cargo run --release --bin drcov_dump_addrs -- -h` + +## DrCov_Merge + +A performant clone of [drcov-merge](https://github.com/vanhauser-thc/drcov-merge) using LibAFL's `DrCov` reader. +It can merge multiple DrCov files into a single DrCov file. + +Run with `cargo run --release --bin drcov_merge -- -h` diff --git a/utils/drcov_utils/src/bin/drcov_dump_addrs.rs b/utils/drcov_utils/src/bin/drcov_dump_addrs.rs new file mode 100644 index 0000000000..533dea3186 --- /dev/null +++ b/utils/drcov_utils/src/bin/drcov_dump_addrs.rs @@ -0,0 +1,80 @@ +use std::{ + fs::{create_dir_all, File}, + io::Write, + path::PathBuf, +}; + +use clap::Parser; +use libafl_targets::drcov::DrCovReader; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +#[allow(clippy::module_name_repetitions)] +#[command( + name = "drcov_dump_addrs", + about, + long_about = "Writes a list of all addresses from a DrCovFile" +)] +pub struct Opt { + #[arg(short, long, help = "DrCov traces to read", required = true)] + pub inputs: Vec, + #[arg( + short, + long, + help = "Output folder to write address files to. If none is set, this will output all addresses to stdout." + )] + pub out_dir: Option, +} + +fn main() { + let opts = Opt::parse(); + + if let Some(out_dir) = &opts.out_dir { + if !out_dir.exists() { + if let Err(err) = create_dir_all(out_dir) { + eprint!("Failed to create dir {out_dir:?}: {err:?}"); + } + } + + assert!(out_dir.is_dir(), "Out_dir {out_dir:?} not a directory!"); + } + + for input in opts.inputs { + let Ok(drcov) = DrCovReader::read(&input) + .map_err(|err| eprint!("Ignored coverage file {input:?}, reason: {err:?}")) + else { + continue; + }; + + if let Some(out_dir) = &opts.out_dir { + // Write files to a directory + let out_file = out_dir.join( + input + .file_name() + .expect("File without filename shouldn't exist"), + ); + + let Ok(mut file) = File::create_new(&out_file).map_err(|err| { + eprintln!("Could not create file {out_file:?} - continuing: {err:?}"); + }) else { + continue; + }; + + println!("Dumping addresses from drcov file {input:?} to {out_file:?}"); + + for line in drcov.basic_block_addresses_u64() { + file.write_all(format!("{line:#x}\n").as_bytes()) + .expect("Could not write to file"); + } + } else { + // dump to stdout + println!("# Blocks covered in {input:?}:"); + + for line in drcov.basic_block_addresses_u64() { + println!("{line:#x}"); + } + + println!(); + } + } +} diff --git a/utils/drcov_utils/src/bin/drcov_merge.rs b/utils/drcov_utils/src/bin/drcov_merge.rs new file mode 100644 index 0000000000..cb3d27725b --- /dev/null +++ b/utils/drcov_utils/src/bin/drcov_merge.rs @@ -0,0 +1,60 @@ +use std::path::PathBuf; + +use clap::Parser; +use libafl_targets::drcov::DrCovReader; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +#[allow(clippy::module_name_repetitions)] +#[command( + name = "drcov_merge", + about, + long_about = "Merges multiple DrCov coverage files into one" +)] +pub struct Opt { + #[arg(short, long, help = "DrCovFiles to merge", required = true)] + pub inputs: Vec, + #[arg(short, long, help = "Output DrCov file")] + pub output: PathBuf, + #[arg( + short, + long, + help = "If set, the merged file will contain every block exactly once." + )] + pub unique: bool, +} + +fn main() { + let opts = Opt::parse(); + + assert!( + opts.inputs.len() > 1, + "Need at least two inputs to merge anything." 
+ ); + + let mut inputs = opts.inputs.iter(); + + let initial_input = inputs.next().unwrap(); + + if opts.unique { + println!("Unique block mode"); + } + + println!("Reading inital drcov file from {initial_input:?}"); + let mut main_drcov = DrCovReader::read(initial_input).expect("Failed to read fist input!"); + + for input in inputs { + if let Ok(current_drcov) = DrCovReader::read(input) + .map_err(|err| eprintln!("Warning: failed to read drcov file at {input:?}: {err:?}")) + { + println!("Merging {input:?}"); + if let Err(err) = main_drcov.merge(¤t_drcov, opts.unique) { + eprintln!("Warning: failed to merge drcov file at {input:?}: {err:?}"); + } + } + } + + main_drcov + .write(opts.output) + .expect("Failed to write merged drcov file to output path"); +} diff --git a/utils/gdb_qemu/Cargo.toml b/utils/gdb_qemu/Cargo.toml index 4f626a8850..5691c03f0e 100644 --- a/utils/gdb_qemu/Cargo.toml +++ b/utils/gdb_qemu/Cargo.toml @@ -1,6 +1,3 @@ [workspace] resolver = "2" -members = [ - "gdb_qemu", - "demo", -] +members = ["gdb_qemu", "demo"] diff --git a/utils/gdb_qemu/Makefile.toml b/utils/gdb_qemu/Makefile.toml index dbdb4bda72..313490dbda 100644 --- a/utils/gdb_qemu/Makefile.toml +++ b/utils/gdb_qemu/Makefile.toml @@ -2,16 +2,16 @@ default_to_workspace = false [env] -DEMO_TARGET="powerpc-unknown-linux-gnu" -HOST_TARGET="x86_64-unknown-linux-gnu" -PROFILE="dev" -DEMO_DIR="${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${DEMO_TARGET}/debug" -TARGET_DIR="${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${HOST_TARGET}/debug" +DEMO_TARGET = "powerpc-unknown-linux-gnu" +HOST_TARGET = "x86_64-unknown-linux-gnu" +PROFILE = "dev" +DEMO_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${DEMO_TARGET}/debug" +TARGET_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${HOST_TARGET}/debug" [env.release] -PROFILE="release" -DEMO_DIR="${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${DEMO_TARGET}/release" -TARGET_DIR="${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${HOST_TARGET}/release" +PROFILE = "release" +DEMO_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${DEMO_TARGET}/release" +TARGET_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/${HOST_TARGET}/release" [tasks.clean] command = "cargo" @@ -27,55 +27,61 @@ dependencies = ["format", "clippy"] command = "cargo" args = [ "build", - "-p", "demo", - "--profile", "${PROFILE}", - "--target", "powerpc-unknown-linux-gnu", + "-p", + "demo", + "--profile", + "${PROFILE}", + "--target", + "powerpc-unknown-linux-gnu", ] [tasks.run_demo] dependencies = ["demo"] command = "cargo" -args = [ - "run", - "-p", "demo", - "--target", "powerpc-unknown-linux-gnu", -] +args = ["run", "-p", "demo", "--target", "powerpc-unknown-linux-gnu"] [tasks.build] dependencies = ["format", "clippy"] command = "cargo" -args = [ - "build", - "-p", "gdb_qemu", - "--profile", "${PROFILE}", -] +args = ["build", "-p", "gdb_qemu", "--profile", "${PROFILE}"] [tasks.run] command = "cargo" -dependencies = [ "demo" ] +dependencies = ["demo"] args = [ "run", - "-p", "gdb_qemu", - "--profile", "${PROFILE}", + "-p", + "gdb_qemu", + "--profile", + "${PROFILE}", "--", - "-p", "1234", - "-L", "trace", - "--", - "qemu-ppc", - "-L", "/usr/powerpc-linux-gnu", - "-g", "1234", - "${DEMO_DIR}/demo" + "-p", + "1234", + "-L", + "trace", + "--", + "qemu-ppc", + "-L", + "/usr/powerpc-linux-gnu", + "-g", + "1234", + "${DEMO_DIR}/demo", ] [tasks.gdb] command = "gdb-multiarch" dependencies = ["demo", "build"] args = [ - "-ex", "set architecture powerpc:MPC8XX", - "-ex", "set pagination off", - "-ex", "set confirm off", - "-ex", "file ${DEMO_DIR}/demo", - "-ex", "target remote | 
${TARGET_DIR}/gdb_qemu -p 1234 -L trace qemu-ppc -- -L /usr/powerpc-linux-gnu -g 1234 ${DEMO_DIR}/demo" + "-ex", + "set architecture powerpc:MPC8XX", + "-ex", + "set pagination off", + "-ex", + "set confirm off", + "-ex", + "file ${DEMO_DIR}/demo", + "-ex", + "target remote | ${TARGET_DIR}/gdb_qemu -p 1234 -L trace qemu-ppc -- -L /usr/powerpc-linux-gnu -g 1234 ${DEMO_DIR}/demo", ] [tasks.all] diff --git a/utils/gdb_qemu/demo/Cargo.toml b/utils/gdb_qemu/demo/Cargo.toml index 3f1d0e2600..72c2a63fd5 100644 --- a/utils/gdb_qemu/demo/Cargo.toml +++ b/utils/gdb_qemu/demo/Cargo.toml @@ -1,11 +1,23 @@ [package] name = "gdb_demo" -version = "0.1.0" +version = "0.14.1" edition = "2021" [build-dependencies] -vergen = { version = "8.1.1", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] } +vergen = { version = "8.1.1", features = [ + "build", + "cargo", + "git", + "gitcl", + "rustc", + "si", +] } [dependencies] anyhow = { version = "1.0", default-features = false } -clap = { version = "4.5", default-features = false, features = ["derive", "string", "std", "help"] } +clap = { version = "4.5", default-features = false, features = [ + "derive", + "string", + "std", + "help", +] } diff --git a/utils/gdb_qemu/gdb_qemu/Cargo.toml b/utils/gdb_qemu/gdb_qemu/Cargo.toml index e2dbdab4de..b05aaf5914 100644 --- a/utils/gdb_qemu/gdb_qemu/Cargo.toml +++ b/utils/gdb_qemu/gdb_qemu/Cargo.toml @@ -1,16 +1,34 @@ [package] name = "gdb_qemu" -version = "0.1.0" +version = "0.14.1" edition = "2021" [build-dependencies] -vergen = { version = "8.1.1", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] } +vergen = { version = "8.1.1", features = [ + "build", + "cargo", + "git", + "gitcl", + "rustc", + "si", +] } [dependencies] anyhow = { version = "1.0", default-features = false } -clap = { version = "4.5", default-features = false, features = ["derive", "string", "std", "help", "derive", "error-context", "usage"] } -libc = {version = "0.2", default-features = false } -log = { version = "0.4.20", default-features = false } -nix = { version = "0.29", default-features = false, features = ["signal", "fs"] } +clap = { version = "4.5", default-features = false, features = [ + "derive", + "string", + "std", + "help", + "derive", + "error-context", + "usage", +] } +libc = { version = "0.2", default-features = false } +log = { version = "0.4", default-features = false } +nix = { version = "0.29", default-features = false, features = [ + "signal", + "fs", +] } readonly = { version = "0.2.8", default-features = false } simplelog = { version = "0.12.1", default-features = false } diff --git a/utils/gramatron/construct_automata.py b/utils/gramatron/construct_automata.py index 0cb87c3c75..9f8441b47c 100644 --- a/utils/gramatron/construct_automata.py +++ b/utils/gramatron/construct_automata.py @@ -8,36 +8,37 @@ import sys import json import re from collections import defaultdict + # import pygraphviz as pgv gram_data = None state_count = 1 pda = [] worklist = [] -state_stacks = {} +state_stacks = {} # === If user provides upper bound on the stack size during FSA creation === # Specifies the upper bound to which the stack is allowed to grow # If for any generated state, the stack size is >= stack_limit then this # state is not expanded further. 
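That stack bound is what keeps the approximated automaton finite for recursive grammars. A compact, self-contained illustration of the same worklist-with-stack-limit idea (a toy sketch written in Rust rather than the script's Python; the real script also tokenizes rules, records PDA transitions, and detects recursive states):

```rust
use std::collections::{BTreeMap, BTreeSet, VecDeque};

/// Expand a grammar into reachable symbol stacks, refusing to expand any
/// stack that grows past `stack_limit` (mirroring `unexpanded_rules` above).
fn expand(grammar: &BTreeMap<&str, Vec<Vec<&str>>>, start: &str, stack_limit: usize) {
    let mut worklist: VecDeque<Vec<&str>> = VecDeque::from([vec![start]]);
    let mut seen: BTreeSet<Vec<&str>> = worklist.iter().cloned().collect();
    let mut skipped = 0usize;

    while let Some(mut stack) = worklist.pop_front() {
        // An empty stack is a final state: nothing left to expand.
        let Some(nt) = stack.pop() else { continue };
        for rule in &grammar[nt] {
            // rule[0] is the emitted terminal; the remaining symbols are
            // pushed (reversed, so the leftmost ends up on top of the stack).
            let mut next = stack.clone();
            next.extend(rule.iter().skip(1).rev().copied());
            if next.len() > stack_limit {
                skipped += 1; // the Python script records the offending rule instead
                continue;
            }
            if seen.insert(next.clone()) {
                worklist.push_back(next);
            }
        }
    }
    println!("states: {}, rules skipped by the bound: {skipped}", seen.len());
}

fn main() {
    // Toy grammar: S -> 'a' S S | 'b'
    let grammar = BTreeMap::from([("S", vec![vec!["a", "S", "S"], vec!["b"]])]);
    expand(&grammar, "S", 3);
}
```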
-stack_limit = None +stack_limit = None # Holds the set of unexpanded rules owing to the user-passed stack constraint limit unexpanded_rules = set() + def main(grammar, limit): global worklist, gram_data, stack_limit - current = '0' + current = "0" stack_limit = limit if stack_limit: - print ('[X] Operating in bounded stack mode') + print("[X] Operating in bounded stack mode") - with open(grammar, 'r') as fd: + with open(grammar, "r") as fd: gram_data = json.load(fd) start_symbol = gram_data["Start"][0] worklist.append([current, [start_symbol]]) # print (grammar) - filename = (grammar.split('/')[-1]).split('.')[0] - + filename = (grammar.split("/")[-1]).split(".")[0] while worklist: # Take an element from the worklist @@ -45,69 +46,78 @@ def main(grammar, limit): # print ('Worklist:', worklist) element = worklist.pop(0) prep_transitions(element) - - pda_file = filename + '_transition.json' - graph_file = filename + '.png' + + pda_file = filename + "_transition.json" + graph_file = filename + ".png" # print ('XXXXXXXXXXXXXXXX') # print ('PDA file:%s Png graph file:%s' % (pda_file, graph_file)) # XXX Commented out because visualization of current version of PHP causes segfault # Create the graph and dump the transitions to a file # create_graph(filename) transformed = postprocess() - with open(filename + '_automata.json', 'w+') as fd: + with open(filename + "_automata.json", "w+") as fd: json.dump(transformed, fd) - with open(filename + '_transition.json', 'w+') as fd: + with open(filename + "_transition.json", "w+") as fd: json.dump(pda, fd) if not unexpanded_rules: - print ('[X] No unexpanded rules, absolute FSA formed') + print("[X] No unexpanded rules, absolute FSA formed") exit(0) else: - print ('[X] Certain rules were not expanded due to stack size limit. Inexact approximation has been created and the disallowed rules have been put in {}_disallowed.json'.format(filename)) - print ('[X] Number of unexpanded rules:', len(unexpanded_rules)) - with open(filename + '_disallowed.json', 'w+') as fd: + print( + "[X] Certain rules were not expanded due to stack size limit. 
Inexact approximation has been created and the disallowed rules have been put in {}_disallowed.json".format( + filename + ) + ) + print("[X] Number of unexpanded rules:", len(unexpanded_rules)) + with open(filename + "_disallowed.json", "w+") as fd: json.dump(list(unexpanded_rules), fd) + def create_graph(filename): - ''' + """ Creates a DOT representation of the PDA - ''' + """ global pda - G = pgv.AGraph(strict = False, directed = True) + G = pgv.AGraph(strict=False, directed=True) for transition in pda: - print ('Transition:', transition) - G.add_edge(transition['source'], transition['dest'], - label = 'Term:{}'.format(transition['terminal'])) - G.layout(prog = 'dot') - print ('Do it up 2') - G.draw(filename + '.png') + print("Transition:", transition) + G.add_edge( + transition["source"], + transition["dest"], + label="Term:{}".format(transition["terminal"]), + ) + G.layout(prog="dot") + print("Do it up 2") + G.draw(filename + ".png") + def prep_transitions(element): - ''' + """ Generates transitions - ''' + """ global gram_data, state_count, pda, worklist, state_stacks, stack_limit, unexpanded_rules state = element[0] try: - nonterminal = element[1][0] + nonterminal = element[1][0] except IndexError: # Final state was encountered, pop from worklist without doing anything return rules = gram_data[nonterminal] count = 1 for rule in rules: - isRecursive = False + isRecursive = False # print ('Current state:', state) terminal, ss, termIsRegex = tokenize(rule) transition = get_template() - transition['trigger'] = '_'.join([state, str(count)]) - transition['source'] = state - transition['dest'] = str(state_count) - transition['ss'] = ss - transition['terminal'] = terminal - transition['rule'] = "{} -> {}".format(nonterminal, rule ) + transition["trigger"] = "_".join([state, str(count)]) + transition["source"] = state + transition["dest"] = str(state_count) + transition["ss"] = ss + transition["terminal"] = terminal + transition["rule"] = "{} -> {}".format(nonterminal, rule) if termIsRegex: - transition['termIsRegex'] = True - + transition["termIsRegex"] = True + # Creating a state stack for the new state try: state_stack = state_stacks[state][:] @@ -118,7 +128,7 @@ def prep_transitions(element): if ss: for symbol in ss[::-1]: state_stack.insert(0, symbol) - transition['stack'] = state_stack + transition["stack"] = state_stack # Check if a recursive transition state being created, if so make a backward # edge and don't add anything to the worklist @@ -128,38 +138,39 @@ def prep_transitions(element): # print ('Stack:', sorted(stack)) # print ('State stack:', sorted(state_stack)) if sorted(stack) == sorted(state_stack): - transition['dest'] = state_element + transition["dest"] = state_element # print ('Recursive:', transition) pda.append(transition) count += 1 isRecursive = True - break + break # If a recursive transition exercised don't add the same transition as a new # edge, continue onto the next transitions if isRecursive: continue - + # If the generated state has a stack size > stack_limit then that state is abandoned # and not added to the FSA or the worklist for further expansion if stack_limit: - if (len(transition['stack']) > stack_limit): - unexpanded_rules.add(transition['rule']) + if len(transition["stack"]) > stack_limit: + unexpanded_rules.add(transition["rule"]) continue # Create transitions for the non-recursive relations and add to the worklist # print ('Normal:', transition) # print ('State2:', state) pda.append(transition) - worklist.append([transition['dest'], 
transition['stack']]) - state_stacks[transition['dest']] = state_stack + worklist.append([transition["dest"], transition["stack"]]) + state_stacks[transition["dest"]] = state_stack state_count += 1 count += 1 + def tokenize(rule): - ''' + """ Gets the terminal and the corresponding stack symbols from a rule in GNF form - ''' - pattern = re.compile("([r])*\'([\s\S]+)\'([\s\S]*)") + """ + pattern = re.compile("([r])*'([\s\S]+)'([\s\S]*)") terminal = None ss = None termIsRegex = False @@ -176,148 +187,153 @@ def tokenize(rule): return terminal, ss, termIsRegex + def get_template(): transition_template = { - 'trigger':None, - 'source': None, - 'dest': None, - 'termIsRegex': False, - 'terminal' : None, - 'stack': [] - } + "trigger": None, + "source": None, + "dest": None, + "termIsRegex": False, + "terminal": None, + "stack": [], + } return transition_template + def postprocess1(): - ''' + """ Creates a representation to be passed on to the C-module - ''' + """ global pda final_struct = {} # Supporting data structures for if stack limit is imposed culled_pda = [] culled_final = [] - num_transitions = 0 # Keep track of number of transitions - + num_transitions = 0 # Keep track of number of transitions states, final, initial = _get_states() memoized = [[]] * len(states) - print (initial) - assert len(initial) == 1, 'More than one init state found' + print(initial) + assert len(initial) == 1, "More than one init state found" # Cull transitions to states which were not expanded owing to the stack limit if stack_limit: - blocklist = [] for final_state in final: for transition in pda: - if (transition["dest"] == final_state) and (len(transition["stack"]) > 0): + if (transition["dest"] == final_state) and ( + len(transition["stack"]) > 0 + ): blocklist.append(transition["dest"]) continue else: culled_pda.append(transition) - + culled_final = [state for state in final if state not in blocklist] - assert len(culled_final) == 1, 'More than one final state found' + assert len(culled_final) == 1, "More than one final state found" for transition in culled_pda: state = transition["source"] if transition["dest"] in blocklist: - continue + continue num_transitions += 1 - memoized[int(state)].append((transition["trigger"], - int(transition["dest"]), transition["terminal"])) + memoized[int(state)].append( + (transition["trigger"], int(transition["dest"]), transition["terminal"]) + ) final_struct["init_state"] = int(initial) final_struct["final_state"] = int(culled_final[0]) # The reason we do this is because when states are culled, the indexing is # still relative to the actual number of states hence we keep numstates recorded # as the original number of states - print ('[X] Actual Number of states:', len(memoized)) - print ('[X] Number of transitions:', num_transitions) - print ('[X] Original Number of states:', len(states)) + print("[X] Actual Number of states:", len(memoized)) + print("[X] Number of transitions:", num_transitions) + print("[X] Original Number of states:", len(states)) final_struct["pda"] = memoized return final_struct - + # Running FSA construction in exact approximation mode and postprocessing it like so for transition in pda: - state = transition["source"] - memoized[int(state)].append((transition["trigger"], - int(transition["dest"]), transition["terminal"])) + state = transition["source"] + memoized[int(state)].append( + (transition["trigger"], int(transition["dest"]), transition["terminal"]) + ) final_struct["init_state"] = int(initial) final_struct["final_state"] = int(final[0]) - print 
('[X] Actual Number of states:', len(memoized)) + print("[X] Actual Number of states:", len(memoized)) final_struct["pda"] = memoized return final_struct + def postprocess(): - ''' + """ Creates a representation to be passed on to the C-module - ''' + """ global pda final_struct = {} memoized = defaultdict(list) # Supporting data structures for if stack limit is imposed culled_pda = [] culled_final = [] - num_transitions = 0 # Keep track of number of transitions - + num_transitions = 0 # Keep track of number of transitions states, final, initial = _get_states() - print (initial) - assert len(initial) == 1, 'More than one init state found' + print(initial) + assert len(initial) == 1, "More than one init state found" # Cull transitions to states which were not expanded owing to the stack limit if stack_limit: - blocklist = [] for final_state in final: for transition in pda: - if (transition["dest"] == final_state) and (len(transition["stack"]) > 0): + if (transition["dest"] == final_state) and ( + len(transition["stack"]) > 0 + ): blocklist.append(transition["dest"]) continue else: culled_pda.append(transition) - + culled_final = [state for state in final if state not in blocklist] - assert len(culled_final) == 1, 'More than one final state found' + assert len(culled_final) == 1, "More than one final state found" for transition in culled_pda: state = transition["source"] if transition["dest"] in blocklist: - continue + continue num_transitions += 1 - memoized[int(state)].append([transition["trigger"], int(transition["dest"]), - transition["terminal"]]) - - - + memoized[int(state)].append( + [transition["trigger"], int(transition["dest"]), transition["terminal"]] + ) + final_struct["init_state"] = int(initial) final_struct["final_state"] = int(culled_final[0]) # The reason we do this is because when states are culled, the indexing is # still relative to the actual number of states hence we keep numstates recorded # as the original number of states - print ('[X] Actual Number of states:', len(memoized.keys())) - print ('[X] Number of transitions:', num_transitions) - print ('[X] Original Number of states:', len(states)) - #final_struct["numstates"] = len(states) - memoized_list = [[]]*len(states) + print("[X] Actual Number of states:", len(memoized.keys())) + print("[X] Number of transitions:", num_transitions) + print("[X] Original Number of states:", len(states)) + # final_struct["numstates"] = len(states) + memoized_list = [[]] * len(states) else: # Running FSA construction in exact approximation mode and postprocessing it like so for transition in pda: - state = transition["source"] - memoized[int(state)].append([transition["trigger"], int(transition["dest"]), - transition["terminal"]]) + state = transition["source"] + memoized[int(state)].append( + [transition["trigger"], int(transition["dest"]), transition["terminal"]] + ) final_struct["init_state"] = int(initial) final_struct["final_state"] = int(final[0]) - print ('[X] Actual Number of states:', len(memoized.keys())) - #final_struct["numstates"] = len(memoized.keys()) - memoized_list = [[]]*len(memoized.keys()) - + print("[X] Actual Number of states:", len(memoized.keys())) + # final_struct["numstates"] = len(memoized.keys()) + memoized_list = [[]] * len(memoized.keys()) + for k in memoized.keys(): memoized_list[k] = memoized[k] final_struct["pda"] = memoized_list @@ -333,19 +349,23 @@ def _get_states(): dest.add(transition["dest"]) source_copy = source.copy() source_copy.update(dest) - return list(source_copy), 
list(dest.difference(source)), str(''.join(list(source.difference(dest)))) + return ( + list(source_copy), + list(dest.difference(source)), + str("".join(list(source.difference(dest)))), + ) -if __name__ == '__main__': + +if __name__ == "__main__": import argparse - parser = argparse.ArgumentParser(description = 'Script to convert GNF grammar to PDA') + + parser = argparse.ArgumentParser(description="Script to convert GNF grammar to PDA") + parser.add_argument("--gf", type=str, help="Location of GNF grammar") parser.add_argument( - '--gf', - type = str, - help = 'Location of GNF grammar') - parser.add_argument( - '--limit', - type = int, - default = None, - help = 'Specify the upper bound for the stack size') + "--limit", + type=int, + default=None, + help="Specify the upper bound for the stack size", + ) args = parser.parse_args() main(args.gf, args.limit) diff --git a/utils/gramatron/construct_automata/Cargo.toml b/utils/gramatron/construct_automata/Cargo.toml index 958b38fb03..d43563fd11 100644 --- a/utils/gramatron/construct_automata/Cargo.toml +++ b/utils/gramatron/construct_automata/Cargo.toml @@ -1,23 +1,36 @@ [package] name = "construct_automata" -version = "0.1.0" +version.workspace = true edition = "2021" -authors = ["Andrea Fioraldi ", "Dominik Maier "] +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] description = "LibAFL Gramatron Gramar Construction" documentation = "https://docs.rs/libafl" repository = "https://github.com/AFLplusplus/LibAFL/" readme = "../../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "libafl", "gramatron", "grammar"] -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -libafl = { path = "../../../libafl", default-features = false } -serde_json = "1.0" -regex = "1" -postcard = { version = "1.0", features = ["alloc"], default-features = false } # no_std compatible serde serialization format -clap = { version = "4.5", features = ["derive"] } -# log = "0.4.20" +libafl = { workspace = true } +serde_json = { workspace = true, default-features = true } +regex = { workspace = true } +postcard = { workspace = true, features = [ + "alloc", +], default-features = false } # no_std compatible serde serialization format +clap = { workspace = true, features = ["derive"] } + +[lints] +workspace = true diff --git a/utils/gramatron/gnf_converter.py b/utils/gramatron/gnf_converter.py index 0bc70d1555..37434a7197 100755 --- a/utils/gramatron/gnf_converter.py +++ b/utils/gramatron/gnf_converter.py @@ -16,17 +16,18 @@ DEBUG = False NONTERMINALSET = [] COUNT = 1 + def convert_to_gnf(grammar, start): if DEBUG: - with open('debug_preprocess.json', 'w+') as fd: + with open("debug_preprocess.json", "w+") as fd: json.dump(grammar, fd) - grammar = remove_unit(grammar) # eliminates unit productions + grammar = remove_unit(grammar) # eliminates unit productions if DEBUG: - with open('debug_unit.json', 'w+') as fd: + with open("debug_unit.json", "w+") as fd: json.dump(grammar, fd) - grammar = remove_mixed(grammar) # eliminate terminals existing with non-terminals + grammar = remove_mixed(grammar) # eliminate terminals existing with non-terminals if DEBUG: - with open('debug_mixed.json', 'w+') as fd: + with open("debug_mixed.json", "w+") as fd: json.dump(grammar, fd) grammar = gnf(grammar) @@ -35,12 +36,13 @@ def 
convert_to_gnf(grammar, start): # with open('debug_gnf_reachable.json', 'w+') as fd: # json.dump(reachable_grammar, fd) if DEBUG: - with open('debug_gnf.json', 'w+') as fd: + with open("debug_gnf.json", "w+") as fd: json.dump(grammar, fd) grammar["Start"] = [start] return grammar + def remove_left_recursion(grammar): # Remove the left recursion in the grammar rules. # This algorithm is adopted from @@ -69,10 +71,10 @@ def remove_left_recursion(grammar): r.append(new_rule) left_recursion = [r[1:] + [new_rule] for r in left_recursion] left_recursion.append(["' '"]) - new_grammar[lhs] = [' '.join(rule) for rule in others] - new_grammar[new_rule] = [' '.join(rule) for rule in left_recursion] + new_grammar[lhs] = [" ".join(rule) for rule in others] + new_grammar[new_rule] = [" ".join(rule) for rule in left_recursion] else: - new_grammar[lhs] = [' '.join(rule) for rule in others] + new_grammar[lhs] = [" ".join(rule) for rule in others] no_left_recursion = True for lhs, rules in old_grammar.items(): for rule in rules: @@ -88,10 +90,11 @@ def remove_left_recursion(grammar): new_grammar = defaultdict(list) return new_grammar + def get_reachable(grammar, start): - ''' + """ Returns a grammar without dead rules - ''' + """ reachable_nt = set() worklist = list() processed = set() @@ -113,9 +116,10 @@ def get_reachable(grammar, start): def gettokens(rule): - pattern = re.compile("([^\s\"\']+)|\"([^\"]*)\"|\'([^\']*)\'") + pattern = re.compile("([^\s\"']+)|\"([^\"]*)\"|'([^']*)'") return [matched.group(0) for matched in pattern.finditer(rule)] + def gnf(grammar): old_grammar = copy.deepcopy(grammar) new_grammar = defaultdict(list) @@ -129,7 +133,7 @@ def gnf(grammar): new_grammar[lhs].append(rule) continue startoken = tokens[0] - assert(startoken != lhs) + assert startoken != lhs endrule = tokens[1:] if not isTerminal(startoken): newrules = [] @@ -139,7 +143,7 @@ def gnf(grammar): temprule.insert(0, extension) newrules.append(temprule) for newnew in newrules: - new_grammar[lhs].append(' '.join(newnew)) + new_grammar[lhs].append(" ".join(newnew)) else: new_grammar[lhs].append(rule) isgnf = True @@ -163,7 +167,7 @@ def process_antlr4_grammar(data): productions = [] production = [] for line in data: - if line != '\n': + if line != "\n": production.append(line) else: productions.append(production) @@ -172,16 +176,17 @@ def process_antlr4_grammar(data): for production in productions: rules = [] init = production[0] - nonterminal = init.split(':')[0] - rules.append(strip_chars(init.split(':')[1]).strip('| ')) + nonterminal = init.split(":")[0] + rules.append(strip_chars(init.split(":")[1]).strip("| ")) for production_rule in production[1:]: - rules.append(strip_chars(production_rule.split('|')[0])) + rules.append(strip_chars(production_rule.split("|")[0])) final_rule_set[nonterminal] = rules # for line in data: # if line != '\n': # production.append(line) return final_rule_set + def remove_unit(grammar): nounitproductions = False old_grammar = copy.deepcopy(grammar) @@ -213,19 +218,21 @@ def remove_unit(grammar): new_grammar = defaultdict(list) return new_grammar + def isTerminal(rule): # pattern = re.compile("([r]*\'[\s\S]+\')") - pattern = re.compile("\'(.*?)\'") + pattern = re.compile("'(.*?)'") match = pattern.match(rule) if match: return True else: return False + def remove_mixed(grammar): - ''' + """ Remove rules where there are terminals mixed in with non-terminals - ''' + """ new_grammar = defaultdict(list) for lhs, rules in grammar.items(): for rhs in rules: @@ -248,17 +255,20 @@ def 
remove_mixed(grammar): regen_rule.append(new_nonterm) else: regen_rule.append(token) - new_grammar[lhs].append(' '.join(regen_rule)) + new_grammar[lhs].append(" ".join(regen_rule)) return new_grammar + def strip_chars(rule): - return rule.strip('\n\t ') + return rule.strip("\n\t ") + def get_nonterminal(): global COUNT COUNT += 1 return f"GeneratedTermVar{COUNT}" + def terminal_exist(token, grammar): for nonterminal, rules in grammar.items(): if token in rules and len(token) == 1: @@ -269,42 +279,37 @@ def terminal_exist(token, grammar): def main(grammar_file, out, start): grammar = None # If grammar file is a preprocessed NT file, then skip preprocessing - if '.json' in grammar_file: - with open(grammar_file, 'r') as fd: + if ".json" in grammar_file: + with open(grammar_file, "r") as fd: grammar = json.load(fd) - elif '.g4' in grammar_file: - with open(grammar_file, 'r') as fd: + elif ".g4" in grammar_file: + with open(grammar_file, "r") as fd: data = fd.readlines() grammar = process_antlr4_grammar(data) else: - raise('Unknwown file format passed. Accepts (.g4/.json)') + raise ("Unknwown file format passed. Accepts (.g4/.json)") grammar = convert_to_gnf(grammar, start) - with open(out, 'w+') as fd: + with open(out, "w+") as fd: json.dump(grammar, fd) -if __name__ == '__main__': + +if __name__ == "__main__": import argparse - parser = argparse.ArgumentParser(description = 'Script to convert grammar to GNF form') + + parser = argparse.ArgumentParser( + description="Script to convert grammar to GNF form" + ) parser.add_argument( - '--gf', - type = str, - required = True, - help = 'Location of grammar file') + "--gf", type=str, required=True, help="Location of grammar file" + ) parser.add_argument( - '--out', - type = str, - required = True, - help = 'Location of output file') + "--out", type=str, required=True, help="Location of output file" + ) + parser.add_argument("--start", type=str, required=True, help="Start token") parser.add_argument( - '--start', - type = str, - required = True, - help = 'Start token') - parser.add_argument( - '--debug', - action='store_true', - help = 'Write intermediate states to debug files') + "--debug", action="store_true", help="Write intermediate states to debug files" + ) args = parser.parse_args() DEBUG = args.debug diff --git a/utils/libafl_benches/Cargo.toml b/utils/libafl_benches/Cargo.toml index 335b8979b2..c55b83db8a 100644 --- a/utils/libafl_benches/Cargo.toml +++ b/utils/libafl_benches/Cargo.toml @@ -1,5 +1,8 @@ [package] -authors = ["Andrea Fioraldi ", "Dominik Maier "] +authors = [ + "Andrea Fioraldi ", + "Dominik Maier ", +] name = "libafl_benches" version.workspace = true edition = "2021" @@ -9,14 +12,26 @@ repository = "https://github.com/AFLplusplus/LibAFL/" readme = "../../README.md" license = "MIT OR Apache-2.0" keywords = ["fuzzing", "libafl", "benchmarks"] -categories = ["development-tools::testing", "emulators", "embedded", "os", "no-std"] +categories = [ + "development-tools::testing", + "emulators", + "embedded", + "os", + "no-std", +] [dev-dependencies] -criterion = "0.5" # Benchmarking -ahash = { version = "0.8", default-features=false } # The hash function already used in hashbrown -rustc-hash = { version = "1.1", default-features=false } # yet another hash -xxhash-rust = { version = "0.8.5", features = ["xxh3"] } # xxh3 hashing for rust -libafl_bolts = { path = "../../libafl_bolts", default-features=false, features = ["xxh3", "alloc"] } # libafl_bolts +libafl_bolts = { workspace = true, features = ["xxh3", "alloc"] } # libafl_bolts + 
+criterion = "0.5.1" # Benchmarking +ahash = { workspace = true, default-features = false } # The hash function already used in hashbrown +rustc-hash = { version = "2.0.0", default-features = false } # yet another hash +xxhash-rust = { version = "0.8.12", features = [ + "xxh3", +] } # xxh3 hashing for rust + +[lints] +workspace = true [[bench]] name = "rand_speeds" @@ -25,4 +40,3 @@ harness = false [[bench]] name = "hash_speeds" harness = false - diff --git a/utils/libafl_benches/benches/hash_speeds.rs b/utils/libafl_benches/benches/hash_speeds.rs index e1aee1cab2..a929a6a51e 100644 --- a/utils/libafl_benches/benches/hash_speeds.rs +++ b/utils/libafl_benches/benches/hash_speeds.rs @@ -1,6 +1,9 @@ //! Compare the speed of rust hash implementations -use std::hash::{BuildHasher, Hasher}; +use std::{ + hash::{BuildHasher, Hasher}, + num::NonZero, +}; use criterion::{black_box, criterion_group, criterion_main, Criterion}; use libafl_bolts::rands::{Rand, StdRand}; @@ -11,28 +14,27 @@ fn criterion_benchmark(c: &mut Criterion) { let mut rand = StdRand::with_seed(0); let mut bench_vec: Vec = vec![]; for _ in 0..2 << 16 { - bench_vec.push(rand.below(256) as u8); + bench_vec.push(rand.below(NonZero::new(256).unwrap()) as u8); } c.bench_function("xxh3", |b| { - b.iter(|| xxh3::xxh3_64_with_seed(black_box(&bench_vec), 0)); + b.iter(|| black_box(xxh3::xxh3_64_with_seed(&bench_vec, 0))); }); /*c.bench_function("const_xxh3", |b| { b.iter(|| const_xxh3::xxh3_64_with_seed(black_box(&bench_vec), 0)) });*/ c.bench_function("ahash", |b| { b.iter(|| { - let mut hasher = - black_box(ahash::RandomState::with_seeds(123, 456, 789, 123).build_hasher()); + let mut hasher = ahash::RandomState::with_seeds(123, 456, 789, 123).build_hasher(); hasher.write(black_box(&bench_vec)); - hasher.finish(); + black_box(hasher.finish()); }); }); c.bench_function("fxhash", |b| { b.iter(|| { - let mut hasher = black_box(rustc_hash::FxHasher::default()); + let mut hasher = rustc_hash::FxHasher::default(); hasher.write(black_box(&bench_vec)); - hasher.finish(); + black_box(hasher.finish()); }); }); } diff --git a/utils/libafl_fmt/Cargo.toml b/utils/libafl_fmt/Cargo.toml index 7f1b1e8d67..ee2f748a8f 100644 --- a/utils/libafl_fmt/Cargo.toml +++ b/utils/libafl_fmt/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libafl_fmt" -version = "0.1.0" +version = "0.14.1" edition = "2021" description = "Format the LibAFL repository" authors = ["Romain Malmain "] @@ -11,7 +11,13 @@ repository = "https://github.com/AFLplusplus/LibAFL.git" project-root = "0.2" walkdir = "2.5" regex = "1.10" -tokio = { version = "1.38", features = ["process", "rt", "rt-multi-thread", "macros"] } +tokio = { version = "1.38", features = [ + "process", + "rt", + "rt-multi-thread", + "macros", +] } clap = { version = "4.5", features = ["derive"] } exitcode = "1.1" -which = "6.0" \ No newline at end of file +which = "6.0" +colored = "2.1.0" diff --git a/utils/libafl_fmt/src/main.rs b/utils/libafl_fmt/src/main.rs index 5a38fc9eb2..336a409c00 100644 --- a/utils/libafl_fmt/src/main.rs +++ b/utils/libafl_fmt/src/main.rs @@ -69,17 +69,44 @@ #![allow(clippy::borrow_as_ptr)] #![allow(clippy::borrow_deref_ref)] -use std::{io, io::ErrorKind, path::PathBuf, str::from_utf8}; +use std::{ + fs::read_to_string, + io, + io::ErrorKind, + path::{Path, PathBuf}, + str::from_utf8, +}; use clap::Parser; +use colored::Colorize; use regex::RegexSet; use tokio::{process::Command, task::JoinSet}; use walkdir::{DirEntry, WalkDir}; use which::which; -async fn run_cargo_fmt(path: PathBuf, is_check: bool, 
verbose: bool) -> io::Result<()> { - // Sanity Check - assert_eq!(path.file_name().unwrap().to_str().unwrap(), "Cargo.toml"); +const REF_LLVM_VERSION: u32 = 19; + +fn is_workspace_toml(path: &Path) -> bool { + for line in read_to_string(path).unwrap().lines() { + if line.eq("[workspace]") { + return true; + } + } + + false +} + +async fn run_cargo_fmt(cargo_file_path: PathBuf, is_check: bool, verbose: bool) -> io::Result<()> { + // Make sure we parse the correct file + assert_eq!( + cargo_file_path.file_name().unwrap().to_str().unwrap(), + "Cargo.toml" + ); + + if is_workspace_toml(cargo_file_path.as_path()) { + println!("[*] Skipping {}...", cargo_file_path.as_path().display()); + return Ok(()); + } let task_str = if is_check { "Checking" } else { "Formatting" }; @@ -89,23 +116,29 @@ async fn run_cargo_fmt(path: PathBuf, is_check: bool, verbose: bool) -> io::Resu .arg("+nightly") .arg("fmt") .arg("--manifest-path") - .arg(path.as_path()); + .arg(cargo_file_path.as_path()); if is_check { fmt_command.arg("--check"); } if verbose { - println!("[*] {} {}...", task_str, path.as_path().display()); + println!( + "[*] {} {}...", + task_str, + cargo_file_path.as_path().display() + ); } let res = fmt_command.output().await?; if !res.status.success() { - println!("{}", from_utf8(&res.stderr).unwrap()); + let stdout = from_utf8(&res.stdout).unwrap(); + let stderr = from_utf8(&res.stderr).unwrap(); return Err(io::Error::new( ErrorKind::Other, - format!("Cargo fmt failed. Run cargo fmt for {path:#?}"), + format!( + "Cargo fmt failed. Run cargo fmt for {cargo_file_path:#?}.\nstdout: {stdout}\nstderr: {stderr}\ncommand: {fmt_command:?}"), )); } @@ -113,29 +146,29 @@ async fn run_cargo_fmt(path: PathBuf, is_check: bool, verbose: bool) -> io::Resu } async fn run_clang_fmt( - path: PathBuf, - clang: &str, + c_file_path: PathBuf, + clang: String, is_check: bool, verbose: bool, ) -> io::Result<()> { let task_str = if is_check { "Checking" } else { "Formatting" }; - let mut fmt_command = Command::new(clang); + let mut fmt_command = Command::new(&clang); fmt_command .arg("-i") .arg("--style") .arg("file") - .arg(path.as_path()); + .arg(c_file_path.as_path()); if is_check { fmt_command.arg("-Werror").arg("--dry-run"); } - fmt_command.arg(path.as_path()); + fmt_command.arg(c_file_path.as_path()); if verbose { - println!("[*] {} {}...", task_str, path.as_path().display()); + println!("[*] {} {}...", task_str, c_file_path.as_path().display()); } let res = fmt_command.output().await?; @@ -143,11 +176,12 @@ async fn run_clang_fmt( if res.status.success() { Ok(()) } else { - let stderr = from_utf8(&res.stderr).unwrap().to_string(); + let stdout = from_utf8(&res.stdout).unwrap(); + let stderr = from_utf8(&res.stderr).unwrap(); println!("{stderr}"); Err(io::Error::new( ErrorKind::Other, - format!("{clang} failed: {stderr}"), + format!("{clang} failed.\nstdout:{stdout}\nstderr:{stderr}"), )) } } @@ -188,6 +222,7 @@ async fn main() -> io::Result<()> { r".*AFLplusplus.*", r".*Little-CMS.*", r".*cms_transform_fuzzer.cc.*", + r".*sqlite3.*", ]) .expect("Could not create the regex set from the given regex"); @@ -209,27 +244,63 @@ async fn main() -> io::Result<()> { .map(DirEntry::into_path) .collect(); + // cargo version + println!( + "Using {}", + get_version_string("cargo", &["+nightly"]).await? + ); + + // rustfmt version + println!( + "Using {}", + get_version_string("cargo", &["+nightly", "fmt"]).await? 
+ ); + + let reference_clang_format = format!( + "clang-format-{}", + std::env::var("MAIN_LLVM_VERSION") + .inspect(|e| { + println!( + "Overriding clang-format version from the default {REF_LLVM_VERSION} to {e} using env variable MAIN_LLVM_VERSION" + ); + }) + .unwrap_or(REF_LLVM_VERSION.to_string()) + ); + let unspecified_clang_format = "clang-format"; + + let (clang, version, warning) = if which(&reference_clang_format).is_ok() { + ( + Some(reference_clang_format.as_str()), + Some(get_version_string(&reference_clang_format, &[]).await?), + None, + ) + } else if which(unspecified_clang_format).is_ok() { + let version = get_version_string(unspecified_clang_format, &[]).await?; + ( + Some(unspecified_clang_format), + Some(version.clone()), + Some(format!( + "using {version}, could provide a different result from {reference_clang_format}" + )), + ) + } else { + ( + None, + None, + Some("clang-format not found. Skipping C formatting...".to_string()), + ) + }; + + if let Some(version) = &version { + println!("Using {version}"); + } + let mut tokio_joinset = JoinSet::new(); for project in rust_projects_to_fmt { tokio_joinset.spawn(run_cargo_fmt(project, cli.check, cli.verbose)); } - let (clang, warning) = if which("clang-format-17").is_ok() { - // can't use 18 for ci. - (Some("clang-format-17"), None) - } else if which("clang-format").is_ok() { - ( - Some("clang-format"), - Some("using clang-format, could provide a different result from clang-format-18"), - ) - } else { - ( - None, - Some("clang-format not found. Skipping C formatting..."), - ) - }; - // println!("Using {:#?} to format...", clang); if let Some(clang) = clang { let c_files_to_fmt: Vec = WalkDir::new(&libafl_root_dir) .into_iter() @@ -241,7 +312,12 @@ async fn main() -> io::Result<()> { .collect(); for c_file in c_files_to_fmt { - tokio_joinset.spawn(run_clang_fmt(c_file, clang, cli.check, cli.verbose)); + tokio_joinset.spawn(run_clang_fmt( + c_file, + clang.to_string(), + cli.check, + cli.verbose, + )); } } @@ -255,9 +331,7 @@ async fn main() -> io::Result<()> { } } - if let Some(warning) = warning { - println!("Warning: {warning}"); - } + let _ = warning.map(print_warning); if cli.check { println!("[*] Check finished successfully."); @@ -267,3 +341,18 @@ async fn main() -> io::Result<()> { Ok(()) } + +async fn get_version_string(path: &str, args: &[&str]) -> Result { + let version = Command::new(path) + .args(args) + .arg("--version") + .output() + .await? + .stdout; + Ok(from_utf8(&version).unwrap().replace('\n', "")) +} + +#[allow(clippy::needless_pass_by_value)] +fn print_warning(warning: String) { + println!("\n{} {}\n", "Warning:".yellow().bold(), warning); +} diff --git a/utils/libafl_jumper/Cargo.toml b/utils/libafl_jumper/Cargo.toml new file mode 100644 index 0000000000..0d29ce9476 --- /dev/null +++ b/utils/libafl_jumper/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "libafl_jumper" +edition = "2021" +version.workspace = true +description = "LibAFL_Jumper: Jump to any address in memory." 
+repository = "https://github.com/AFLplusplus/LibAFL/" +license = "MIT OR Apache-2.0" +categories = ["development-tools::testing", "os"] +keywords = ["fuzzing", "libafl"] + +[features] +default = ["std"] +std = [] + +[build-dependencies] +cc = "1" diff --git a/utils/libafl_jumper/README.md b/utils/libafl_jumper/README.md new file mode 100644 index 0000000000..0a6d6ae93d --- /dev/null +++ b/utils/libafl_jumper/README.md @@ -0,0 +1,28 @@ +# LIBAFL_JUMPER + +If you want to replace your unicorn use with `libafl_qemu`, this might be your tool. +It can run as stub binary. +From inside LibAFL, you can break at `jmp`, then mmap and load all of the memory you need, +then continue running. + +Depending on your toolchain, you want to build the tool for the guest platform. +Since the loader will run inside `qemu-linux-user`, the target OS needs to be `linux` +(Of course, there might be other use cases for you). + +To build this statically linked with `musl` libc, we can do the following: + +```sh +# Install cross compiler toolchain +apt-get install gcc-arm-linux-gnueabihf +# Install the rust toolchain parts +rustup target add arm-unknown-linux-musleabi +# Build for the target. The addresses in the linker script should not be used by your target binary. +RUSTFLAGS="-C target-feature=+crt-static, -C link-self-contained=yes -C linker=arm-linux-gnueabi-gcc -C link-arg=T$(realpath linker_script.ld)" cargo build --target=arm-unknown-linux-musleabi --release +``` + +↪ Or do that for any other architecture, such as `x86_64-unknown-linux-musl`. + +Then, you can run libafl_jumper with a hex-encoded address as parameter, and break at the `libafl_jmp` and (m)map your memory to the right place in memory, before continuing to run. +The jumper will then jump to the provided address. + +Enjoy jumping like a little bunny. diff --git a/utils/libafl_jumper/linker_script.ld b/utils/libafl_jumper/linker_script.ld new file mode 100644 index 0000000000..57daf1a241 --- /dev/null +++ b/utils/libafl_jumper/linker_script.ld @@ -0,0 +1,12 @@ +MEMORY +{ + FLASH (rx) : ORIGIN = 0x20001000, LENGTH = 512K + RAM (xrw) : ORIGIN = 0x21000000, LENGTH = 64K +} + +SECTIONS +{ + .text : { *(.text*) } > FLASH + .rodata : { *(.rodata*) } > FLASH + .bss : { *(.bss*) } > RAM +} \ No newline at end of file diff --git a/utils/libafl_jumper/src/main.rs b/utils/libafl_jumper/src/main.rs new file mode 100644 index 0000000000..72cbabff11 --- /dev/null +++ b/utils/libafl_jumper/src/main.rs @@ -0,0 +1,162 @@ +#![cfg_attr(not(feature = "std"), no_main)] +#![cfg_attr(not(feature = "std"), no_std)] + +#[cfg(not(feature = "std"))] +use core::ffi::CStr; +#[cfg(not(any(test, feature = "std")))] +use core::panic::PanicInfo; +use core::{arch::asm, ffi::c_void}; + +#[cfg(not(any(test, feature = "std")))] +#[panic_handler] +fn panic(_panic: &PanicInfo<'_>) -> ! { + // No panic! + // # Safety + // This will crash for sure. + unsafe { + libafl_jmp(0x50000B4D_u32 as _); + } +} + +/// Good to kickstart an emulated fuzzing process inside `LibAFL_QEMU`. +/// +/// # Safety +/// This is the most unsafest function you will see today. 
+/// +/// Man ALL IS LOŚ͖̩͇̗̪̏̈́T ALL I​S LOST the pon̷y he comes he c̶̮omes he comes the ich​or permeates all MY FACE MY FACE ᵒh god no NO NOO̼O​O NΘ stop the an​*̶͑̾̾​̅ͫ͏̙̤g͇̫͛͆̾ͫ̑͆l͖͉̗̩̳̟̍ͫͥͨe̠̅s ͎a̧͈͖r̽̾̈́͒͑e n​ot rè̑ͧ̌aͨl̘̝̙̃ͤ͂̾̆ ZA̡͊͠͝LGΌ ISͮ̂҉̯͈͕̹̘̱ TO͇̹̺ͅƝ̴ȳ̳ TH̘Ë͖́̉ ͠P̯͍̭O̚​N̐Y̡ H̸̡̪̯ͨ͊̽̅̾̎Ȩ̬̩̾͛ͪ̈́̀́͘ ̶̧̨̱̹̭̯ͧ̾ͬC̷̙̲̝͖ͭ̏ͥͮ͟Oͮ͏̮̪̝͍M̲̖͊̒ͪͩͬ̚̚͜Ȇ̴̟̟͙̞ͩ͌͝S̨̥̫͎̭ͯ̿̔̀ͅ +#[inline(never)] +#[no_mangle] +pub unsafe extern "C" fn libafl_jmp(target: *mut c_void) -> ! { + #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] + asm!( + "jmp {target}", // Jump on x86 + target = in(reg) target, + options(noreturn) + ); + + #[cfg(target_arch = "arm")] + asm!( + "bx {target}", // Branch and exchange instruction (ARM) + target = in(reg) target, + options(noreturn) + ); + + #[cfg(target_arch = "aarch64")] + asm!( + "br {target}", // Branch register instruction (AArch64) + target = in(reg) target, + options(noreturn) + ); + + #[cfg(target_arch = "hexagon")] + asm!( + "jumpr {target}", // Jump register instruction (Hexagon) + target = in(reg) target, + options(noreturn) + ); + + #[cfg(target_arch = "hexagon")] + asm!( + "b {target}", // Branch instruction (PowerPC) + target = in(reg) target, + options(noreturn) + ); + + #[cfg(any(target_arch = "riscv32", target_arch = "riscv64"))] + asm!( + "jalr x0, {target}, 0", // Jump and link register (RISC-V) + target = in(reg) target, + options(noreturn) + ); + + #[cfg(target_arch = "mips")] + asm!( + "jr {target}", // Jump register (MIPS) + "nop", // Delay slot + target = in(reg) target, + options(noreturn) + ); + + //unreachable!("asm should have jumped!"); +} + +/// The "normal" rust main, mainly for testing +#[cfg(feature = "std")] +fn main() { + let args: Vec = std::env::args().collect(); + + assert!(args.len() >= 2, "No address given"); + + let mut hex_str: &str = &args[1]; + if hex_str.starts_with("0x") || hex_str.starts_with("0X") { + hex_str = &hex_str[2..]; + } + println!("Jumping to {hex_str}"); + decode_hex_and_jmp(hex_str); +} + +/// Main for `no_std` - that's the one we will use inside LibAFL_QEMU. +#[cfg(not(feature = "std"))] +#[no_mangle] +pub unsafe extern "C" fn main(argc: i32, argv: *const *const u8) -> ! { + if argc < 2 || argv.is_null() { + // No params - nothing we can do. + // # Safety + // So much crash. + libafl_jmp(0x42424242_u32 as _); + } + + let arg = argv.add(1); + let mut val = *arg; + + if *val == b'0' && *val.add(1) == b'x' || *val.add(1) == b'X' { + // strip leading 0x + val = val.add(2); + } + + let hex_string = CStr::from_ptr(*val as _).to_str().unwrap(); + + decode_hex_and_jmp(hex_string); +} + +fn decode_hex_and_jmp(hex_string: &str) -> ! { + let Ok(addr) = u64::from_str_radix(hex_string, 16) else { + panic!("Could not parse hex string: {hex_string}"); + }; + + #[cfg(feature = "std")] + println!("Hex: {addr:#x}"); + + #[allow(clippy::cast_possible_truncation)] + let addr = addr as usize; + + let entrypoint = addr as *mut c_void; + + // # Safety + // Obviously unsafe, we're just jumping to a random place in memory... + unsafe { libafl_jmp(entrypoint) } +} + +#[cfg(test)] +mod test { + + extern "C" { + fn exit(ret: i32); + } + + use crate::libafl_jmp; + + #[inline(never)] + pub fn do_exit() { + unsafe { exit(0) } + } + + /// Tests if we can jump to exit. + /// There's a chance this won't work on some systems. + /// Either the assembly above is broken, or something else simply goes wrong. + /// We're deeeep in UB land here. 
+ #[test] + fn test_jmp_to_panic() { + unsafe { libafl_jmp(do_exit as _) } + } +} diff --git a/utils/multi_machine_generator/.gitignore b/utils/multi_machine_generator/.gitignore new file mode 100644 index 0000000000..8e392c77d0 --- /dev/null +++ b/utils/multi_machine_generator/.gitignore @@ -0,0 +1,2 @@ +*.txt +*.dot \ No newline at end of file diff --git a/utils/multi_machine_generator/Cargo.toml b/utils/multi_machine_generator/Cargo.toml new file mode 100644 index 0000000000..822dd6593b --- /dev/null +++ b/utils/multi_machine_generator/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "multi_machine_generator" +authors = ["Romain Malmain "] +version = "0.14.1" +description = "Generator for multi-machine setup" +license = "MIT OR Apache-2.0" +keywords = ["fuzzing", "testing", "security"] +edition = "2021" + +[dependencies] +petgraph = "0.6" +clap = { version = "4.5", features = ["derive"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" diff --git a/utils/multi_machine_generator/src/graph.rs b/utils/multi_machine_generator/src/graph.rs new file mode 100644 index 0000000000..f65703f2cf --- /dev/null +++ b/utils/multi_machine_generator/src/graph.rs @@ -0,0 +1,132 @@ +use std::{ + fmt::{Display, Formatter}, + mem, +}; + +use petgraph::{graph::NodeIndex, Direction, Graph}; +use serde::Serialize; + +/// A node of the network +#[derive(Debug, Clone)] +pub struct MultiMachineNode { + addr: String, +} + +/// The final configuration of a node on the network +#[derive(Debug, Clone, Serialize)] +pub struct MultiMachineNodeConfig { + addr: String, + parent: Option, + port: u16, +} + +/// The tree +pub struct MultiMachineTree { + pub graph: Graph, +} + +pub struct MultiMachineEdge; + +impl Display for MultiMachineEdge { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "") + } +} + +impl Display for MultiMachineNode { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.addr) + } +} + +impl MultiMachineNode { + #[must_use] + pub fn new(addr: String) -> Self { + Self { addr } + } +} + +impl MultiMachineTree { + /// Generate a multi-machine tree. + /// + /// + /// - machines: machines to add. 
+ /// - `max_children_per_parent`: each parent will have at most this amount of children + #[must_use] + pub fn generate(machines: &[String], max_children_per_parent: u64) -> Self { + let mut graph = Graph::::new(); + let mut machines = Vec::from(machines); + machines.reverse(); + + let root = if let Some(root) = machines.pop() { + graph.add_node(MultiMachineNode::new(root)) + } else { + return Self { graph }; + }; + + let mut graph = Self { graph }; + + let mut populate_idx = 0u64; // round-robin population to avoid congestion + let mut nodes_to_populate_now: Vec = vec![root]; // current nodes we are working on + + let mut nodes_to_populate_later: Vec = Vec::new(); + + // place all the machines in the graph + while let Some(machine) = machines.pop() { + if graph.nb_children(nodes_to_populate_now[populate_idx as usize]) + == max_children_per_parent + { + nodes_to_populate_now = mem::take(&mut nodes_to_populate_later); + populate_idx = 0; // should be useless + } + + let new_child = graph.add_child( + nodes_to_populate_now[populate_idx as usize], + MultiMachineNode::new(machine), + ); + nodes_to_populate_later.push(new_child); + + populate_idx = (populate_idx + 1) % nodes_to_populate_now.len() as u64; + } + + graph + } + + fn add_child(&mut self, parent: NodeIndex, child: MultiMachineNode) -> NodeIndex { + let child_idx = self.graph.add_node(child); + self.graph.add_edge(child_idx, parent, MultiMachineEdge); + child_idx + } + + fn nb_children(&self, node: NodeIndex) -> u64 { + self.graph + .neighbors_directed(node, Direction::Incoming) + .count() as u64 + } + + fn get_parent(&self, node: NodeIndex) -> Option { + self.graph + .neighbors_directed(node, Direction::Outgoing) + .next() + } + + #[must_use] + pub fn get_config(&self, default_port: u16) -> Vec { + let mut node_configs: Vec = Vec::new(); + for node_idx in self.graph.node_indices() { + let node = &self.graph[node_idx]; + + let parent = self + .get_parent(node_idx) + .map(|parent_idx| self.graph[parent_idx].addr.clone()); + + node_configs.push(MultiMachineNodeConfig { + addr: node.addr.clone(), + parent, + port: default_port, + }); + } + + node_configs + } +} diff --git a/utils/multi_machine_generator/src/main.rs b/utils/multi_machine_generator/src/main.rs new file mode 100644 index 0000000000..ccb560f033 --- /dev/null +++ b/utils/multi_machine_generator/src/main.rs @@ -0,0 +1,54 @@ +//! Multi Machine Generator +//! +//! Generates a ready-to-run multi-machine configuration, as a balanced tree. +//! A simple algorithm will first create such a tree, and associate IPs to them. +//! It will finally output a set of commands to run to have each fuzzer communicating correctly with the other machines of the network. +//! +//! We suppose everyone is on the same network and the machines have the fuzzer ready to run on each machine. 
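The module documentation above describes what the generator does: read a list of machines, arrange them as a balanced tree, and emit a per-node configuration. A minimal usage sketch of the `graph` API shown above follows; it assumes the `graph` module were reachable as a library (in this crate it is `pub mod graph` inside the binary), and the machine addresses are made up for illustration. `main()` below does the same thing, reading the addresses from a file instead.

```rust
// Sketch only: drive MultiMachineTree the same way main() does below,
// with hard-coded example addresses instead of a machines file.
use multi_machine_generator::graph::MultiMachineTree;

fn example() {
    let machines: Vec<String> = ["10.0.0.1", "10.0.0.2", "10.0.0.3", "10.0.0.4"]
        .iter()
        .map(ToString::to_string)
        .collect();

    // The first machine becomes the root; the rest are attached round-robin,
    // at most 3 children per parent (the same constant main() passes below).
    let tree = MultiMachineTree::generate(&machines, 3);

    // Each config entry carries the node address, its parent address
    // (None for the root) and the chosen port.
    let config = tree.get_config(50000);
    println!("{}", serde_json::to_string_pretty(&config).unwrap());
}
```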
+ +use std::{fs, fs::File, io, io::BufRead, path::PathBuf}; + +use clap::Parser; +use petgraph::dot::Dot; + +use crate::graph::MultiMachineTree; + +pub mod graph; + +#[derive(Parser)] +struct Opt { + #[arg(short, long)] + machines_file: PathBuf, + #[arg(long)] + dot_output: Option, + #[arg(short, long)] + json_output: Option, + #[arg(short, long, default_value_t = 50000)] + default_port: u16, + // #[arg(short, long)] + // cmd_file: PathBuf, +} + +fn main() { + let opt = Opt::parse(); + + let machine_file = File::open(opt.machines_file.as_path()).unwrap(); + let machines: Vec = io::BufReader::new(machine_file) + .lines() + .map(|m| m.unwrap()) + .collect(); + + let multi_machine_graph = MultiMachineTree::generate(&machines, 3); + + // final graph + if let Some(dot_path) = opt.dot_output { + let dot = Dot::new(&multi_machine_graph.graph); + fs::write(dot_path, format!("{dot}")).unwrap(); + } + + if let Some(json_path) = opt.json_output { + let cfg = multi_machine_graph.get_config(opt.default_port); + let cfg_json = serde_json::to_string_pretty(&cfg).unwrap(); + fs::write(json_path, cfg_json).unwrap(); + } +} diff --git a/utils/noaslr/Cargo.toml b/utils/noaslr/Cargo.toml index ad6c5a6587..93487d48fa 100644 --- a/utils/noaslr/Cargo.toml +++ b/utils/noaslr/Cargo.toml @@ -1,7 +1,3 @@ [workspace] resolver = "2" -members = [ - "noaslr", - "demo", - "libnoaslr" -] +members = ["noaslr", "demo", "libnoaslr"] diff --git a/utils/noaslr/Makefile.toml b/utils/noaslr/Makefile.toml index 25f7859eaa..ce1fed9b66 100644 --- a/utils/noaslr/Makefile.toml +++ b/utils/noaslr/Makefile.toml @@ -2,12 +2,12 @@ default_to_workspace = false [env] -PROFILE="dev" -BUILD_DIR="${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/debug" +PROFILE = "dev" +BUILD_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/debug" [env.release] -PROFILE="release" -BUILD_DIR="${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/release" +PROFILE = "release" +BUILD_DIR = "${CARGO_MAKE_CRATE_TARGET_DIRECTORY}/release" [tasks.clean] command = "cargo" @@ -21,68 +21,57 @@ args = ["fmt", "--", "--emit=files"] [tasks.demo] dependencies = ["format", "clippy"] command = "cargo" -args = [ - "build", - "-p", "demo", - "--profile", "${PROFILE}", -] +args = ["build", "-p", "demo", "--profile", "${PROFILE}"] [tasks.run_demo] dependencies = ["demo"] command = "cargo" -args = [ - "run", - "-p", "demo", -] +args = ["run", "-p", "demo"] [tasks.build] dependencies = ["format", "clippy"] command = "cargo" -args = [ - "build", - "-p", "noaslr", - "--profile", "${PROFILE}", -] +args = ["build", "-p", "noaslr", "--profile", "${PROFILE}"] [tasks.buildlib] dependencies = ["format", "clippy"] command = "cargo" -args = [ - "build", - "-p", "libnoaslr", - "--profile", "${PROFILE}", -] +args = ["build", "-p", "libnoaslr", "--profile", "${PROFILE}"] [tasks.run] command = "cargo" -dependencies = [ "demo" ] -env = { "ZZZ_TEST_ZZZ" = "ZZZ TEST ZZZ"} +dependencies = ["demo"] +env = { "ZZZ_TEST_ZZZ" = "ZZZ TEST ZZZ" } args = [ "run", - "-p", "noaslr", - "--profile", "${PROFILE}", + "-p", + "noaslr", + "--profile", + "${PROFILE}", "--", - "${BUILD_DIR}/demo", - "--", - "-f", - "/proc/self/maps", - "--", - "test" + "${BUILD_DIR}/demo", + "--", + "-f", + "/proc/self/maps", + "--", + "test", ] [tasks.runlib] command = "cargo" -dependencies = [ "demo", "buildlib" ] -env = { "LD_PRELOAD" = "${BUILD_DIR}/libnoaslr.so", "ZZZ_TEST_ZZZ" = "ZZZ TEST ZZZ"} +dependencies = ["demo", "buildlib"] +env = { "LD_PRELOAD" = "${BUILD_DIR}/libnoaslr.so", "ZZZ_TEST_ZZZ" = "ZZZ TEST ZZZ" } args = [ "run", - "-p", "demo", - 
"--profile", "${PROFILE}", + "-p", + "demo", + "--profile", + "${PROFILE}", "--", - "-f", - "/proc/self/maps", - "--", - "test" + "-f", + "/proc/self/maps", + "--", + "test", ] [tasks.all] diff --git a/utils/noaslr/demo/Cargo.toml b/utils/noaslr/demo/Cargo.toml index d603fab37e..fdb5cb70d3 100644 --- a/utils/noaslr/demo/Cargo.toml +++ b/utils/noaslr/demo/Cargo.toml @@ -1,12 +1,24 @@ [package] name = "noaslr_demo" -version = "0.1.0" +version = "0.14.1" edition = "2021" [build-dependencies] -vergen = { version = "8.1.1", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] } +vergen = { version = "8.1.1", features = [ + "build", + "cargo", + "git", + "gitcl", + "rustc", + "si", +] } [dependencies] anyhow = { version = "1.0", default-features = false } -clap = { version = "4.5", default-features = false, features = ["derive", "string", "std", "help"] } +clap = { version = "4.5", default-features = false, features = [ + "derive", + "string", + "std", + "help", +] } readonly = { version = "0.2.8", default-features = false } diff --git a/utils/noaslr/libnoaslr/Cargo.toml b/utils/noaslr/libnoaslr/Cargo.toml index ad8c15058d..5035ba1430 100644 --- a/utils/noaslr/libnoaslr/Cargo.toml +++ b/utils/noaslr/libnoaslr/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "libnoaslr" -version = "0.1.0" +version = "0.14.1" edition = "2021" [lib] @@ -12,8 +12,11 @@ crate-type = ["dylib"] [dependencies] anyhow = { version = "1.0", default-features = false } -ctor = { version = "0.2", default-features = false } -nix = { version = "0.29", default-features = false, features = ["process", "personality"] } +ctor = { version = "0.2.9", default-features = false } +nix = { version = "0.29", default-features = false, features = [ + "process", + "personality", +] } [target.'cfg(any(target_os = "freebsd", target_os = "netbsd"))'.dependencies] -libc = "0.2" +libc = "0.2" diff --git a/utils/noaslr/noaslr/Cargo.toml b/utils/noaslr/noaslr/Cargo.toml index ffc331cdee..c3e4e526e3 100644 --- a/utils/noaslr/noaslr/Cargo.toml +++ b/utils/noaslr/noaslr/Cargo.toml @@ -1,16 +1,34 @@ [package] name = "noaslr" -version = "0.1.0" +version = "0.14.1" edition = "2021" [build-dependencies] -vergen = { version = "8.1.1", features = ["build", "cargo", "git", "gitcl", "rustc", "si"] } +vergen = { version = "8.1.1", features = [ + "build", + "cargo", + "git", + "gitcl", + "rustc", + "si", +] } [dependencies] anyhow = { version = "1.0", default-features = false } -clap = { version = "4.5", default-features = false, features = ["derive", "string", "std", "help", "derive", "error-context", "usage"] } -log = { version = "0.4.20", default-features = false } -nix = { version = "0.29", default-features = false, features = ["process", "personality"] } +clap = { version = "4.5", default-features = false, features = [ + "derive", + "string", + "std", + "help", + "derive", + "error-context", + "usage", +] } +log = { version = "0.4", default-features = false } +nix = { version = "0.29", default-features = false, features = [ + "process", + "personality", +] } readonly = { version = "0.2.8", default-features = false } simplelog = { version = "0.12.1", default-features = false }