diff --git a/.cirrus.yml b/.cirrus.yml deleted file mode 100644 index 393237af66..0000000000 --- a/.cirrus.yml +++ /dev/null @@ -1,214 +0,0 @@ -env: # Global defaults - CIRRUS_CLONE_DEPTH: 1 - CIRRUS_LOG_TIMESTAMP: true - MAKEJOBS: "-j10" - TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache - CI_FAILFAST_TEST_LEAVE_DANGLING: "1" # Cirrus CI does not care about dangling processes and setting this variable avoids killing the CI script itself on error - -# A self-hosted machine(s) can be used via Cirrus CI. It can be configured with -# multiple users to run tasks in parallel. No sudo permission is required. -# -# https://cirrus-ci.org/guide/persistent-workers/ -# -# Generally, a persistent worker must run Ubuntu 23.04+ or Debian 12+. -# -# The following specific types should exist, with the following requirements: -# - small: For an x86_64 machine, with at least 2 vCPUs and 8 GB of memory. -# - medium: For an x86_64 machine, with at least 4 vCPUs and 16 GB of memory. -# - arm64: For an aarch64 machine, with at least 2 vCPUs and 8 GB of memory. -# -# CI jobs for the latter configuration can be run on x86_64 hardware -# by installing qemu-user-static, which works out of the box with -# podman or docker. Background: https://stackoverflow.com/a/72890225/313633 -# -# The above machine types are matched to each task by their label. Refer to the -# Cirrus CI docs for more details. -# -# When a contributor maintains a fork of the repo, any pull request they make -# to their own fork, or to the main repository, will trigger two CI runs: -# one for the branch push and one for the pull request. 
-# This can be avoided by setting SKIP_BRANCH_PUSH=true as a custom env variable -# in Cirrus repository settings, accessible from -# https://cirrus-ci.com/github/my-organization/my-repository -# -# On machines that are persisted between CI jobs, RESTART_CI_DOCKER_BEFORE_RUN=1 -# ensures that previous containers and artifacts are cleared before each run. -# This requires installing Podman instead of Docker. -# -# Futhermore: -# - podman-docker-4.1+ is required due to the bugfix in 4.1 -# (https://github.com/bitcoin/bitcoin/pull/21652#issuecomment-1657098200) -# - The ./ci/ dependencies (with cirrus-cli) should be installed. One-liner example -# for a single user setup with sudo permission: -# -# ``` -# apt update && apt install git screen python3 bash podman-docker uidmap slirp4netns curl -y && curl -L -o cirrus "https://github.com/cirruslabs/cirrus-cli/releases/latest/download/cirrus-linux-$(dpkg --print-architecture)" && mv cirrus /usr/local/bin/cirrus && chmod +x /usr/local/bin/cirrus -# ``` -# -# - There are no strict requirements on the hardware. Having fewer CPU threads -# than recommended merely causes the CI script to run slower. -# To avoid rare and intermittent OOM due to short memory usage spikes, -# it is recommended to add (and persist) swap: -# -# ``` -# fallocate -l 16G /swapfile_ci && chmod 600 /swapfile_ci && mkswap /swapfile_ci && swapon /swapfile_ci && ( echo '/swapfile_ci none swap sw 0 0' | tee -a /etc/fstab ) -# ``` -# -# - To register the persistent worker, open a `screen` session and run: -# -# ``` -# RESTART_CI_DOCKER_BEFORE_RUN=1 screen cirrus worker run --labels type=todo_fill_in_type --token todo_fill_in_token -# ``` - -# https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks -filter_template: &FILTER_TEMPLATE - # Allow forks to specify SKIP_BRANCH_PUSH=true and skip CI runs when a branch is pushed, - # but still run CI when a PR is created. 
- # https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution - skip: $SKIP_BRANCH_PUSH == "true" && $CIRRUS_PR == "" - stateful: false # https://cirrus-ci.org/guide/writing-tasks/#stateful-tasks - -base_template: &BASE_TEMPLATE - << : *FILTER_TEMPLATE - merge_base_script: - # Require git (used in fingerprint_script). - - git --version || ( apt-get update && apt-get install -y git ) - - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi - - git fetch --depth=1 $CIRRUS_REPO_CLONE_URL "pull/${CIRRUS_PR}/merge" - - git checkout FETCH_HEAD # Use merged changes to detect silent merge conflicts - # Also, the merge commit is used to lint COMMIT_RANGE="HEAD~..HEAD" - -main_template: &MAIN_TEMPLATE - timeout_in: 120m # https://cirrus-ci.org/faq/#instance-timed-out - ci_script: - - ./ci/test_run_all.sh - -global_task_template: &GLOBAL_TASK_TEMPLATE - << : *BASE_TEMPLATE - << : *MAIN_TEMPLATE - -compute_credits_template: &CREDITS_TEMPLATE - # https://cirrus-ci.org/pricing/#compute-credits - # Only use credits for pull requests to the main repo - use_compute_credits: $CIRRUS_REPO_FULL_NAME == 'bitcoin/bitcoin' && $CIRRUS_PR != "" - -task: - name: 'lint' - << : *BASE_TEMPLATE - container: - image: debian:bookworm - cpu: 1 - memory: 1G - # For faster CI feedback, immediately schedule the linters - << : *CREDITS_TEMPLATE - test_runner_cache: - folder: "/lint_test_runner" - fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-parse HEAD:test/lint/test_runner) - python_cache: - folder: "/python_build" - fingerprint_script: cat .python-version /etc/os-release - unshallow_script: - - git fetch --unshallow --no-tags - lint_script: - - ./ci/lint_run_all.sh - -task: - name: 'tidy' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - env: - FILE_ENV: "./ci/test/00_setup_env_native_tidy.sh" - -task: - name: 'ARM, unit tests, no functional tests' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: arm64 # Use arm64 worker to sidestep qemu and 
avoid a slow CI: https://github.com/bitcoin/bitcoin/pull/28087#issuecomment-1649399453 - env: - FILE_ENV: "./ci/test/00_setup_env_arm.sh" - -task: - name: 'Win64-cross' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_win64.sh" - -task: - name: 'CentOS, depends, gui' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_native_centos.sh" - -task: - name: 'previous releases, depends DEBUG' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_native_previous_releases.sh" - -task: - name: 'TSan, depends, gui' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - env: - FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh" - -task: - name: 'MSan, depends' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - timeout_in: 300m # Use longer timeout for the *rare* case where a full build (llvm + msan + depends + ...) needs to be done. 
- env: - FILE_ENV: "./ci/test/00_setup_env_native_msan.sh" - -task: - name: 'fuzzer,address,undefined,integer, no depends' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - timeout_in: 240m # larger timeout, due to the high CPU demand - env: - FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh" - -task: - name: 'multiprocess, i686, DEBUG' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - env: - FILE_ENV: "./ci/test/00_setup_env_i686_multiprocess.sh" - -task: - name: 'no wallet, libbitcoinkernel' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh" - -task: - name: 'macOS-cross, gui, no tests' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_mac_cross.sh" diff --git a/.github/actions/configure-docker/action.yml b/.github/actions/configure-docker/action.yml new file mode 100644 index 0000000000..f0eefba314 --- /dev/null +++ b/.github/actions/configure-docker/action.yml @@ -0,0 +1,56 @@ +name: 'Configure Docker' +description: 'Set up Docker build driver and configure build cache args' +inputs: + use-cirrus: + description: 'Use cirrus cache' + required: true +runs: + using: 'composite' + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + # Use host network to allow access to cirrus gha cache running on the host + driver-opts: | + network=host + + # This is required to allow buildkit to access the actions cache + - name: Expose actions cache variables + uses: actions/github-script@v8 + with: + script: | + Object.keys(process.env).forEach(function (key) { + if (key.startsWith('ACTIONS_')) { + core.info(`Exporting ${key}`); + core.exportVariable(key, process.env[key]); + } + }); + + - name: Construct docker build cache args + shell: bash + run: | + # Configure docker build cache backend + # + # On forks the gha cache 
will work but will use GitHub's cache backend. + Docker will check for variables $ACTIONS_CACHE_URL, $ACTIONS_RESULTS_URL and $ACTIONS_RUNTIME_TOKEN + which are set automatically when running on GitHub infra: https://docs.docker.com/build/cache/backends/gha/#synopsis + + # Use cirrus cache host + if [[ ${{ inputs.use-cirrus }} == 'true' ]]; then + url_args="url=${CIRRUS_CACHE_HOST},url_v2=${CIRRUS_CACHE_HOST}" + else + url_args="" + fi + + # Always optimistically --cache-from in case a cache blob exists + args=(--cache-from "type=gha${url_args:+,${url_args}},scope=${CONTAINER_NAME}") + + # Only add --cache-to when using the Cirrus cache provider and pushing to the default branch. + if [[ ${{ inputs.use-cirrus }} == 'true' && ${{ github.event_name }} == "push" && ${{ github.ref_name }} == ${{ github.event.repository.default_branch }} ]]; then + args+=(--cache-to "type=gha${url_args:+,${url_args}},mode=max,ignore-error=true,scope=${CONTAINER_NAME}") + fi + + # Always `--load` into docker images (needed when using the `docker-container` build driver). 
+ args+=(--load) + + echo "DOCKER_BUILD_CACHE_ARG=${args[*]}" >> $GITHUB_ENV diff --git a/.github/actions/configure-environment/action.yml b/.github/actions/configure-environment/action.yml new file mode 100644 index 0000000000..e2a26b7184 --- /dev/null +++ b/.github/actions/configure-environment/action.yml @@ -0,0 +1,27 @@ +name: 'Configure environment' +description: 'Configure CI, cache and container name environment variables' +runs: + using: 'composite' + steps: + - name: Set CI and cache directories + shell: bash + run: | + echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV" + echo "BASE_BUILD_DIR=${{ runner.temp }}/build" >> "$GITHUB_ENV" + echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> $GITHUB_ENV + echo "DEPENDS_DIR=${{ runner.temp }}/depends" >> "$GITHUB_ENV" + echo "BASE_CACHE=${{ runner.temp }}/depends/built" >> $GITHUB_ENV + echo "SOURCES_PATH=${{ runner.temp }}/depends/sources" >> $GITHUB_ENV + echo "PREVIOUS_RELEASES_DIR=${{ runner.temp }}/previous_releases" >> $GITHUB_ENV + + - name: Set cache hashes + shell: bash + run: | + echo "DEPENDS_HASH=$(git ls-tree HEAD depends "$FILE_ENV" | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV + echo "PREVIOUS_RELEASES_HASH=$(git ls-tree HEAD test/get_previous_releases.py | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV + + - name: Get container name + shell: bash + run: | + source $FILE_ENV + echo "CONTAINER_NAME=$CONTAINER_NAME" >> "$GITHUB_ENV" diff --git a/.github/actions/restore-caches/action.yml b/.github/actions/restore-caches/action.yml new file mode 100644 index 0000000000..8dc35d4902 --- /dev/null +++ b/.github/actions/restore-caches/action.yml @@ -0,0 +1,47 @@ +name: 'Restore Caches' +description: 'Restore ccache, depends sources, and built depends caches' +runs: + using: 'composite' + steps: + - name: Restore Ccache cache + id: ccache-cache + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.CCACHE_DIR }} + key: ccache-${{ env.CONTAINER_NAME }}-${{ github.run_id }} + restore-keys: | + 
ccache-${{ env.CONTAINER_NAME }}- + + - name: Restore depends sources cache + id: depends-sources + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.SOURCES_PATH }} + key: depends-sources-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + restore-keys: | + depends-sources-${{ env.CONTAINER_NAME }}- + + - name: Restore built depends cache + id: depends-built + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.BASE_CACHE }} + key: depends-built-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + restore-keys: | + depends-built-${{ env.CONTAINER_NAME }}- + + - name: Restore previous releases cache + id: previous-releases + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.PREVIOUS_RELEASES_DIR }} + key: previous-releases-${{ env.CONTAINER_NAME }}-${{ env.PREVIOUS_RELEASES_HASH }} + restore-keys: | + previous-releases-${{ env.CONTAINER_NAME }}- + + - name: export cache hits + shell: bash + run: | + echo "depends-sources-cache-hit=${{ steps.depends-sources.outputs.cache-hit }}" >> $GITHUB_ENV + echo "depends-built-cache-hit=${{ steps.depends-built.outputs.cache-hit }}" >> $GITHUB_ENV + echo "previous-releases-cache-hit=${{ steps.previous-releases.outputs.cache-hit }}" >> $GITHUB_ENV diff --git a/.github/actions/save-caches/action.yml b/.github/actions/save-caches/action.yml new file mode 100644 index 0000000000..0e3b31246c --- /dev/null +++ b/.github/actions/save-caches/action.yml @@ -0,0 +1,39 @@ +name: 'Save Caches' +description: 'Save ccache, depends sources, and built depends caches' +runs: + using: 'composite' + steps: + - name: debug cache hit inputs + shell: bash + run: | + echo "depends sources direct cache hit to primary key: ${{ env.depends-sources-cache-hit }}" + echo "depends built direct cache hit to primary key: ${{ env.depends-built-cache-hit }}" + echo "previous releases direct cache hit to primary key: ${{ env.previous-releases-cache-hit }}" + + - name: Save Ccache cache + uses: cirruslabs/cache/save@v4 + if: ${{ 
(github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) }} + with: + path: ${{ env.CCACHE_DIR }} + key: ccache-${{ env.CONTAINER_NAME }}-${{ github.run_id }} + + - name: Save depends sources cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.depends-sources-cache-hit != 'true') }} + with: + path: ${{ env.SOURCES_PATH }} + key: depends-sources-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + + - name: Save built depends cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.depends-built-cache-hit != 'true' )}} + with: + path: ${{ env.BASE_CACHE }} + key: depends-built-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + + - name: Save previous releases cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.previous-releases-cache-hit != 'true' )}} + with: + path: ${{ env.PREVIOUS_RELEASES_DIR }} + key: previous-releases-${{ env.CONTAINER_NAME }}-${{ env.PREVIOUS_RELEASES_HASH }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f652bbbbd6..8f07bb0032 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,9 +18,26 @@ concurrency: env: CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error - MAKEJOBS: '-j8' + CIRRUS_CACHE_HOST: http://127.0.0.1:12321/ # When using Cirrus Runners this host can be used by the docker `gha` build cache type. 
+ REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners jobs: + runners: + name: 'determine runners' + runs-on: ubuntu-latest + outputs: + use-cirrus-runners: ${{ steps.runners.outputs.use-cirrus-runners }} + steps: + - id: runners + run: | + if [[ "${REPO_USE_CIRRUS_RUNNERS}" == "${{ github.repository }}" ]]; then + echo "use-cirrus-runners=true" >> "$GITHUB_OUTPUT" + echo "::notice title=Runner Selection::Using Cirrus Runners" + else + echo "use-cirrus-runners=false" >> "$GITHUB_OUTPUT" + echo "::notice title=Runner Selection::Using GitHub-hosted runners" + fi + test-each-commit: name: 'test each commit' runs-on: ubuntu-24.04 @@ -31,7 +48,7 @@ jobs: steps: - name: Determine fetch depth run: echo "FETCH_DEPTH=$((${{ github.event.pull_request.commits }} + 2))" >> "$GITHUB_ENV" - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: ${{ env.FETCH_DEPTH }} @@ -105,8 +122,12 @@ jobs: BASE_ROOT_DIR: ${{ github.workspace }} steps: - - name: Checkout - uses: actions/checkout@v4 + - &CHECKOUT + name: Checkout + uses: actions/checkout@v6 + with: + # Ensure the latest merged pull request state is used, even on re-runs. + ref: &CHECKOUT_REF_TMPL ${{ github.event_name == 'pull_request' && github.ref || '' }} - name: Clang version run: | @@ -142,7 +163,7 @@ jobs: FILE_ENV: ${{ matrix.file-env }} - name: Save Ccache cache - uses: actions/cache/save@v4 + uses: actions/cache/save@v5 if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true' with: path: ${{ env.CCACHE_DIR }} @@ -177,14 +198,17 @@ jobs: job-name: 'Win64 native fuzz, VS 2022' steps: - - name: Checkout - uses: actions/checkout@v4 + - *CHECKOUT - - name: Configure Developer Command Prompt for Microsoft Visual C++ - # Using microsoft/setup-msbuild is not enough. 
- uses: ilammy/msvc-dev-cmd@v1 - with: - arch: x64 + - name: Set up VS Developer Prompt + shell: pwsh -Command "$PSVersionTable; $PSNativeCommandUseErrorActionPreference = $true; $ErrorActionPreference = 'Stop'; & '{0}'" + run: | + $vswherePath = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" + $installationPath = & $vswherePath -latest -property installationPath + & "${env:COMSPEC}" /s /c "`"$installationPath\Common7\Tools\vsdevcmd.bat`" -arch=x64 -no_logo && set" | foreach-object { + $name, $value = $_ -split '=', 2 + echo "$name=$value" >> $env:GITHUB_ENV + } - name: Get tool information run: | @@ -204,13 +228,13 @@ jobs: sed -i '1s/^/set(ENV{CMAKE_POLICY_VERSION_MINIMUM} 3.5)\n/' "${VCPKG_INSTALLATION_ROOT}/scripts/ports.cmake" - name: vcpkg tools cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: C:/vcpkg/downloads/tools key: ${{ github.job }}-vcpkg-tools - name: Restore vcpkg binary cache - uses: actions/cache/restore@v4 + uses: actions/cache/restore@v5 id: vcpkg-binary-cache with: path: ~/AppData/Local/vcpkg/archives @@ -221,7 +245,7 @@ jobs: cmake -B build --preset vs2022-static -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_INSTALLATION_ROOT\scripts\buildsystems\vcpkg.cmake" ${{ matrix.generate-options }} - name: Save vcpkg binary cache - uses: actions/cache/save@v4 + uses: actions/cache/save@v5 if: github.event_name != 'pull_request' && steps.vcpkg-binary-cache.outputs.cache-hit != 'true' && matrix.job-type == 'standard' with: path: ~/AppData/Local/vcpkg/archives @@ -269,44 +293,151 @@ jobs: - name: Download all workflow run artifacts uses: actions/download-artifact@v4 - asan-lsan-ubsan-integer-no-depends-usdt: - name: 'ASan + LSan + UBSan + integer, no depends, USDT' - runs-on: ubuntu-24.04 # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools + ci-matrix: + name: ${{ matrix.name }} + needs: runners + runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && matrix.cirrus-runner || 
matrix.fallback-runner }} if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} - timeout-minutes: 120 + timeout-minutes: ${{ matrix.timeout-minutes }} + env: - FILE_ENV: "./ci/test/00_setup_env_native_asan.sh" DANGER_CI_ON_HOST_FOLDERS: 1 + FILE_ENV: ${{ matrix.file-env }} + + strategy: + fail-fast: false + matrix: + include: + - name: '32 bit ARM, unit tests, no functional tests' + cirrus-runner: 'ubuntu-24.04-arm' # Cirrus' Arm runners are Apple (with virtual Linux aarch64), which doesn't support 32-bit mode + fallback-runner: 'ubuntu-24.04-arm' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_arm.sh' + + - name: 'win64 Cross' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_win64.sh' + + - name: 'ASan + LSan + UBSan + integer, no depends, USDT' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_asan.sh' + + - name: 'macOS-cross, gui, no tests' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_mac_cross.sh' + + - name: 'No wallet, libbitcoinkernel' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh' + + - name: 'i686, multiprocess, DEBUG' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_i686_multiprocess.sh' + + - name: 'fuzzer,address,undefined,integer, no depends' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + fallback-runner: 
'ubuntu-24.04' + timeout-minutes: 240 + file-env: './ci/test/00_setup_env_native_fuzz.sh' + + - name: 'previous releases, depends DEBUG' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_previous_releases.sh' + + - name: 'CentOS, depends, gui' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_centos.sh' + + - name: 'tidy' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_tidy.sh' + + - name: 'TSan, depends, no gui' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_tsan.sh' + + - name: 'MSan, depends' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_msan.sh' + steps: - - name: Checkout - uses: actions/checkout@v4 + - *CHECKOUT - - name: Set CI directories - run: | - echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> "$GITHUB_ENV" - echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV" - echo "BASE_BUILD_DIR=${{ runner.temp }}/build-asan" >> "$GITHUB_ENV" + - name: Configure environment + uses: ./.github/actions/configure-environment - - name: Restore Ccache cache - id: ccache-cache - uses: actions/cache/restore@v4 + - name: Restore caches + id: restore-cache + uses: ./.github/actions/restore-caches + + - name: Configure Docker + uses: ./.github/actions/configure-docker with: - path: ${{ env.CCACHE_DIR }} - key: ${{ github.job }}-ccache-${{ github.run_id }} - restore-keys: ${{ github.job }}-ccache- + use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }} - name: Enable bpfcc script + 
if: ${{ env.CONTAINER_NAME == 'ci_native_asan' }} # In the image build step, no external environment variables are available, # so any settings will need to be written to the settings env file: run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh + - name: Set mmap_rnd_bits + if: ${{ env.CONTAINER_NAME == 'ci_native_tsan' || env.CONTAINER_NAME == 'ci_native_msan' }} + # Prevents crashes due to high ASLR entropy + run: sudo sysctl -w vm.mmap_rnd_bits=28 + - name: CI script run: ./ci/test_run_all.sh - - name: Save Ccache cache - uses: actions/cache/save@v4 - if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true' + - name: Save caches + uses: ./.github/actions/save-caches + + lint: + name: 'lint' + needs: runners + runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-xs' || 'ubuntu-24.04' }} + if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} + timeout-minutes: 20 + env: + CONTAINER_NAME: "bitcoin-linter" + steps: + - name: Checkout + uses: actions/checkout@v6 with: - path: ${{ env.CCACHE_DIR }} - # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache - key: ${{ github.job }}-ccache-${{ github.run_id }} + ref: *CHECKOUT_REF_TMPL + fetch-depth: 0 + + - name: Configure Docker + uses: ./.github/actions/configure-docker + with: + use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }} + + - name: CI script + run: | + set -o xtrace + docker buildx build -t "$CONTAINER_NAME" $DOCKER_BUILD_CACHE_ARG --file "./ci/lint_imagefile" . 
+ CIRRUS_PR_FLAG="" + if [ "${{ github.event_name }}" = "pull_request" ]; then + CIRRUS_PR_FLAG="-e CIRRUS_PR=1" + fi + docker run --rm $CIRRUS_PR_FLAG -v "$(pwd)":/bitcoin "$CONTAINER_NAME" diff --git a/CMakeLists.txt b/CMakeLists.txt index 163a2f71df..034711a925 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,7 +26,7 @@ get_directory_property(precious_variables CACHE_VARIABLES) #============================= # Project / Package metadata #============================= -set(CLIENT_NAME "Bitcoin PyBLOCK") +set(CLIENT_NAME "PyBLOCK +BIP-110") set(CLIENT_VERSION_MAJOR 77) set(CLIENT_VERSION_MINOR 0) set(CLIENT_VERSION_BUILD 0) @@ -65,7 +65,7 @@ if(CLIENT_VERSION_RC GREATER 0) endif() set(COPYRIGHT_HOLDERS "The %s Cypherpunks") -set(COPYRIGHT_HOLDERS_FINAL "The ${CLIENT_NAME} developers") +set(COPYRIGHT_HOLDERS_FINAL "The ${CLIENT_NAME} Spammers") set(CLIENT_BUGREPORT "https://github.com/PyBLOCK-Bitcoin/bitcoin") #============================= @@ -178,7 +178,7 @@ if(WITH_BDB) "BDB (legacy) wallets opened by this build will not be portable!" ) if(WARN_INCOMPATIBLE_BDB) - message(WARNING "If this is intended, pass \"-DWARN_INCOMPATIBLE_BDB=OFF\".\n" + message(FATAL_ERROR "If this is intended, pass \"-DWARN_INCOMPATIBLE_BDB=OFF\".\n" "Passing \"-DWITH_BDB=OFF\" will suppress this warning." ) endif() diff --git a/ci/README.md b/ci/README.md index 377aae7fa0..81e048ce68 100644 --- a/ci/README.md +++ b/ci/README.md @@ -1,8 +1,8 @@ -## CI Scripts +# CI Scripts This directory contains scripts for each build step in each build stage. -### Running a Stage Locally +## Running a Stage Locally Be aware that the tests will be built and run in-place, so please run at your own risk. If the repository is not a fresh git clone, you might have to clean files from previous builds or test runs first. 
@@ -27,7 +27,7 @@ with a specific configuration, env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh' ``` -### Configurations +## Configurations The test files (`FILE_ENV`) are constructed to test a wide range of configurations, rather than a single pass/fail. This helps to catch build @@ -49,8 +49,32 @@ env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'MAKEJOBS="-j1" FILE_ENV=" The files starting with `0n` (`n` greater than 0) are the scripts that are run in order. -### Cache +## Cache In order to avoid rebuilding all dependencies for each build, the binaries are cached and reused when possible. Changes in the dependency-generator will trigger cache-invalidation and rebuilds as necessary. + +## Configuring a repository for CI + +### Primary repository + +To configure the primary repository, follow these steps: + +1. Register with [Cirrus Runners](https://cirrus-runners.app/) and purchase runners. +2. Install the Cirrus Runners GitHub app against the GitHub organization. +3. Enable organisation-level runners to be used in public repositories: + 1. `Org settings -> Actions -> Runner Groups -> Default -> Allow public repos` +4. Permit the following actions to run: + 1. cirruslabs/cache/restore@\* + 1. cirruslabs/cache/save@\* + 1. docker/setup-buildx-action@\* + 1. actions/github-script@\* + +### Forked repositories + +When used in a fork, the CI will run on GitHub's free hosted runners by default. +In this case, due to GitHub's 10GB-per-repo cache size limitations, caches will be frequently evicted and missed, but the workflows will run (slowly). + +It is also possible to use your own Cirrus Runners in your own fork with an appropriate patch to the `REPO_USE_CIRRUS_RUNNERS` variable in ../.github/workflows/ci.yml. +Note that Cirrus Runners only work at an organisation level; therefore, in order to use your own Cirrus Runners, *the fork must be within your own organisation*. 
diff --git a/ci/lint_run_all.sh b/ci/lint_run_all.sh deleted file mode 100755 index c57261d21a..0000000000 --- a/ci/lint_run_all.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019-present The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -# Only used in .cirrus.yml. Refer to test/lint/README.md on how to run locally. - -cp "./ci/retry/retry" "/ci_retry" -cp "./.python-version" "/.python-version" -mkdir --parents "/test/lint" -cp --recursive "./test/lint/test_runner" "/test/lint/" -set -o errexit; source ./ci/lint/04_install.sh -set -o errexit -./ci/lint/06_script.sh diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh index 12074e7006..7779adbb49 100755 --- a/ci/test/00_setup_env.sh +++ b/ci/test/00_setup_env.sh @@ -35,7 +35,7 @@ fi echo "Fallback to default values in env (if not yet set)" # The number of parallel jobs to pass down to make and test_runner.py -export MAKEJOBS=${MAKEJOBS:--j4} +export MAKEJOBS=${MAKEJOBS:--j$(if command -v nproc > /dev/null 2>&1; then nproc; else sysctl -n hw.logicalcpu; fi)} # Whether to prefer BusyBox over GNU utilities export USE_BUSY_BOX=${USE_BUSY_BOX:-false} diff --git a/ci/test/00_setup_env_mac_native.sh b/ci/test/00_setup_env_mac_native.sh index bebd58ad40..197f11d6f7 100755 --- a/ci/test/00_setup_env_mac_native.sh +++ b/ci/test/00_setup_env_mac_native.sh @@ -8,6 +8,7 @@ export LC_ALL=C.UTF-8 # Homebrew's python@3.12 is marked as externally managed (PEP 668). # Therefore, `--break-system-packages` is needed. 
+export CONTAINER_NAME="ci_mac_native" # macos does not use a container, but the env var is needed for logging export PIP_PACKAGES="--break-system-packages zmq" export GOAL="install" export CMAKE_GENERATOR="Ninja" diff --git a/ci/test/00_setup_env_mac_native_fuzz.sh b/ci/test/00_setup_env_mac_native_fuzz.sh index cacf2423ac..22b6bc97ab 100755 --- a/ci/test/00_setup_env_mac_native_fuzz.sh +++ b/ci/test/00_setup_env_mac_native_fuzz.sh @@ -6,6 +6,7 @@ export LC_ALL=C.UTF-8 +export CONTAINER_NAME="ci_mac_native_fuzz" # macos does not use a container, but the env var is needed for logging export CMAKE_GENERATOR="Ninja" export BITCOIN_CONFIG="-DBUILD_FOR_FUZZING=ON" export CI_OS_NAME="macos" diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index c56c1ef3a2..2ad48f8eb4 100755 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -19,15 +19,15 @@ else fi export CONTAINER_NAME=ci_native_asan -export APT_LLVM_V="20" +export APT_LLVM_V="21" export PACKAGES="systemtap-sdt-dev clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev python3-zmq qtbase5-dev qttools5-dev qttools5-dev-tools libevent-dev libboost-dev libdb5.3++-dev libminiupnpc-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}" export NO_DEPENDS=1 export GOAL="install" export BITCOIN_CONFIG="\ -DWITH_USDT=ON -DWITH_ZMQ=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=ON \ -DSANITIZERS=address,float-divide-by-zero,integer,undefined \ - -DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \ - -DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \ + -DCMAKE_C_COMPILER=clang \ + -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \ -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern -Wno-error=deprecated-declarations' \ -DAPPEND_CXXFLAGS='-std=c++23' \ diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh index c5220211fc..d81cbcf228 100755 --- 
a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" export CONTAINER_NAME=ci_native_fuzz -export APT_LLVM_V="20" +export APT_LLVM_V="21" export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libevent-dev libboost-dev libsqlite3-dev" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false @@ -19,9 +19,8 @@ export CI_CONTAINER_CAP="--cap-add SYS_PTRACE" # If run with (ASan + LSan), the export BITCOIN_CONFIG="\ -DBUILD_FOR_FUZZING=ON \ -DSANITIZERS=fuzzer,address,undefined,float-divide-by-zero,integer \ - -DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \ - -DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \ + -DCMAKE_C_COMPILER=clang \ + -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \ -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern' \ " -export LLVM_SYMBOLIZER_PATH="/usr/bin/llvm-symbolizer-${APT_LLVM_V}" diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh index a6e53dc8a2..655fe609c0 100755 --- a/ci/test/00_setup_env_native_fuzz_with_msan.sh +++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh @@ -7,14 +7,16 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -LIBCXX_DIR="/msan/cxx_build/" +export APT_LLVM_V="21" +LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" -LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" +# -lstdc++ to resolve link issues due to upstream packaging +LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument -lstdc++" export 
MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" export CONTAINER_NAME="ci_native_fuzz_msan" -export PACKAGES="ninja-build" # BDB generates false-positives and will be removed in future +export PACKAGES="ninja-build clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev" export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" export GOAL="all" # Setting CMAKE_{C,CXX}_FLAGS_DEBUG flags to an empty string ensures that the flags set in MSAN_FLAGS remain unaltered. @@ -27,7 +29,7 @@ export BITCOIN_CONFIG="\ -DSANITIZERS=fuzzer,memory \ -DAPPEND_CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE -U_FORTIFY_SOURCE' \ " -export USE_MEMORY_SANITIZER="true" +export USE_INSTRUMENTED_LIBCPP="MemoryWithOrigins" export RUN_UNIT_TESTS="false" export RUN_FUNCTIONAL_TESTS="false" export RUN_FUZZ_TESTS=true diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index 8784aaa5b7..879e82d55a 100755 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -7,13 +7,14 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -LIBCXX_DIR="/msan/cxx_build/" +export APT_LLVM_V="21" +LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" export CONTAINER_NAME="ci_native_msan" -export PACKAGES="ninja-build" +export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev ninja-build" # BDB generates false-positives and will be removed in future export DEP_OPTS="DEBUG=1 
NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" export GOAL="install" @@ -26,4 +27,4 @@ export BITCOIN_CONFIG="\ -DSANITIZERS=memory \ -DAPPEND_CPPFLAGS='-U_FORTIFY_SOURCE' \ " -export USE_MEMORY_SANITIZER="true" +export USE_INSTRUMENTED_LIBCPP="MemoryWithOrigins" diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index b341adfec5..6286e39d84 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -8,9 +8,12 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_tsan export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -export APT_LLVM_V="20" -export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libc++abi-${APT_LLVM_V}-dev libc++-${APT_LLVM_V}-dev python3-zmq" -export DEP_OPTS="CC=clang-${APT_LLVM_V} CXX='clang++-${APT_LLVM_V} -stdlib=libc++'" +export APT_LLVM_V="21" +LIBCXX_DIR="/cxx_build/" +LIBCXX_FLAGS="-fsanitize=thread -nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" +export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev python3-zmq ninja-build" +export DEP_OPTS="CC=clang CXX=clang++ CXXFLAGS='${LIBCXX_FLAGS}' NO_QT=1" export GOAL="install" export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DSANITIZERS=thread \ --DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" +-DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" +export USE_INSTRUMENTED_LIBCPP="Thread" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 1344563268..65f68351c8 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -43,32 +43,24 @@ elif [ "$CI_OS_NAME" != "macos" ]; then ${CI_RETRY_EXE} bash -c "apt-get 
install --no-install-recommends --no-upgrade -y $PACKAGES $CI_BASE_PACKAGES" fi +if [ -n "${APT_LLVM_V}" ]; then + update-alternatives --install /usr/bin/clang++ clang++ "/usr/bin/clang++-${APT_LLVM_V}" 100 + update-alternatives --install /usr/bin/clang clang "/usr/bin/clang-${APT_LLVM_V}" 100 + update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer "/usr/bin/llvm-symbolizer-${APT_LLVM_V}" 100 +fi + if [ -n "$PIP_PACKAGES" ]; then # shellcheck disable=SC2086 ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES fi -if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.0" /msan/llvm-project - - cmake -G Ninja -B /msan/clang_build/ \ - -DLLVM_ENABLE_PROJECTS="clang" \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_TARGETS_TO_BUILD=Native \ - -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ - -S /msan/llvm-project/llvm - - ninja -C /msan/clang_build/ "$MAKEJOBS" - ninja -C /msan/clang_build/ install-runtimes - - update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100 - update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100 - update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100 +if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.1" /llvm-project - cmake -G Ninja -B /msan/cxx_build/ \ + cmake -G Ninja -B /cxx_build/ \ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \ -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_USE_SANITIZER=MemoryWithOrigins \ + -DLLVM_USE_SANITIZER="${USE_INSTRUMENTED_LIBCPP}" \ -DCMAKE_C_COMPILER=clang \ -DCMAKE_CXX_COMPILER=clang++ \ -DLLVM_TARGETS_TO_BUILD=Native \ @@ -76,13 +68,13 @@ if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then -DLIBCXXABI_USE_LLVM_UNWINDER=OFF \ 
-DLIBCXX_ABI_DEFINES="_LIBCPP_ABI_BOUNDED_ITERATORS;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STD_ARRAY;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STRING;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_VECTOR;_LIBCPP_ABI_BOUNDED_UNIQUE_PTR" \ -DLIBCXX_HARDENING_MODE=debug \ - -S /msan/llvm-project/runtimes + -S /llvm-project/runtimes - ninja -C /msan/cxx_build/ "$MAKEJOBS" + ninja -C /cxx_build/ "$MAKEJOBS" # Clear no longer needed source folder - du -sh /msan/llvm-project - rm -rf /msan/llvm-project + du -sh /llvm-project + rm -rf /llvm-project fi if [[ "${RUN_TIDY}" == "true" ]]; then diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh index 8351fd4e02..131b3c6148 100755 --- a/ci/test/02_run_container.sh +++ b/ci/test/02_run_container.sh @@ -23,34 +23,14 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then fi echo "Creating $CI_IMAGE_NAME_TAG container to run in" - DOCKER_BUILD_CACHE_ARG="" - DOCKER_BUILD_CACHE_TEMPDIR="" - DOCKER_BUILD_CACHE_OLD_DIR="" - DOCKER_BUILD_CACHE_NEW_DIR="" - # If set, use an `docker build` cache directory on the CI host - # to cache docker image layers for the CI container image. - # This cache can be multiple GB in size. Prefixed with DANGER - # as setting it removes (old cache) files from the host. - if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then - # Directory where the current cache for this run could be. If not existing - # or empty, "docker build" will warn, but treat it as cache-miss and continue. - DOCKER_BUILD_CACHE_OLD_DIR="${DANGER_DOCKER_BUILD_CACHE_HOST_DIR}/${CONTAINER_NAME}" - # Temporary directory for a newly created cache. We can't write the new - # cache into OLD_DIR directly, as old cache layers would not be removed. - # The NEW_DIR contents are moved to OLD_DIR after OLD_DIR has been cleared. - # This happens after `docker build`. If a task fails or is aborted, the - # DOCKER_BUILD_CACHE_TEMPDIR might be retained on the host. If the host isn't - # ephemeral, it has to take care of cleaning old TEMPDIR's up. 
- DOCKER_BUILD_CACHE_TEMPDIR="$(mktemp --directory ci-docker-build-cache-XXXXXXXXXX)" - DOCKER_BUILD_CACHE_NEW_DIR="${DOCKER_BUILD_CACHE_TEMPDIR}/${CONTAINER_NAME}" - DOCKER_BUILD_CACHE_ARG="--cache-from type=local,src=${DOCKER_BUILD_CACHE_OLD_DIR} --cache-to type=local,dest=${DOCKER_BUILD_CACHE_NEW_DIR},mode=max" - fi - + # Use buildx unconditionally + # Using buildx is required to properly load the correct driver, for use with registry caching. Neither build, nor BUILDKIT=1 currently do this properly # shellcheck disable=SC2086 - DOCKER_BUILDKIT=1 docker build \ + docker buildx build \ --file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \ --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \ --build-arg "FILE_ENV=${FILE_ENV}" \ + --build-arg "BASE_ROOT_DIR=${BASE_ROOT_DIR}" \ $MAYBE_CPUSET \ --platform="${CI_IMAGE_PLATFORM}" \ --label="${CI_IMAGE_LABEL}" \ @@ -58,15 +38,6 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then $DOCKER_BUILD_CACHE_ARG \ "${BASE_READ_ONLY_DIR}" - if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then - if [ -e "${DOCKER_BUILD_CACHE_NEW_DIR}/index.json" ]; then - echo "Removing the existing docker build cache in ${DOCKER_BUILD_CACHE_OLD_DIR}" - rm -rf "${DOCKER_BUILD_CACHE_OLD_DIR}" - echo "Moving the contents of ${DOCKER_BUILD_CACHE_NEW_DIR} to ${DOCKER_BUILD_CACHE_OLD_DIR}" - mv "${DOCKER_BUILD_CACHE_NEW_DIR}" "${DOCKER_BUILD_CACHE_OLD_DIR}" - fi - fi - docker volume create "${CONTAINER_NAME}_ccache" || true docker volume create "${CONTAINER_NAME}_depends" || true docker volume create "${CONTAINER_NAME}_depends_sources" || true diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh index b218e7b9d1..36f8b9dfc2 100755 --- a/ci/test/03_test_script.sh +++ b/ci/test/03_test_script.sh @@ -24,6 +24,14 @@ fi echo "Free disk space:" df -h +# We force an install of linux-headers again here via $PACKAGES to fix any +# kernel mismatch between a cached docker image and the underlying host. 
+# This can happen occasionally on hosted runners if the runner image is updated. +if [[ "$CONTAINER_NAME" == "ci_native_asan" ]]; then + $CI_RETRY_EXE apt-get update + ${CI_RETRY_EXE} bash -c "apt-get install --no-install-recommends --no-upgrade -y $PACKAGES" +fi + # What host to compile for. See also ./depends/README.md # Tests that need cross-compilation export the appropriate HOST. # Tests that run natively guess the host @@ -129,6 +137,12 @@ bash -c "cmake -S $BASE_ROOT_DIR $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( (cat $ bash -c "cmake --build . $MAKEJOBS --target all $GOAL" || ( echo "Build failure. Verbose build follows." && cmake --build . --target all "$GOAL" --verbose ; false ) bash -c "${PRINT_CCACHE_STATISTICS}" +if [ "$CI" = "true" ]; then + hit_rate=$(ccache -s | grep "Hits:" | head -1 | sed 's/.*(\(.*\)%).*/\1/') + if [ "${hit_rate%.*}" -lt 75 ]; then + echo "::notice title=low ccache hitrate::Ccache hit-rate in $CONTAINER_NAME was $hit_rate%" + fi +fi du -sh "${DEPENDS_DIR}"/*/ du -sh "${PREVIOUS_RELEASES_DIR}" diff --git a/ci/test_imagefile b/ci/test_imagefile index f8b5eea1c8..f9cf3187a2 100644 --- a/ci/test_imagefile +++ b/ci/test_imagefile @@ -4,12 +4,16 @@ # See ci/README.md for usage. 
-ARG CI_IMAGE_NAME_TAG +# We never want scratch, but default arg silences a Warning +ARG CI_IMAGE_NAME_TAG=scratch FROM ${CI_IMAGE_NAME_TAG} ARG FILE_ENV ENV FILE_ENV=${FILE_ENV} +ARG BASE_ROOT_DIR +ENV BASE_ROOT_DIR=${BASE_ROOT_DIR} + COPY ./ci/retry/retry /usr/bin/retry COPY ./ci/test/00_setup_env.sh ./${FILE_ENV} ./ci/test/01_base_install.sh /ci_container_base/ci/test/ diff --git a/cmake/module/FindUSDT.cmake b/cmake/module/FindUSDT.cmake index 0be7c28ff5..234a099f3f 100644 --- a/cmake/module/FindUSDT.cmake +++ b/cmake/module/FindUSDT.cmake @@ -36,6 +36,10 @@ if(USDT_INCLUDE_DIR) include(CheckCXXSourceCompiles) set(CMAKE_REQUIRED_INCLUDES ${USDT_INCLUDE_DIR}) check_cxx_source_compiles(" + #if defined(__arm__) + # define STAP_SDT_ARG_CONSTRAINT g + #endif + // Setting SDT_USE_VARIADIC lets systemtap (sys/sdt.h) know that we want to use // the optional variadic macros to define tracepoints. #define SDT_USE_VARIADIC 1 diff --git a/contrib/completions/zsh/bitcoin-cli.zsh b/contrib/completions/zsh/bitcoin-cli.zsh new file mode 100644 index 0000000000..f393e2c523 --- /dev/null +++ b/contrib/completions/zsh/bitcoin-cli.zsh @@ -0,0 +1,144 @@ +#compdef bitcoin-cli +# zsh completion for bitcoin-cli(1) +# DO NOT EDIT THIS FILE BY HAND -- THIS WILL FAIL THE FUNCTIONAL TEST tool_cli_completion +# This file is auto-generated by the functional test tool_cli_completion. +# If you want to modify this file, modify test/functional/tool_cli_completion.py and re-autogenerate +# this file via the --overwrite test flag. + +# Copyright (c) 2012-2024 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +# call bitcoin-cli for RPC +_bitcoin_rpc() { + # determine already specified args necessary for RPC + local rpcargs=() + local -a words_array + words_array=(${(z)BUFFER}) + + for i in $words_array; do + case "$i" in + -conf=*|-datadir=*|-regtest|-rpc*|-testnet|-testnet4) + rpcargs+=("$i") + ;; + esac + done + + $bitcoin_cli "${rpcargs[@]}" "$@" +} + +_bitcoin-cli() { + local context state line + local bitcoin_cli="$words[1]" + + if (( CURRENT > 6 )); then + case ${words[CURRENT-5]} in + descriptorprocesspsbt) + _values 'arg' 'false' 'true' + return 0 + ;; + esac + fi + + if (( CURRENT > 5 )); then + case ${words[CURRENT-4]} in + createpsbt|createrawtransaction|descriptorprocesspsbt|setban) + _values 'arg' 'false' 'true' + return 0 + ;; + signrawtransactionwithkey) + _values 'arg' 'ALL' 'ALL|ANYONECANPAY' 'NONE' 'NONE|ANYONECANPAY' 'SINGLE' 'SINGLE|ANYONECANPAY' + return 0 + ;; + esac + fi + + if (( CURRENT > 4 )); then + case ${words[CURRENT-3]} in + addnode|converttopsbt|getdescriptoractivity|gettxout|gettxoutsetinfo) + _values 'arg' 'false' 'true' + return 0 + ;; + esac + fi + + if (( CURRENT > 3 )); then + case ${words[CURRENT-2]} in + converttopsbt|decoderawtransaction|finalizepsbt|getblockheader|getmempoolancestors|getmempooldescendants|getrawmempool|listmempooltransactions) + _values 'arg' 'false' 'true' + return 0 + ;; + addnode) + _values 'arg' 'add' 'onetry' 'remove' + return 0 + ;; + setban) + _values 'arg' 'add' 'remove' + return 0 + ;; + estimatesmartfee) + _values 'arg' 'CONSERVATIVE' 'ECONOMICAL' 'UNSET' + return 0 + ;; + esac + fi + + # Handle previous word completions + case "${words[CURRENT-1]}" in + dumptxoutset|importmempool|loadtxoutset) + _files + return 0 + ;; + getrawmempool|setnetworkactive|setscriptthreadsenabled) + _values 'arg' 'false' 'true' + return 0 + ;; + esac + # Handle current word completions + case "$words[CURRENT]" in + -conf=*) + local conf_path=${words[CURRENT]#-conf=} + _files -W ${conf_path:h} -g "*" + return 0 + ;; + 
-datadir=*) + local datadir_path=${words[CURRENT]#-datadir=} + _files -/ -W ${datadir_path:h} + return 0 + ;; + -*=*) + # prevent nonsense completions + return 0 + ;; + *) + local helpopts commands + local -a opts + + # only parse -help if sensible (empty or starts with -) + if [[ -z "$words[CURRENT]" || "$words[CURRENT]" == -* ]]; then + helpopts="$($bitcoin_cli -help 2>&1 | awk '$1 ~ /^-/ { sub(/=.*/, "="); print $1 }')" + opts+=(${(f)helpopts}) + fi + + # only parse help if sensible (empty or starts with letter) + if [[ -z "$words[CURRENT]" || "$words[CURRENT]" == [a-z]* ]]; then + commands="$(_bitcoin_rpc help 2>/dev/null | awk '$1 ~ /^[a-z]/ { print $1; }')" + opts+=(${(f)commands}) + fi + + _describe 'bitcoin-cli options and commands' opts + + return 0 + ;; + esac +} + +# Function is now defined and will be called by zsh completion system + +# Local variables: +# mode: shell-script +# sh-basic-offset: 4 +# sh-indent-comment: t +# indent-tabs-mode: nil +# End: +# ex: ts=4 sw=4 et filetype=sh diff --git a/contrib/docker/Dockerfile b/contrib/docker/Dockerfile new file mode 100644 index 0000000000..a3088a5b72 --- /dev/null +++ b/contrib/docker/Dockerfile @@ -0,0 +1,71 @@ +FROM alpine:3.22 AS builder + +RUN apk add --no-cache \ + build-base \ + cmake \ + boost-dev \ + libevent-dev \ + sqlite-dev \ + zeromq-dev \ + coreutils \ + binutils + +WORKDIR /opt/bitcoin + +COPY . . + +WORKDIR /opt/bitcoin/build + +RUN cmake .. \ + -DCMAKE_INSTALL_PREFIX="/usr/local/" \ + -DBUILD_DAEMON="ON" \ + -DBUILD_CLI="ON" \ + -DENABLE_WALLET="ON" \ + -DWITH_ZMQ="ON" \ + -DBUILD_TESTS="ON" \ + -DBUILD_GUI="OFF" \ + -DBUILD_TX="OFF" \ + -DBUILD_UTIL="OFF" \ + -DBUILD_WALLET_TOOL="OFF" \ + -DBUILD_BENCH="OFF" \ + -DBUILD_FUZZ_BINARY="OFF" \ + -DBUILD_UTIL_CHAINSTATE="OFF" \ + -DWITH_BDB="OFF" \ + -DWITH_USDT="OFF" \ + -DINSTALL_MAN="OFF" \ + -DWITH_CCACHE="OFF" + +RUN cmake --build . --parallel $(nproc) +RUN ctest --output-on-failure +RUN cmake --install . 
+ +RUN strip --strip-unneeded /usr/local/bin/* + +FROM alpine:3.22 AS final + +ARG USER_ID=1000 +ARG GROUP_ID=1000 + +RUN apk add --no-cache \ + libevent \ + sqlite-libs \ + zeromq \ + boost-system \ + boost-filesystem \ + boost-program_options + +COPY --from=builder /usr/local/bin/bitcoind /usr/local/bin/bitcoind +COPY --from=builder /usr/local/bin/bitcoin-cli /usr/local/bin/bitcoin-cli + +RUN addgroup -S -g ${GROUP_ID} bitcoin && \ + adduser -S -u ${USER_ID} -G bitcoin -H -s /bin/false bitcoin + +WORKDIR /var/lib/bitcoind +EXPOSE 8333 8332 +USER bitcoin +VOLUME ["/var/lib/bitcoind", "/etc/bitcoin/bitcoin.conf"] + +ENTRYPOINT ["bitcoind", "-conf=/etc/bitcoin/bitcoin.conf", "-datadir=/var/lib/bitcoind"] + +HEALTHCHECK --interval=5m --timeout=15s --start-period=2m --start-interval=10s \ + CMD ["bitcoin-cli", "-conf=/etc/bitcoin/bitcoin.conf", "-datadir=/var/lib/bitcoind", "getblockchaininfo"] diff --git a/contrib/docker/README.md b/contrib/docker/README.md new file mode 100644 index 0000000000..45dee1efa8 --- /dev/null +++ b/contrib/docker/README.md @@ -0,0 +1,67 @@ + +# 🚀 Bitcoin Knots Docker Image (Headless Node) + +This Dockerfile builds and runs a **Bitcoin Knots** full node from source. + +## 🧱 Features + +* Stripped of all non-essential components (tests, debug data, documentation, etc.) +* Data directory persisted via volume +* Accessible via RPC + +--- + +## 📦 Build the Docker Image + +**make sure you're at the root of the repo first!** + +```bash +docker build \ + -f contrib/docker/Dockerfile \ + -t bitcoinknots \ + --build-arg USER_ID=$(id -u) \ + --build-arg GROUP_ID=$(id -g) \ + --load . 
+``` + +--- + +## ▶️ Run the Node + +```bash +docker run -d \ + --init \ + --user $(id -u):$(id -g) \ + --name bitcoinknots \ + -p 8333:8333 -p 127.0.0.1:8332:8332 \ + -v path/to/conf:/etc/bitcoin/bitcoin.conf:ro \ + -v path/to/data:/var/lib/bitcoind:rw \ + bitcoinknots +``` + +In case you want to use ZeroMQ sockets, make sure to expose those ports as well by adding `-p host_port:container_port` directives to the command above. +In case `path/to/data` is not writable by your user, consider overriding the `--user` flag. + +This will: + +* Start the node in the background +* Save the blockchain and config in `/path/to/data` +* Expose peer and RPC ports + +--- + +## 📊 Check Node Status + +```bash +docker logs bitcoinknots +``` + +--- + +## 🛑 Stop the Node + +```bash +docker stop bitcoinknots +``` + +--- diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 5bb7001561..0ebae76f54 100755 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -285,7 +285,7 @@ mkdir -p "$DISTSRC" case "$HOST" in *mingw*) cmake --build build -j "$JOBS" -t deploy ${V:+--verbose} - mv build/bitcoin-win64-setup.exe "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" + mv build/bitcoin-win64-setup.exe "${OUTDIR}/${DISTNAME}-win64-setup-pgpverifiable.exe" ;; esac @@ -349,8 +349,8 @@ mkdir -p "$DISTSRC" | xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}" find "${DISTNAME}" -not -name "*.dbg" \ | sort \ - | zip -X@ "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-unsigned.zip" \ - || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-unsigned.zip" && exit 1 ) + | zip -X@ "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-pgpverifiable.zip" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-pgpverifiable.zip" && exit 1 ) find "${DISTNAME}" -name "*.dbg" -print0 \ | xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}" find "${DISTNAME}" -name "*.dbg" \ @@ -387,7 +387,7 @@ mkdir -p 
"$DISTSRC" ( cd ./windeploy mkdir -p unsigned - cp --target-directory=unsigned/ "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" + cp --target-directory=unsigned/ "${OUTDIR}/${DISTNAME}-win64-setup-pgpverifiable.exe" cp -r --target-directory=unsigned/ "${INSTALLPATH}" find unsigned/ -name "*.dbg" -print0 \ | xargs -0r rm diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index bfaef8c21f..85de3f2a82 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -2,6 +2,7 @@ ((gnu packages bash) #:select (bash-minimal)) (gnu packages bison) ((gnu packages certs) #:select (nss-certs)) + ((gnu packages check) #:select (libfaketime)) ((gnu packages cmake) #:select (cmake-minimal)) (gnu packages commencement) (gnu packages compression) @@ -212,7 +213,17 @@ and abstract ELF, PE and MachO formats.") (base32 "1j47vwq4caxfv0xw68kw5yh00qcpbd56d7rq6c483ma3y7s96yyz")))) (build-system cmake-build-system) - (inputs (list openssl)) + (arguments + (list + #:phases + #~(modify-phases %standard-phases + (replace 'check + (lambda* (#:key tests? #:allow-other-keys) + (if tests? + (invoke "faketime" "-f" "@2025-01-01 00:00:00" ;; Tests fail after 2025. 
+ "ctest" "--output-on-failure" "--no-tests=error") + (format #t "test suite not run~%"))))))) + (inputs (list libfaketime openssl)) (home-page "https://github.com/mtrojnar/osslsigncode") (synopsis "Authenticode signing and timestamping tool") (description "osslsigncode is a small tool that implements part of the diff --git a/contrib/init/bitcoind.openrc b/contrib/init/bitcoind.openrc index 6e8099e587..c09547240b 100644 --- a/contrib/init/bitcoind.openrc +++ b/contrib/init/bitcoind.openrc @@ -19,7 +19,7 @@ BITCOIND_BIN=${BITCOIND_BIN:-/usr/bin/bitcoind} BITCOIND_NICE=${BITCOIND_NICE:-${NICELEVEL:-0}} BITCOIND_OPTS="${BITCOIND_OPTS:-${BITCOIN_OPTS}}" -name="PyBLOCK daemon" +name="PyBLOCK+BIP-110 daemon" description="Bitcoin cryptocurrency P2P network daemon" required_files="${BITCOIND_CONFIGFILE}" diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index eaa7b896be..5120537626 100755 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -465,18 +465,18 @@ if config.translations_dir: sys.stderr.write(f"Error: Could not find translation dir \"{config.translations_dir[0]}\"\n") sys.exit(1) -print("+ Adding Qt translations +") + print("+ Adding Qt translations +") -translations = Path(config.translations_dir[0]) + translations = Path(config.translations_dir[0]) -regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)') + regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)') -lang_files = [x for x in translations.iterdir() if regex.match(x.name)] + lang_files = [x for x in translations.iterdir() if regex.match(x.name)] -for file in lang_files: - if verbose: - print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name)) - shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name)) + for file in lang_files: + if verbose: + print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name)) + shutil.copy2(file.as_posix(), 
os.path.join(applicationBundle.resourcesPath, file.name)) # ------------------------------------------------ diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md index a1a2e34b5d..58d7f41130 100644 --- a/contrib/seeds/README.md +++ b/contrib/seeds/README.md @@ -10,14 +10,13 @@ to addrman with). Update `MIN_BLOCKS` in `makeseeds.py` and the `-m`/`--minblocks` arguments below, as needed. -The seeds compiled into the release are created from sipa's, achow101's and luke-jr's +The seeds compiled into the release are created from sipa's and achow101's DNS seed, virtu's crawler, and asmap community AS map data. Run the following commands from the `/contrib/seeds` directory: ``` curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt curl https://21.ninja/seeds.txt.gz | gzip -dc >> seeds_main.txt -curl https://luke.dashjr.org/programs/bitcoin/files/charts/seeds.txt >> seeds_main.txt curl https://mainnet.achownodes.xyz/seeds.txt.gz | gzip -dc >> seeds_main.txt curl https://signet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_signet.txt curl https://testnet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_test.txt diff --git a/contrib/verify-commits/trusted-keys b/contrib/verify-commits/trusted-keys index f25486776f..0121f290b0 100644 --- a/contrib/verify-commits/trusted-keys +++ b/contrib/verify-commits/trusted-keys @@ -3,3 +3,4 @@ D1DBF2C4B96F2DEBF4C16654410108112E7EA81F 152812300785C96444D3334D17565732E08E5E41 6B002C6EA3F91B1B0DF0C9BC8F617F1200A6D25C 4D1B3D5ECBA1A7E05371EEBE46800E30FC748A66 +A8FC55F3B04BA3146F3492E79303B33A305224CB diff --git a/depends/funcs.mk b/depends/funcs.mk index dce45af961..d2562125d0 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -36,7 +36,7 @@ endef define fetch_file ( test -f $$($(1)_source_dir)/$(4) || \ ( $(call fetch_file_inner,$(1),$(2),$(3),$(4),$(5)) || \ - $(call fetch_file_inner,$(1),$(FALLBACK_DOWNLOAD_PATH),$(3),$(4),$(5)))) + $(call fetch_file_inner,$(1),$(FALLBACK_DOWNLOAD_PATH),$(4),$(4),$(5)))) endef 
define int_get_build_recipe_hash @@ -208,7 +208,6 @@ endif $($(1)_fetched): mkdir -p $$(@D) $(SOURCES_PATH) rm -f $$@ - touch $$@ cd $$(@D); $($(1)_fetch_cmds) cd $($(1)_source_dir); $(foreach source,$($(1)_all_sources),$(build_SHA256SUM) $(source) >> $$(@);) touch $$@ diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk index 1f139b1eec..f8816fddc6 100644 --- a/depends/packages/libevent.mk +++ b/depends/packages/libevent.mk @@ -4,6 +4,7 @@ $(package)_download_path=https://github.com/libevent/libevent/releases/download/ $(package)_file_name=$(package)-$($(package)_version).tar.gz $(package)_sha256_hash=92e6de1be9ec176428fd2367677e61ceffc2ee1cb119035037a27d346b0403bb $(package)_patches=cmake_fixups.patch +$(package)_patches += ignore_git_describe.patch $(package)_patches += netbsd_fixup.patch $(package)_patches += winver_fixup.patch $(package)_build_subdir=build @@ -26,6 +27,7 @@ endef define $(package)_preprocess_cmds patch -p1 < $($(package)_patch_dir)/cmake_fixups.patch && \ + patch -p1 < $($(package)_patch_dir)/ignore_git_describe.patch && \ patch -p1 < $($(package)_patch_dir)/netbsd_fixup.patch && \ patch -p1 < $($(package)_patch_dir)/winver_fixup.patch endef diff --git a/depends/packages/miniupnpc.mk b/depends/packages/miniupnpc.mk index 59e7a31310..7afb2ed16f 100644 --- a/depends/packages/miniupnpc.mk +++ b/depends/packages/miniupnpc.mk @@ -1,6 +1,6 @@ package=miniupnpc $(package)_version=2.3.3 -$(package)_download_path=https://miniupnp.tuxfamily.org/files/ +$(package)_download_path=https://github.com/miniupnp/miniupnp/releases/download/$(package)_$(subst .,_,$($(package)_version))/ $(package)_file_name=$(package)-$($(package)_version).tar.gz $(package)_sha256_hash=d52a0afa614ad6c088cc9ddff1ae7d29c8c595ac5fdd321170a05f41e634bd1a $(package)_patches=dont_leak_info.patch diff --git a/depends/packages/native_libmultiprocess.mk b/depends/packages/native_libmultiprocess.mk index 4467dee76f..a76304f9f0 100644 --- 
a/depends/packages/native_libmultiprocess.mk +++ b/depends/packages/native_libmultiprocess.mk @@ -1,8 +1,8 @@ package=native_libmultiprocess -$(package)_version=1954f7f65661d49e700c344eae0fc8092decf975 +$(package)_version=v5.0 $(package)_download_path=https://github.com/bitcoin-core/libmultiprocess/archive $(package)_file_name=$($(package)_version).tar.gz -$(package)_sha256_hash=fc014bd74727c1d5d30b396813685012c965d079244dd07b53bc1c75c610a2cb +$(package)_sha256_hash=401984715b271a3446e1910f21adf048ba390d31cc93cc3073742e70d56fa3ea $(package)_dependencies=native_capnp define $(package)_config_cmds diff --git a/depends/packages/qrencode.mk b/depends/packages/qrencode.mk index e3f614091d..44e80b2a19 100644 --- a/depends/packages/qrencode.mk +++ b/depends/packages/qrencode.mk @@ -1,8 +1,9 @@ package=qrencode $(package)_version=4.1.1 -$(package)_download_path=https://fukuchi.org/works/qrencode/ -$(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=da448ed4f52aba6bcb0cd48cac0dd51b8692bccc4cd127431402fca6f8171e8e +$(package)_download_path=https://github.com/fukuchi/libqrencode/archive/refs/tags/ +$(package)_download_file=v$($(package)_version).tar.gz +$(package)_file_name=$(package)-$($(package)_version)-github.tar.gz +$(package)_sha256_hash=5385bc1b8c2f20f3b91d258bf8ccc8cf62023935df2d2676b5b67049f31a049c $(package)_patches=cmake_fixups.patch define $(package)_set_vars diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 2500c24d96..5cc7b985f6 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -1,9 +1,9 @@ package=qt -$(package)_version=5.15.17 +$(package)_version=5.15.18 $(package)_download_path=https://download.qt.io/archive/qt/5.15/$($(package)_version)/submodules $(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz $(package)_file_name=qtbase-$($(package)_suffix) -$(package)_sha256_hash=db1513cbb3f4a5bd2229f759c0839436f7fe681a800ff2bc34c4960b09e756ff 
+$(package)_sha256_hash=7b632550ea1048fc10c741e46e2e3b093e5ca94dfa6209e9e0848800e247023b $(package)_linux_dependencies=freetype fontconfig libxcb libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm $(package)_qt_libs=corelib network widgets gui plugins testlib $(package)_linguist_tools = lrelease lupdate lconvert @@ -25,16 +25,16 @@ $(package)_patches += windows_lto.patch $(package)_patches += darwin_no_libm.patch $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) -$(package)_qttranslations_sha256_hash=309ddea3d2696042001c5d0ef1ea86cec8d0323bc3a0b942b65aaaf5a5d483c9 +$(package)_qttranslations_sha256_hash=e5625757913caf66a9d702ba102ae92cb165d8dde17759b6de9fdea84a1f857f $(package)_qttools_file_name=qttools-$($(package)_suffix) -$(package)_qttools_sha256_hash=433006eb6732bb7f546f63e0d1890477a9dd2f889228f30aa881aed5dfc9bfc6 +$(package)_qttools_sha256_hash=931e0969d9f9d8f233e5e9bf9db0cea9ce9914d49982f1795fe6191010113568 $(package)_extra_sources = $($(package)_qttranslations_file_name) $(package)_extra_sources += $($(package)_qttools_file_name) $(package)_qtwinextras_file_name=qtwinextras-$($(package)_suffix) -$(package)_qtwinextras_sha256_hash=5e0e0e583b03f831ae02bf63a25a01e3bb511100ea6ca695fbc617b5c7f87bdc +$(package)_qtwinextras_sha256_hash=8fbcd86483a348aa1232df7651472b5c7165e6a31d0d66dfd21cc5bfe06083d4 $(package)_extra_sources += $($(package)_qtwinextras_file_name) define $(package)_set_vars diff --git a/depends/patches/libevent/ignore_git_describe.patch b/depends/patches/libevent/ignore_git_describe.patch new file mode 100644 index 0000000000..833757182e --- /dev/null +++ b/depends/patches/libevent/ignore_git_describe.patch @@ -0,0 +1,11 @@ +--- a/cmake/VersionViaGit.cmake ++++ b/cmake/VersionViaGit.cmake +@@ -28,7 +28,7 @@ macro(event_fuzzy_version_from_git) + + find_package(Git) + +- if (GIT_FOUND) ++ if (0) + execute_process( + COMMAND + ${GIT_EXECUTABLE} describe --abbrev=0 --always diff --git 
a/doc/bips.md b/doc/bips.md index a462b8a070..f5b70d35e6 100644 --- a/doc/bips.md +++ b/doc/bips.md @@ -34,6 +34,7 @@ BIPs that are implemented by Bitcoin Knots: * [`BIP 90`](https://github.com/bitcoin/bips/blob/master/bip-0090.mediawiki): Trigger mechanism for activation of BIPs 34, 65, and 66 has been simplified to block height checks since **v0.14.0** ([PR #8391](https://github.com/bitcoin/bitcoin/pull/8391)). * [`BIP 93`](https://github.com/bitcoin/bips/blob/master/bip-0093.mediawiki): Support for importing codex32 seeds via the `importdescriptors` RPC method is available as of **v25.0.knots20230823** ([PR #27351](https://github.com/bitcoin/bitcoin/pull/27351)). * [`BIP 94`](https://github.com/bitcoin/bips/blob/master/bip-0094.mediawiki): Testnet 4 (`-testnet4`) supported as of **v28.0** ([PR #29775](https://github.com/bitcoin/bitcoin/pull/29775)). +* [`BIP 110`](https://github.com/bitcoin/bips/blob/master/bip-0110.mediawiki): ReducedData Temporary Softfork (RDTS) consensus rules are implemented as of **FIXME: update version** ([PR #238](https://github.com/bitcoinknots/bitcoin/pull/238)). * [`BIP 111`](https://github.com/bitcoin/bips/blob/master/bip-0111.mediawiki): `NODE_BLOOM` service bit added, and enforced for all peer versions as of **v0.13.0** ([PR #6579](https://github.com/bitcoin/bitcoin/pull/6579) and [PR #6641](https://github.com/bitcoin/bitcoin/pull/6641)). * [`BIP 112`](https://github.com/bitcoin/bips/blob/master/bip-0112.mediawiki): The CHECKSEQUENCEVERIFY opcode has been implemented since **v0.12.1** ([PR #7524](https://github.com/bitcoin/bitcoin/pull/7524)), and has been *buried* since **v0.19.0** ([PR #16060](https://github.com/bitcoin/bitcoin/pull/16060)). 
* [`BIP 113`](https://github.com/bitcoin/bips/blob/master/bip-0113.mediawiki): Median time past lock-time calculations have been implemented since **v0.12.1** ([PR #6566](https://github.com/bitcoin/bitcoin/pull/6566)), and has been *buried* since **v0.19.0** ([PR #16060](https://github.com/bitcoin/bitcoin/pull/16060)). @@ -79,3 +80,4 @@ BIPs that are implemented by Bitcoin Knots: * [`BIP 386`](https://github.com/bitcoin/bips/blob/master/bip-0386.mediawiki): tr() Output Script Descriptors are implemented as of **v22.0** ([PR 22051](https://github.com/bitcoin/bitcoin/pull/22051)). * [`BIP 387`](https://github.com/bitcoin/bips/blob/master/bip-0387.mediawiki): Tapscript Multisig Output Script Descriptors are implemented as of **v24.0** ([PR 24043](https://github.com/bitcoin/bitcoin/pull/24043)). * [`BIP 431`](https://github.com/bitcoin/bips/blob/master/bip-0431.mediawiki): Transactions with nVersion=3 are standard and can be treated as Topologically Restricted Until Confirmation as of **v27.1.knots20240612**, though not enforced by default ([PR 29496](https://github.com/bitcoin/bitcoin/pull/29496)). +* [`BIP 433`](https://github.com/bitcoin/bips/blob/master/bip-0433.mediawiki): Spending of Pay to Anchor (P2A) outputs is standard as of **v28.0** ([PR 30352](https://github.com/bitcoin/bitcoin/pull/30352)). diff --git a/doc/dependencies.md b/doc/dependencies.md index b0aa2efa32..b20ec1f348 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -32,7 +32,7 @@ Bitcoin Core requires one of the following compilers. 
| ImageMagick (non-macOS gui) | [link](https://imagemagick.org/) | N/A | 6.4.2 | No | | [MiniUPnPc](../depends/packages/miniupnpc.mk) (networking) | [link](https://miniupnp.tuxfamily.org/) | [2.2.7](https://github.com/bitcoin/bitcoin/pull/29707) | 2.1 | No | | [qrencode](../depends/packages/qrencode.mk) (gui) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | N/A | No | -| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/official_releases/qt/) | 5.15.17 | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | +| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/archive/qt/) | 5.15.18 | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | | libicns (macOS deploy) | [link](https://icns.sourceforge.io/) | N/A | 0.8.1 | No | | librsvg (gui) | [link](https://gitlab.gnome.org/GNOME/librsvg) | N/A | 2.40.21 | No | | [ZeroMQ](../depends/packages/zeromq.mk) (notifications) | [link](https://github.com/zeromq/libzmq/releases) | [4.3.4](https://github.com/bitcoin/bitcoin/pull/23956) | 4.0.0 | No | @@ -40,3 +40,7 @@ Bitcoin Core requires one of the following compilers. 
| [SQLite](../depends/packages/sqlite.mk) (wallet) | [link](https://sqlite.org) | [3.38.5](https://github.com/bitcoin/bitcoin/pull/25378) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No | | Python (scripts, tests) | [link](https://www.python.org) | N/A | [3.10](https://github.com/bitcoin/bitcoin/pull/30527) | No | | [systemtap](../depends/packages/systemtap.mk) ([tracing](tracing.md)) | [link](https://sourceware.org/systemtap/) | [4.8](https://github.com/bitcoin/bitcoin/pull/26945)| N/A | No | +| [capnproto](../depends/packages/capnp.mk) ([multiprocess](multiprocess.md)) | [link](https://capnproto.org/) | [1.2.0](https://github.com/bitcoin/bitcoin/pull/32760)| [0.7.0](https://github.com/bitcoin-core/libmultiprocess/pull/88) | No | +| [libmultiprocess](../depends/packages/libmultiprocess.mk) ([multiprocess](multiprocess.md)) | [link](https://github.com/bitcoin-core/libmultiprocess) | [5.0](https://github.com/bitcoin/bitcoin/pull/31945)| [v5.0-pre1](https://github.com/bitcoin/bitcoin/pull/31740)* | No | + +\* Libmultiprocess 5.x versions should be compatible, but 6.0 and later are not due to bitcoin-core/libmultiprocess#160. diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index 8685cabda8..974f45510d 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-CLI "1" "September 2025" "bitcoin-cli v29.1.0.knots20250903" "User Commands" +.TH BITCOIN-CLI "1" "February 2026" "bitcoin-cli v29.3.0.knots20260210" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.1.0.knots20250903 +bitcoin-cli \- manual page for bitcoin-cli v29.3.0.knots20260210 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.1.0.knots20250903 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Knots RPC client version v29.1.0.knots20250903 +Bitcoin Knots RPC client version v29.3.0.knots20260210 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Knots RPC server. .PP @@ -201,8 +201,8 @@ additional "outonly" (or "o") argument can be passed to see outbound peers only. Pass "help" (or "h") for detailed help documentation. .SH COPYRIGHT -Copyright (C) 2009-2025 The Bitcoin Knots developers -Copyright (C) 2009-2025 The Bitcoin Core developers +Copyright (C) 2009-2026 The Bitcoin Knots developers +Copyright (C) 2009-2026 The Bitcoin Core developers Please contribute if you find Bitcoin Knots useful. Visit for further information about the software. diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index 25a302accc..78eacea16b 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-QT "1" "September 2025" "bitcoin-qt v29.1.0.knots20250903" "User Commands" +.TH BITCOIN-QT "1" "February 2026" "bitcoin-qt v29.3.0.knots20260210" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.1.0.knots20250903 +bitcoin-qt \- manual page for bitcoin-qt v29.3.0.knots20260210 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Knots version v29.1.0.knots20250903 +Bitcoin Knots version v29.3.0.knots20260210 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Knots. .PP @@ -141,7 +141,7 @@ Imports blocks from external file on startup \fB\-lowmem=\fR .IP If system available memory falls below MiB, flush caches (0 to -disable, default: 64) +disable, default: 0) .HP \fB\-maxmempool=\fR .IP @@ -795,7 +795,7 @@ Treat extra data in transactions as at least N vbytes per actual byte \fB\-datacarriersize\fR .IP Maximum size of data in data carrier transactions we relay and mine, in -bytes (default: 42) +bytes (default: 83) .HP \fB\-dustdynamic\fR=\fI\,off\/\fR|[*]target:|[*]mempool: .IP @@ -1054,8 +1054,8 @@ Enable statistics (default: 1 for GUI, 0 otherwise) .IP Set the memory limit target for statistics in bytes (default: 10485760) .SH COPYRIGHT -Copyright (C) 2009-2025 The Bitcoin Knots developers -Copyright (C) 2009-2025 The Bitcoin Core developers +Copyright (C) 2009-2026 The Bitcoin Knots developers +Copyright (C) 2009-2026 The Bitcoin Core developers Please contribute if you find Bitcoin Knots useful. Visit for further information about the software. diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index 54d9bc0faa..332f6f8fc6 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-TX "1" "September 2025" "bitcoin-tx v29.1.0.knots20250903" "User Commands" +.TH BITCOIN-TX "1" "February 2026" "bitcoin-tx v29.3.0.knots20260210" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.1.0.knots20250903 +bitcoin-tx \- manual page for bitcoin-tx v29.3.0.knots20260210 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.1.0.knots20250903 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Knots bitcoin\-tx utility version v29.1.0.knots20250903 +Bitcoin Knots bitcoin\-tx utility version v29.3.0.knots20260210 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP @@ -152,8 +152,8 @@ set=NAME:JSON\-STRING .IP Set register NAME to given JSON\-STRING .SH COPYRIGHT -Copyright (C) 2009-2025 The Bitcoin Knots developers -Copyright (C) 2009-2025 The Bitcoin Core developers +Copyright (C) 2009-2026 The Bitcoin Knots developers +Copyright (C) 2009-2026 The Bitcoin Core developers Please contribute if you find Bitcoin Knots useful. Visit for further information about the software. diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index d4e01901e8..ec23a2dd9c 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-UTIL "1" "September 2025" "bitcoin-util v29.1.0.knots20250903" "User Commands" +.TH BITCOIN-UTIL "1" "February 2026" "bitcoin-util v29.3.0.knots20260210" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.1.0.knots20250903 +bitcoin-util \- manual page for bitcoin-util v29.3.0.knots20260210 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.1.0.knots20250903 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Knots bitcoin\-util utility version v29.1.0.knots20250903 +Bitcoin Knots bitcoin\-util utility version v29.3.0.knots20260210 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS @@ -71,8 +71,8 @@ grind .IP Perform proof of work on hex header string .SH COPYRIGHT -Copyright (C) 2009-2025 The Bitcoin Knots developers -Copyright (C) 2009-2025 The Bitcoin Core developers +Copyright (C) 2009-2026 The Bitcoin Knots developers +Copyright (C) 2009-2026 The Bitcoin Core developers Please contribute if you find Bitcoin Knots useful. Visit for further information about the software. diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index d98c1f0767..400d7473e8 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-WALLET "1" "September 2025" "bitcoin-wallet v29.1.0.knots20250903" "User Commands" +.TH BITCOIN-WALLET "1" "February 2026" "bitcoin-wallet v29.3.0.knots20260210" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.1.0.knots20250903 +bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0.knots20260210 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Knots bitcoin\-wallet utility version v29.1.0.knots20250903 +Bitcoin Knots bitcoin\-wallet utility version v29.3.0.knots20260210 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Knots wallet files. .PP @@ -133,8 +133,8 @@ salvage Attempt to recover private keys from a corrupt wallet. Warning: \&'salvage' is experimental. .SH COPYRIGHT -Copyright (C) 2009-2025 The Bitcoin Knots developers -Copyright (C) 2009-2025 The Bitcoin Core developers +Copyright (C) 2009-2026 The Bitcoin Knots developers +Copyright (C) 2009-2026 The Bitcoin Core developers Please contribute if you find Bitcoin Knots useful. Visit for further information about the software. diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index 095ca6d60a..14498d4d0c 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIND "1" "September 2025" "bitcoind v29.1.0.knots20250903" "User Commands" +.TH BITCOIND "1" "February 2026" "bitcoind v29.3.0.knots20260210" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.1.0.knots20250903 +bitcoind \- manual page for bitcoind v29.3.0.knots20260210 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Knots daemon version v29.1.0.knots20250903 +Bitcoin Knots daemon version v29.3.0.knots20260210 .PP The Bitcoin Knots daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. 
.PP @@ -141,7 +141,7 @@ Imports blocks from external file on startup \fB\-lowmem=\fR .IP If system available memory falls below MiB, flush caches (0 to -disable, default: 64) +disable, default: 0) .HP \fB\-maxmempool=\fR .IP @@ -795,7 +795,7 @@ Treat extra data in transactions as at least N vbytes per actual byte \fB\-datacarriersize\fR .IP Maximum size of data in data carrier transactions we relay and mine, in -bytes (default: 42) +bytes (default: 83) .HP \fB\-dustdynamic\fR=\fI\,off\/\fR|[*]target:|[*]mempool: .IP @@ -1028,8 +1028,8 @@ Enable statistics (default: 1 for GUI, 0 otherwise) .IP Set the memory limit target for statistics in bytes (default: 10485760) .SH COPYRIGHT -Copyright (C) 2009-2025 The Bitcoin Knots developers -Copyright (C) 2009-2025 The Bitcoin Core developers +Copyright (C) 2009-2026 The Bitcoin Knots developers +Copyright (C) 2009-2026 The Bitcoin Core developers Please contribute if you find Bitcoin Knots useful. Visit for further information about the software. diff --git a/doc/release-30635.md b/doc/release-30635.md deleted file mode 100644 index 0ec68e93cc..0000000000 --- a/doc/release-30635.md +++ /dev/null @@ -1,5 +0,0 @@ -Updated RPCs ------------- - -- The waitfornewblock now takes an optional `current_tip` argument. It is also no longer hidden. (#30635) -- The waitforblock and waitforblockheight RPCs are no longer hidden. (#30635) diff --git a/doc/release-notes.md b/doc/release-notes.md index 0f7d89123f..98f654640c 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,9 +1,8 @@ -Bitcoin Knots version 29.1.knots20250903 is now available from: +Bitcoin Knots version 29.3.knots20260210 is now available from: - + -This release includes new features, various bug fixes and performance -improvements, as well as updated translations. +This release includes various bug fixes. Please report bugs using the issue tracker at GitHub: @@ -49,443 +48,135 @@ to do so until/unless that is resolved. 
Notable changes =============== -### P2P and Network Changes - -- libnatpmp has been replaced with a built-in implementation of PCP and - NAT-PMP (still enabled or disabled using the `-natpmp` option). This - supports automatic IPv4 port forwarding as well as IPv6 pinholing. (#30043) - -- NAT-PMP is now enabled by default. This means nodes with `-listen` enabled - (the default) but running behind a firewall, such as a local network router, - will be reachable if the firewall/router supports either of the `PCP` or - `NAT-PMP` protocols. It can be turned off with the `-natpmp=0` option. - (#33004) - -- Upon receiving an orphan transaction (an unconfirmed transaction that spends unknown inputs), the node will attempt to download missing parents from all peers who announced the orphan. This change may increase bandwidth usage but make orphan-handling more reliable. (#31397) - -- In addition to the count-based `-blockreconstructionextratxn` limit on cached - transactions not accepted for relaying, a new - `-blockreconstructionextratxnsize` option has been added to set an upper - limit on the total memory usage consumed by this cache (10 MB by default). - -- The default `-blockreconstructionextratxn` limit is increased to 32768 - transactions. - -- `-peerbloomfilters` is now restricted to localhost by default. If you use - BIP37 wallet software remotely, you should use the - `-whitelist bloomfilter@` configuration. You can also set - `-peerbloomfilters=0` to disable it for localhost, or `-peerbloomfilters=1` - if you wish to provide the service to the entire network. If you wish to - offer it publicly, do note that this service can be resource-intensive. - -### Mempool Policy and Mining Changes - -- The `-maxscriptsize` policy now applies to the entire witness stack of each - input, to address attempts to evade overly-specific targetting. 
- -- Ephemeral anchors is a new concept that allows a single dummy recipient - in a transaction, provided the transaction is zero fee and the "anchor" is - immediately sent in another transaction broadcast together with it. This - allows for smart contracts such as Lightning where neither party can - unilaterally increase the transaction fee, yet using an anchor can create - a followup adding the necessary fee. (#30239) - By default, these anchors are accepted by Bitcoin Knots if and only if they - are minimal size and zero value. If you want a more flexible policy - (allowing for dummy sends and/or dust amounts), or wish to reject these new - anchors entirely, you can use the new `-permitephemeral` option. - There is also a `-permitbareanchor` option which permits (or forbids) - transaction that do not have real recipients (only an anchor). (knots#136) - -- A new `-permitbaredatacarrier` option (default 0 / not permitted) has been - added to control acceptance of transactions with only a datacarrier output - and no real recipients. This is sometimes used to burn bitcoins. (knots#136) - -- The maximum number of potentially executed legacy signature operations in a - single standard transaction is now limited (by default) to 2500. Signature - operations in all previous output scripts, in all input scripts, as well as - all P2SH redeem scripts (if there are any) are counted toward the limit. - (#32521) It can be configured with the `-maxtxlegacysigops` option. - -- A new option `-acceptunknownwitness` has been provided to filter - transactions sending to/using unknown/future witness script versions. While - this should generally be safe, it will also affect batch transactions, which - may be created be unsuspecting third parties who do not pay attention to the - witness version of addresses they send to (this is considered a best - practice). 
For that reason, the new filter is not enabled by default, and if - you wish to use it, you must set `-acceptunknownwitness=0` in your - configuration. - -- Two new options, `-minrelaycoinblocks` and `-minrelaymaturity`, have been - added to restrict transactions relayed/mined to only ones spending bitcoins - with some degree of settlement. The former measures the value of bitcoins - being spent at a rate of 1 BTC per block since their confirmation, while the - second is a strict block-based maturity metric. In both cases, the minimum - must be met by transactions before the node will relay or mine them. These - are both disabled by default. (knots#148) - -- Several policy filters exist to make future protocol changes safer, - collectively classified as "non-mandatory-script-verify-flag" rejections. - Unlike other policies, previous versions of Bitcoin Knots did not allow - disabling these filters. However, this makes recovery difficult when people - accidentally lock their bitcoins behind filtered "upgradable opcodes", and - to accomidate recovery, this version of Knots allows specifying these - rejection reasons to the `ignore_rejects` parameter of `sendrawtransaction`, - thereby overriding the rejection on a per-transaction basis (as with other - filters). Please be responsible with this feature, and note that using it - during a network upgrade may result in creating invalid blocks and lost - mining rewards! - -- The `-rejecttokens` and datacarrier-related policies have been updated to - detect "OLGA" spam. (knots#151) - -### GUI changes - -- The configured "font for amounts" is now consistently used for all monetary - amounts in the GUI. - -- The embedded "Roboto Mono Bold" font has been replaced with a new - "OCR-Bitcoin" font created specifically for Bitcoin Knots. - -- Qt 6.2+ is now supported as an alternative to Qt 5.15 (which remains the - default for precompiled releases). 
As the Qt project no longer supports - version 5.15, it will likely be removed in a future release. To build the - source code using Qt 6, specify -D WITH_QT_VERSION=6 to your cmake command - line. - -- Support for ᵇTBC and ˢTBC units has been removed, since Bitcoin's value has - made them largely unnecessary. The basic TBC unit is now available to all - users without jumping through hoops to install a new Tonal-enabled font. - See [the Bitcoin Wiki page on Tonal Bitcoin](https://en.bitcoin.it/wiki/Tonal_Bitcoin) to learn more about the - (eccentric) tonal bitcoin unit(s). - -### Logging - -Unconditional logging to disk is now rate limited by giving each source location -a quota of 1MiB per hour. Unconditional logging is any logging with a log level -higher than debug, that is `info`, `warning`, and `error`. All logs will be -prefixed with `[*]` if there is at least one source location that is currently -being suppressed. (#32604) - -When `-logsourcelocations` is enabled, the log output now contains the entire -function signature instead of just the function name. (#32604) - -### Updated RPCs - -- The RPC `testmempoolaccept` response now includes a `reject-details` field in some cases, -similar to the complete error messages returned by `sendrawtransaction` (#28121) - -- Duplicate blocks submitted with `submitblock` will now persist their block data -even if it was previously pruned. If pruning is activated, the data will be -pruned again eventually once the block file it is persisted in is selected for -pruning. This is consistent with the behaviour of `getblockfrompeer` where the -block is persisted as well even when pruning. (#31175) - -- `getmininginfo` now returns `nBits` and the current target in the `target` field. It also returns a `next` object which specifies the `height`, `nBits`, `difficulty`, and `target` for the next block. 
(#31583) - -- `getblock` and `getblockheader` now return the current target in the `target` field (#31583) - -- `getblockchaininfo` and `getchainstates` now return `nBits` and the current target in the `target` field (#31583) - -- The newly-unhidden `waitfornewblock` (which simply does not return until a - new block has been received) now takes an optional `current_tip` argument to - avoid a potential race between the new block and the RPC call. If provided, - the RPC will return immediately if the best block already does not match. - (#30635) - -- `waitforblock` (which waits for a specific block hash before returning) and - `waitforblockheight` (which waits for a given height to be reached) are no - longer hidden. (#30635) - -- The `getblocktemplate` RPC `mintime` (BIP23) field now accounts for the - timewarp fix proposed in BIP94 on all networks. This ensures that, in the - event a timewarp fix softfork activates on Bitcoin, un-upgraded miners will - not accidentally violate the timewarp rule. -As a reminder, it's important that any software which uses the `getblocktemplate` -RPC takes these values into account (either `curtime` or `mintime` is fine). -Relying only on a clock can lead to invalid blocks under some circumstances, -especially once a timewarp fix is deployed. (#31600) - -- The `gettxoutproof` and `verifytxoutproof` methods have been extended with a - new Segwit-aware mode (enabled with `prove_witness` and `verify_witness` - named options, respectively). In this mode, the proofs prove the "witness - txid" (wtxid) instead of the traditional transaction id (txid). The format - of these proofs is currently considered experimental and may be changed in - future versions. (#32844) - -- `getpeerinfo` now includes `last_block_announcement` for each peer, for - the most recent time that peer has been the first to notify the local node - of a new block (or zero if it has never been the first). 
(#27052) - The `dumptxoutset` RPC now requires a `type` parameter to be specified. To - have the same behavior before v29, use the "latest" parameter. (#30808) +Numerous wallet bugs have been fixed, including some obscure scenarios that +could delete the wallet (n.b. the ordinary-use disaster in Core 30 never +affected Knots). As a side effect, unsupported BDB versions (any other than +exactly 4.8) may experience reduced compatibility. -Changes to wallet-related RPCs can be found in the Wallet section below. +### P2P -### Updated REST APIs - -- `GET /rest/block/.json` and `GET /rest/headers/.json` now return the current target in the `target` field - -- A new REST API endpoint (`/rest/spenttxouts/BLOCKHASH`) has been introduced - for efficiently fetching spent transaction outputs using the block's undo - data. (#32540) +- #33956 net: fix use-after-free with v2->v1 reconnection logic +- #34025 net: Waste less time in socket handling +- #34028 p2p: saturate LocalServiceInfo::nScore to prevent overflow +- #34093 netif: fix compilation warning in QueryDefaultGatewayImpl() +- Net: Reduce log level for repeated PCP/NAT-PMP NOT_AUTHORIZED failures by default ### Wallet
- -- When bumping transaction fees in the GUI, the "Create Unsigned" option now - opens the PSBT Operations dialog rather than simply copying the raw PSBT to - the clipboard directly. - -### Updated Settings - -- The `-rpcuser` and `-rpcpassword` settings are no longer considered - deprecated, and are expected to remain supported for the immediate future. - (#32423) - -- Previously, `-proxy` specified the proxy for all networks (except I2P which - uses `-i2psam`) and only the Tor proxy could have been specified separately - via `-onion`. Now, the syntax of `-proxy` has been extended and it is possible - to specify separately the proxy for IPv4, IPv6, Tor and CJDNS by appending `=` - followed by the network name, for example `-proxy=127.0.0.1:5555=ipv6` - configures a proxy only for IPv6. The `-proxy` option can be used multiple - times to define different proxies for different networks, such as - `-proxy=127.0.0.1:4444=ipv4 -proxy=10.0.0.1:6666=ipv6`. Later settings - override earlier ones for the same network; this can be used to remove an - earlier all-networks proxy and use direct connections only for a given - network, for example `-proxy=127.0.0.1:5555 -proxy=0=cjdns`. (#32425) - -- The `-maxmempool` startup parameter is now capped on 32-bit systems to - 500MB. (#32530) - -- Handling of negated `-noseednode`, `-nobind`, `-nowhitebind`, `-norpcbind`, `-norpcallowip`, `-norpcwhitelist`, `-notest`, `-noasmap`, `-norpcwallet`, `-noonlynet`, and `-noexternalip` options has changed. Previously negating these options had various confusing and undocumented side effects. Now negating them just resets the settings and restores default behaviors, as if the options were not specified. - -- As a safety check, Bitcoin Knots will **fail to start** when `-blockreservedweight` init parameter value is lower than `2000` weight units. 
Bitcoin Knots will also **fail to start** if the `-blockmaxweight` or `-blockreservedweight` init parameter exceeds consensus limit of `4,000,000 WU`. - -- Passing `-debug=0` or `-debug=none` now behaves like `-nodebug`: previously set debug categories will be cleared, but subsequent `-debug` options will still be applied. - -### Tools and Utilities - -- `bitcoin-cli -netinfo` now includes information about CPU time processing - messages to/from each peer. (#31672) - -- `bitcoin-cli` will now just do the right thing if passed a block hash to - height-or-hash parameters for `gettxoutsetinfo`, `dumptxoutset`, and - `getblockstats`. (#33230) - -### Build System - -The build system has been migrated from Autotools to CMake: - -1. The minimum required CMake version is 3.22. -2. In-source builds are not allowed. When using a subdirectory within the root source tree as a build directory, it is recommended that its name includes the substring "build". -3. CMake variables may be used to configure the build system. See [Autotools to CMake Options Mapping](https://github.com/bitcoinknots/bitcoin-devwiki/wiki/Autotools-to-CMake-Options-Mapping) for details. -4. For single-configuration generators, the default build configuration (`CMAKE_BUILD_TYPE`) is "RelWithDebInfo". However, for the "Release" configuration, CMake defaults to the compiler optimization flag `-O3`, which has not been extensively tested with Bitcoin Knots. Therefore, the build system replaces it with `-O2`. -5. By default, the built executables and libraries are located in the `bin/` and `lib/` subdirectories of the build directory. -6. The build system supports component‐based installation. The names of the installable components coincide with the build target names. For example: -``` -cmake -B build -cmake --build build --target bitcoind -cmake --install build --component bitcoind -``` - -7. 
If any of the `CPPFLAGS`, `CFLAGS`, `CXXFLAGS` or `LDFLAGS` environment variables were used in your Autotools-based build process, you should instead use the corresponding CMake variables (`APPEND_CPPFLAGS`, `APPEND_CFLAGS`, `APPEND_CXXFLAGS` and `APPEND_LDFLAGS`). Alternatively, if you opt to use the dedicated `CMAKE_<...>_FLAGS` variables, you must ensure that the resulting compiler or linker invocations are as expected. - -For more detailed guidance on configuring and using CMake, please refer to the official [CMake documentation](https://cmake.org/cmake/help/latest/) and [CMake’s User Interaction Guide](https://cmake.org/cmake/help/latest/guide/user-interaction/index.html). Additionally, consult platform-specific `doc/build-*.md` build guides for instructions tailored to your operating system. - -### Software Expiration - -Since v0.14.2.knots20170618, each new version of Bitcoin Knots by default -expires 1-2 years after its release (during November). This is a security -precaution to help ensure nodes remain kept up to date. - -New in this version, Bitcoin Knots will provide a warning 4 weeks prior to -expiry and send an alert (see `-alertnotify`). When the expiry is reached, -the warning will be updated and another alert sent. Mining will also be -disabled at that time. - -This is an optional feature. You may disable it by setting `softwareexpiry=0` -in your config file, but this is strongly discouraged without some other form -of update reminders. You may also set `softwareexpiry` to any other POSIX -timestamp, to trigger an expiration at that time instead. - -## Low-Level Changes - -### Consensus - -- Previously, if a node was restarted during a block race (two parallel blocks - with equally best work), there was a random chance the node would switch to - a different one than it had chosen prior to the restart. This has changed so - that the currently-active chain remains the same. 
(#29640) - -### Tools and Utilities - -- A new tool [`utxo_to_sqlite.py`](/contrib/utxo-tools/utxo_to_sqlite.py) - converts a compact-serialized UTXO snapshot (as created with the - `dumptxoutset` RPC) to a SQLite3 database. Refer to the script's `--help` - output for more details. (#27432) - -### Service definitions - -- The included OpenRC service has been adapted to FHS 3.0 and provides a new - BITCOIND_LOGDIR variable to control where the debug.log file is written. - -- The OpenRC service will now give the RPC cookie file group-readable access, - so that other programs running in the $BITCOIND_GROUP (by default, - 'bitcoin') can access the RPC server automatically. - -- The OpenRC service starts bitcoind in the background, and only becomes - active (to trigger dependent services) when the node and RPC server has - initialised. This ensures the node is accessible before any services - relying on it start, without blocking other unrelated system services. - (#24066) - -### Stability - -- During initial synchronisation (as well as reindexing), the node will now - write its progress to disk at least once an hour, instead of the previous - 24 hour wait on systems with lots of memory and large dbcache configuration. - This should avoid as much lost progress in the event of interruption, and - improve shutdown speeds. (#30611, #32414) - -### Tests - -- The BIP94 timewarp attack mitigation (designed for testnet4) is no longer active on the regtest network. (#31156) - -### Dependencies - -- Building the GUI from source now requires rsvg-convert (often packaged as - librsvg2-bin, librsvg2-tools, or simply librsvg), ImageMagick (except on - macOS), and libicns (only for macOS). - -- libnatpmp has been removed as a dependency (#31130, #30043). +- #31423 wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet +- #32273 wallet: Fix relative path backup during migration. 
+- #33268 wallet: Identify transactions spending 0-value outputs, and add tests for anchor outputs in a wallet +- #34156 wallet: fix unnamed legacy wallet migration failure +- #34226 wallet: test: Relative wallet failed migration cleanup +- #34123 wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet +- #34176 wallet: crash fix, handle non-writable db directories +- #34215 wallettool: fix unnamed createfromdump failure walletsdir deletion +- #34370 wallet: Additional cleanups for migration, and fixes for createfromdump with BDB +- knots#242 Fix bugs in various BDB wallet edge cases +- knots#255 Wallet: Even if addresstype==legacy, use non-legacy change if there's no legacy sPKman +- Wallet/bdb: Use LogWarning/LogError as appropriate +- Bugfix: Fee estimation: Refactor logic to avoid unlikely unsigned overflow in TxConfirmStats::Read +- Bugfix: Wallet/bdb: Catch exceptions in MakeBerkeleyDatabase +- Wallet/bdb: improve error msg when db directory is not writable + +### Mempool + +- knots#246 Bugfix: txmempool: Fallback to CTxMemPoolEntry copying if Boost is too old for node extraction + +### Block and transaction handling + +- #34462 util: Drop *BSD headers in `batchpriority.cpp` + +### GUI + +- gui#899 Modernize custom filtering +- gui#924 Show an error message if the restored wallet name is empty +- knots#197 GUI: Visually move and rephrase port mapping checkboxes +- knots#244 Bugfix: GUI: Queue stylesheet changes within eventFilters +- knots#245 GUI: Minor: Fix typo in options dialog tooltip +- Revert bringToFront Wayland workaround for Qt versions >=6.3.2 with the bug fixed + +### Build + +- #34227 guix: Fix `osslsigncode` tests +- secp256k1#1749 build: Fix warnings in x86_64 assembly check +- depends: Qt 5.15.18 + +### Documentation + +- #33623 doc: document capnproto and libmultiprocess deps in 29.x +- #33993 init: point out -stopatheight may be imprecise +- #34252 doc: add 433 (Pay to Anchor) to bips.md + +### Test + +- #32588 
test: Allow testing of check failures +- #33612 test: change log rate limit version gate +- #33915 test: Retry download in get_previous_releases.py +- #33990 test: p2p: check that peer's announced starting height is remembered +- #34185 test: fix `feature_pruning` when built without wallet +- #34282 qa: Fix Windows logging bug +- #34372 QA: wallet_migration: Test several more weird scenarios +- #34369 test: Scale NetworkThread close timeout with timeout_factor + +### Misc + +- #29678 Bugfix: init: For first-run disk space check, round up pruned size requirement +- #32513 ci: remove 3rd party js from windows dll gha job +- #33508 ci: fix buildx gha cache authentication on forks +- #33581 ci: Properly include $FILE_ENV in DEPENDS_HASH +- #33813 Capitalise rpcbind-ignored warning message +- #33960 log: Use more severe log level (warn/err) where appropriate +- #34161 refactor: avoid possible UB from `std::distance` for `nullptr` args +- #34224 init: Return EXIT_SUCCESS on interrupt +- #34235 miniminer: stop assuming ancestor fees >= self fees +- #34253 validation: cache tip recency for lock-free IsInitialBlockDownload() +- #34272 psbt: Fix `PSBTInputSignedAndVerified` bounds `assert` +- #34293 Bugfix: net_processing: Restore missing comma between peer and peeraddr in "receive version message" and "New ___ peer connected" +- #34328 rpc: make `uptime` monotonic across NTP jumps +- #34344 ci: update GitHub Actions versions +- #34436 util: add overflow-safe `CeilDiv` helper +- bitcoin-core/leveldb-subtree#58 Initialize file_size to 0 to avoid UB +- secp256k1#1731 schnorrsig: Securely clear buf containing k or its negation +- Bugfix: Rework MSVCRT workaround to correctly exclusive-open on Windows Credits ======= -Thanks to everyone who directly contributed to this release: - -- 0xB10C -- achow101 -- Adlai Chandrasekhar -- Afanti -- Alfonso Roman Zubeldia -- am-sq -- Andre -- Andre Alves -- Andrew Toth +Thanks to everyone who contributed to this release, including but not 
necessarily limited to: + +- ANAVHEOBA - Anthony Towns - Antoine Poinsot -- Ash Manning - Ataraxia - Ava Chow -- benthecarman -- bigspider -- Boris Nagaev -- Brandon Odiwuor -- Bufo - brunoerg -- Chris Stewart -- Cory Fields -- costcould -- Crypt-iQ -- Daniel Pfeifer -- Daniela Brozzoni +- Carlo Antinarella +- codeabysss - David Gumberg -- deadmanoz -- dergoegge -- enirox001 -- epysqyli -- espi3 -- Eval EXEC -- Fabian Jahr +- Eugene Siegel - fanquake +- Felipe Micaroni Lalli +- Fonta1n3 - furszy -- Gabriele Bocchi - glozow -- Greg Sanders -- Gutflo -- Haoran Peng -- Haowen Liu - Hennadii Stepanov -- Hodlinator -- i-am-yuvi -- ion- - ismaelsadeeq -- Jadi -- James O'Beirne -- jb55 -- Jeremy Rand -- jlopp -- Jon Atack -- josibake -- jurraca -- Kay -- kevkevinpal -- Kurtis Stirling -- l0rinc -- laanwj -- Larry Ruane -- Léo Haf +- jestory +- John Moffett - Lőrinc -- luisschwab -- Maciej S. Szmigiero -- Mackain +- Luke Dashjr +- m3dwards - MarcoFalke -- marcofleon -- Marnix -- Martin Leitner-Ankerl -- Martin Saposnic - Martin Zumsande -- Matt Whitlock -- Matthew Zipkin -- Max Edwards -- Michael Dietz -- monlovesmango -- naiyoma -- nervana21 -- Nicola Leonardo Susca -- Novo -- omahs -- omg21btc -- pablomartin4btc +- Michael Dance +- Navneet Singh +- Padraic Slattery +- Patrick Strateman - Pieter Wuille -- Pithosian -- R E Broadley -- Randall Naar -- RiceChuan -- rkrux -- romanz -- Roman Zeyde -- Ryan Ofsky +- Russell Yanofsky +- SatsAndSports - Sebastian Falbesoner -- secp512k2 -- Sergi Delgado Segura -- shiny -- Shunsuke Shimizu -- Simon -- Sjors Provoost -- Skyler -- stickies-v -- Suhas Daftuar -- tdb3 -- TheCharlatan -- theStack -- tianzedavid -- Tomás Andróil -- Torkel Rogstad +- sedited - Vasil Dimov -- w0xlt -- wgyt - willcl-ark -- yancy -- zaidmstrr diff --git a/share/examples/bitcoin.conf b/share/examples/bitcoin.conf index 211fd55f9a..b0a7127041 100644 --- a/share/examples/bitcoin.conf +++ b/share/examples/bitcoin.conf @@ -109,7 +109,7 @@ #loadblock= # If system 
available memory falls below MiB, flush caches (0 to -# disable, default: 64) +# disable, default: 0) #lowmem= # Keep the transaction memory pool below megabytes (default: 300) @@ -656,7 +656,7 @@ #datacarriercost=1 # Maximum size of data in data carrier transactions we relay and mine, in -# bytes (default: 42) +# bytes (default: 83) #datacarriersize=1 # Automatically raise dustrelayfee based on either the expected fee to be diff --git a/share/setup.nsi.in b/share/setup.nsi.in index 9649a2bd07..7a0bbd811c 100644 --- a/share/setup.nsi.in +++ b/share/setup.nsi.in @@ -112,6 +112,15 @@ Section -post SEC0001 WriteRegStr HKCR "@CLIENT_TARNAME@" "" "URL:Bitcoin" WriteRegStr HKCR "@CLIENT_TARNAME@\DefaultIcon" "" $INSTDIR\@BITCOIN_GUI_NAME@@EXEEXT@ WriteRegStr HKCR "@CLIENT_TARNAME@\shell\open\command" "" '"$INSTDIR\@BITCOIN_GUI_NAME@@EXEEXT@" "%1"' + + DeleteRegValue HKCU "${REGKEY} (64-bit)\Components" Main + DeleteRegKey HKCU "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\$(^Name) (64-bit)" + Delete /REBOOTOK "$SMPROGRAMS\$StartMenuGroup\Uninstall $(^Name) (64-bit).lnk" + Delete /REBOOTOK "$SMPROGRAMS\$StartMenuGroup\$(^Name) (64-bit).lnk" + DeleteRegValue HKCU "${REGKEY} (64-bit)" StartMenuGroup + DeleteRegValue HKCU "${REGKEY} (64-bit)" Path + DeleteRegKey /IfEmpty HKCU "${REGKEY} (64-bit)\Components" + DeleteRegKey /IfEmpty HKCU "${REGKEY} (64-bit)" SectionEnd # Macro for selecting uninstaller sections diff --git a/src/addrman.cpp b/src/addrman.cpp index 9c3a24db90..5cd1d41fd9 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -1055,7 +1055,7 @@ void AddrManImpl::Check() const const int err{CheckAddrman()}; if (err) { - LogPrintf("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i\n", err); + LogError("ADDRMAN CONSISTENCY CHECK FAILED!!! 
err=%i", err); assert(false); } } diff --git a/src/arith_uint256.h b/src/arith_uint256.h index 60b371f6d3..b035a02c00 100644 --- a/src/arith_uint256.h +++ b/src/arith_uint256.h @@ -36,20 +36,8 @@ class base_uint pn[i] = 0; } - base_uint(const base_uint& b) - { - for (int i = 0; i < WIDTH; i++) - pn[i] = b.pn[i]; - } - - base_uint& operator=(const base_uint& b) - { - if (this != &b) { - for (int i = 0; i < WIDTH; i++) - pn[i] = b.pn[i]; - } - return *this; - } + base_uint(const base_uint& b) = default; + base_uint& operator=(const base_uint& b) = default; base_uint(uint64_t b) { @@ -276,6 +264,9 @@ class arith_uint256 : public base_uint<256> { friend arith_uint256 UintToArith256(const uint256 &); }; +// Keeping the trivially copyable property is beneficial for performance +static_assert(std::is_trivially_copyable_v); + uint256 ArithToUint256(const arith_uint256 &); arith_uint256 UintToArith256(const uint256 &); diff --git a/src/bench/xor.cpp b/src/bench/xor.cpp index fc9dc5d172..020de08612 100644 --- a/src/bench/xor.cpp +++ b/src/bench/xor.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -14,10 +15,12 @@ static void Xor(benchmark::Bench& bench) { FastRandomContext frc{/*fDeterministic=*/true}; auto data{frc.randbytes(1024)}; - auto key{frc.randbytes(31)}; + const Obfuscation obfuscation{frc.randbytes()}; + size_t offset{0}; bench.batch(data.size()).unit("byte").run([&] { - util::Xor(data, key); + obfuscation(data, offset++); // mutated differently each time + ankerl::nanobench::doNotOptimizeAway(data); }); } diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 86435410f5..03cb54b996 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -483,6 +483,8 @@ class NetinfoRequestHandler : public BaseRequestHandler str += 'T'; } else if (s == "UTREEXO_TMP?") { str += 'y'; + } else if (s == "REDUCED_DATA?") { + str += '4'; } else { str += ToLower(s[0]); } @@ -765,6 +767,7 @@ class NetinfoRequestHandler : public BaseRequestHandler 
" \"T\" - UTREEXO_ARCHIVE peer can handle Utreexo proof requests for all historical blocks\n" " \"y\" - UTREEXO_TMP? peer can handle Utreexo proof requests\n" " \"r\" - REPLACE_BY_FEE? peer supports replacement of transactions without BIP 125 signalling\n" + " \"4\" - REDUCED_DATA? peer enforces the ReducedData SoftFork\n" " \"m\" - MALICIOUS? peer openly seeks to aid in bypassing network policy/spam filters (OR to sabotage nodes that seek to)\n" " \"u\" - UNKNOWN: unrecognized bit flag\n" " v Version of transport protocol used for the connection\n" diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp index 5f4061a71d..b8f30103a4 100644 --- a/src/blockencodings.cpp +++ b/src/blockencodings.cpp @@ -180,11 +180,10 @@ bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const return txn_available[index] != nullptr; } -ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector& vtx_missing) +ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector& vtx_missing, bool segwit_active) { if (header.IsNull()) return READ_STATUS_INVALID; - uint256 hash = header.GetHash(); block = header; block.vtx.resize(txn_available.size()); @@ -205,24 +204,22 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector< if (vtx_missing.size() != tx_missing_offset) return READ_STATUS_INVALID; - BlockValidationState state; - CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock; - if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) { - // TODO: We really want to just check merkle tree manually here, - // but that is expensive, and CheckBlock caches a block's - // "checked-status" (in the CBlock?). CBlock should be able to - // check its own merkle root and cache that check. 
- if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) - return READ_STATUS_FAILED; // Possible Short ID collision - return READ_STATUS_CHECKBLOCK_FAILED; + // Check for possible mutations early now that we have a seemingly good block + IsBlockMutatedFn check_mutated{m_check_block_mutated_mock ? m_check_block_mutated_mock : IsBlockMutated}; + if (check_mutated(/*block=*/block, + /*check_witness_root=*/segwit_active)) { + return READ_STATUS_FAILED; // Possible Short ID collision } + if (LogAcceptCategory(BCLog::CMPCTBLOCK, BCLog::Level::Debug)) { + const uint256 hash{block.GetHash()}; // avoid cleared header LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size()); if (vtx_missing.size() < 5) { for (const auto& tx : vtx_missing) { LogDebug(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString()); } } + } return READ_STATUS_OK; } diff --git a/src/blockencodings.h b/src/blockencodings.h index c92aa05e80..fce59bc561 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -84,8 +84,6 @@ typedef enum ReadStatus_t READ_STATUS_OK, READ_STATUS_INVALID, // Invalid object, peer is sending bogus crap READ_STATUS_FAILED, // Failed to process object - READ_STATUS_CHECKBLOCK_FAILED, // Used only by FillBlock to indicate a - // failure in CheckBlock. 
} ReadStatus; class CBlockHeaderAndShortTxIDs { @@ -141,15 +139,16 @@ class PartiallyDownloadedBlock { CBlockHeader header; // Can be overridden for testing - using CheckBlockFn = std::function; - CheckBlockFn m_check_block_mock{nullptr}; + using IsBlockMutatedFn = std::function; + IsBlockMutatedFn m_check_block_mutated_mock{nullptr}; explicit PartiallyDownloadedBlock(CTxMemPool* poolIn) : pool(poolIn) {} // extra_txn is a list of extra orphan/conflicted/etc transactions to look at ReadStatus InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector& extra_txn); bool IsTxAvailable(size_t index) const; - ReadStatus FillBlock(CBlock& block, const std::vector& vtx_missing); + // segwit_active enforces witness mutation checks just before reporting a healthy status + ReadStatus FillBlock(CBlock& block, const std::vector& vtx_missing, bool segwit_active); }; #endif // BITCOIN_BLOCKENCODINGS_H diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 7290b31479..c123fa5be5 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -77,8 +77,8 @@ void ReadRegTestArgs(const ArgsManager& args, CChainParams::RegTestOptions& opti for (const std::string& strDeployment : args.GetArgs("-vbparams")) { std::vector vDeploymentParams = SplitString(strDeployment, ':'); - if (vDeploymentParams.size() < 3 || 4 < vDeploymentParams.size()) { - throw std::runtime_error("Version bits parameters malformed, expecting deployment:start:end[:min_activation_height]"); + if (vDeploymentParams.size() < 3 || 7 < vDeploymentParams.size()) { + throw std::runtime_error("Version bits parameters malformed, expecting deployment:start:end[:min_activation_height[:max_activation_height[:active_duration[:threshold]]]]"); } CChainParams::VersionBitsParameters vbparams{}; if (!ParseInt64(vDeploymentParams[1], &vbparams.start_time)) { @@ -94,12 +94,31 @@ void ReadRegTestArgs(const ArgsManager& args, CChainParams::RegTestOptions& opti } else { vbparams.min_activation_height = 0; } + if 
(vDeploymentParams.size() >= 5) { + if (!ParseInt32(vDeploymentParams[4], &vbparams.max_activation_height)) { + throw std::runtime_error(strprintf("Invalid max_activation_height (%s)", vDeploymentParams[4])); + } + } + if (vDeploymentParams.size() >= 6) { + if (!ParseInt32(vDeploymentParams[5], &vbparams.active_duration)) { + throw std::runtime_error(strprintf("Invalid active_duration (%s)", vDeploymentParams[5])); + } + } + if (vDeploymentParams.size() >= 7) { + if (!ParseInt32(vDeploymentParams[6], &vbparams.threshold)) { + throw std::runtime_error(strprintf("Invalid threshold (%s)", vDeploymentParams[6])); + } + } + // Validate that timeout and max_activation_height are mutually exclusive + if (vbparams.timeout != Consensus::BIP9Deployment::NO_TIMEOUT && vbparams.max_activation_height < std::numeric_limits::max()) { + throw std::runtime_error(strprintf("Cannot specify both timeout (%ld) and max_activation_height (%d) for deployment %s. Use timeout for BIP9 or max_activation_height for mandatory activation deadline, not both.", vbparams.timeout, vbparams.max_activation_height, vDeploymentParams[0])); + } bool found = false; for (int j=0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { if (vDeploymentParams[0] == VersionBitsDeploymentInfo[j].name) { options.version_bits_parameters[Consensus::DeploymentPos(j)] = vbparams; found = true; - LogPrintf("Setting version bits activation parameters for %s to start=%ld, timeout=%ld, min_activation_height=%d\n", vDeploymentParams[0], vbparams.start_time, vbparams.timeout, vbparams.min_activation_height); + LogPrintf("Setting version bits activation parameters for %s to start=%ld, timeout=%ld, min_activation_height=%d, max_activation_height=%d, active_duration=%d, threshold=%d\n", vDeploymentParams[0], vbparams.start_time, vbparams.timeout, vbparams.min_activation_height, vbparams.max_activation_height, vbparams.active_duration, vbparams.threshold); break; } } diff --git a/src/clientversion.cpp 
b/src/clientversion.cpp index 4363f3c87f..756ecc54e0 100644 --- a/src/clientversion.cpp +++ b/src/clientversion.cpp @@ -71,9 +71,10 @@ std::string FormatSubVersion(const std::string& name, int nClientVersion, const if (!base_name_only) { static const auto ua_knots = []() -> std::string { const auto pos{CLIENT_BUILD.find(".pyblock")}; - return "PyBLOCK:" + CLIENT_BUILD.substr(pos + 8) + "/"; + return "PyBLOCK+BIP110:" + CLIENT_BUILD.substr(pos + 8) + "/"; }(); ua += ua_knots; + ua += "UASF-BIP110:0.4/"; } return ua; } @@ -85,7 +86,7 @@ std::string CopyrightHolders(const std::string& strPrefix) // Make sure Bitcoin Core copyright is not removed by accident if (copyright_devs.find("Bitcoin Core") == std::string::npos) { - strCopyrightHolders += "\n" + strPrefix + "The Bitcoin Core developers"; + strCopyrightHolders += "\n" + strPrefix + "The Bitcoin Core Spammers"; } return strCopyrightHolders; } diff --git a/src/common/args.cpp b/src/common/args.cpp index dc3f056fcc..4d5e3cf31b 100644 --- a/src/common/args.cpp +++ b/src/common/args.cpp @@ -118,7 +118,7 @@ std::optional InterpretValue(const KeyInfo& key, const st } // Double negatives like -nofoo=0 are supported (but discouraged) if (value && !InterpretBool(*value)) { - LogPrintf("Warning: parsed potentially confusing double-negative -%s=%s\n", key.name, *value); + LogWarning("Parsed potentially confusing double-negative -%s=%s", key.name, *value); return true; } return false; @@ -397,7 +397,7 @@ static void SaveErrors(const std::vector errors, std::vectoremplace_back(error); } else { - LogPrintf("%s\n", error); + LogWarning("%s", error); } } } @@ -419,7 +419,7 @@ bool ArgsManager::ReadSettingsFile(std::vector* errors) for (const auto& setting : m_settings.rw_settings) { KeyInfo key = InterpretKey(setting.first); // Split setting key into section and argname if (!GetArgFlags('-' + key.name)) { - LogPrintf("Ignoring unknown rw_settings value %s\n", setting.first); + LogWarning("Ignoring unknown rw_settings value %s", 
setting.first); } } return true; diff --git a/src/common/config.cpp b/src/common/config.cpp index 8fde9ef637..59dec7566b 100644 --- a/src/common/config.cpp +++ b/src/common/config.cpp @@ -85,7 +85,7 @@ bool IsConfSupported(KeyInfo& key, std::string& error) { if (key.name == "reindex") { // reindex can be set in a config file but it is strongly discouraged as this will cause the node to reindex on // every restart. Allow the config but throw a warning - LogPrintf("Warning: reindex=1 is set in the configuration file, which will significantly slow down startup. Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary\n"); + LogWarning("reindex=1 is set in the configuration file, which will significantly slow down startup. Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary"); return true; } return true; @@ -113,7 +113,7 @@ bool ArgsManager::ReadConfigStream(std::istream& stream, const std::string& file m_settings.ro_config[key.section][key.name].push_back(*value); } else { if (ignore_invalid_keys) { - LogPrintf("Ignoring unknown configuration value %s\n", option.first); + LogWarning("Ignoring unknown configuration value %s", option.first); } else { error = strprintf("Invalid configuration value %s", option.first); return false; diff --git a/src/common/netif.cpp b/src/common/netif.cpp index 7424f977c7..7c85db28cf 100644 --- a/src/common/netif.cpp +++ b/src/common/netif.cpp @@ -93,7 +93,12 @@ std::optional QueryDefaultGatewayImpl(sa_family_t family) return std::nullopt; } - for (nlmsghdr* hdr = (nlmsghdr*)response; NLMSG_OK(hdr, recv_result); hdr = NLMSG_NEXT(hdr, recv_result)) { +#if defined(__FreeBSD_version) && __FreeBSD_version >= 1500029 + using recv_result_t = size_t; +#else + using recv_result_t = int64_t; +#endif + for (nlmsghdr* hdr = (nlmsghdr*)response; 
NLMSG_OK(hdr, static_cast(recv_result)); hdr = NLMSG_NEXT(hdr, recv_result)) { rtmsg* r = (rtmsg*)NLMSG_DATA(hdr); int remaining_len = RTM_PAYLOAD(hdr); diff --git a/src/common/pcp.cpp b/src/common/pcp.cpp index d0d4955470..12c6fb4c08 100644 --- a/src/common/pcp.cpp +++ b/src/common/pcp.cpp @@ -15,6 +15,9 @@ #include #include #include +#include + +bool g_pcp_warn_for_unauthorized{false}; namespace { @@ -80,6 +83,8 @@ constexpr size_t NATPMP_MAP_RESPONSE_LIFETIME_OFS = 12; constexpr uint8_t NATPMP_RESULT_SUCCESS = 0; //! Result code representing unsupported version. constexpr uint8_t NATPMP_RESULT_UNSUPP_VERSION = 1; +//! Result code representing not authorized (router doesn't support port mapping). +constexpr uint8_t NATPMP_RESULT_NOT_AUTHORIZED = 2; //! Result code representing lack of resources. constexpr uint8_t NATPMP_RESULT_NO_RESOURCES = 4; @@ -143,6 +148,8 @@ constexpr size_t PCP_MAP_EXTERNAL_IP_OFS = 20; //! Result code representing success (RFC6887 7.4), shared with NAT-PMP. constexpr uint8_t PCP_RESULT_SUCCESS = NATPMP_RESULT_SUCCESS; +//! Result code representing not authorized (RFC6887 7.4), shared with NAT-PMP. +constexpr uint8_t PCP_RESULT_NOT_AUTHORIZED = NATPMP_RESULT_NOT_AUTHORIZED; //! Result code representing lack of resources (RFC6887 7.4). constexpr uint8_t PCP_RESULT_NO_RESOURCES = 8; @@ -217,7 +224,8 @@ CNetAddr PCPUnwrapAddress(Span wrapped_addr) //! PCP or NAT-PMP send-receive loop. std::optional> PCPSendRecv(Sock &sock, const std::string &protocol, Span request, int num_tries, std::chrono::milliseconds timeout_per_try, - std::function)> check_packet) + std::function)> check_packet, + CThreadInterrupt& interrupt) { using namespace std::chrono; // UDP is a potentially lossy protocol, so we try to send again a few times. @@ -230,7 +238,7 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p } // Dispatch packet to gateway. 
if (sock.Send(request.data(), request.size(), 0) != static_cast(request.size())) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "%s: Could not send request: %s\n", protocol, NetworkErrorString(WSAGetLastError())); + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Could not send request: %s\n", protocol, NetworkErrorString(WSAGetLastError())); return std::nullopt; // Network-level error, probably no use retrying. } @@ -238,6 +246,7 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p auto cur_time = time_point_cast(MockableSteadyClock::now()); auto deadline = cur_time + timeout_per_try; while ((cur_time = time_point_cast(MockableSteadyClock::now())) < deadline) { + if (interrupt) return std::nullopt; Sock::Event occurred = 0; if (!sock.Wait(deadline - cur_time, Sock::RECV, &occurred)) { LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "%s: Could not wait on socket: %s\n", protocol, NetworkErrorString(WSAGetLastError())); @@ -251,7 +260,7 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p // Receive response. recvsz = sock.Recv(response, sizeof(response), MSG_DONTWAIT); if (recvsz < 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "%s: Could not receive response: %s\n", protocol, NetworkErrorString(WSAGetLastError())); + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Could not receive response: %s\n", protocol, NetworkErrorString(WSAGetLastError())); return std::nullopt; // Network-level error, probably no use retrying. 
} LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Received response of %d bytes: %s\n", protocol, recvsz, HexStr(Span(response, recvsz))); @@ -271,7 +280,7 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p } -std::variant NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, int num_tries, std::chrono::milliseconds timeout_per_try) +std::variant NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, CThreadInterrupt& interrupt, int num_tries, std::chrono::milliseconds timeout_per_try) { struct sockaddr_storage dest_addr; socklen_t dest_addrlen = sizeof(struct sockaddr_storage); @@ -319,7 +328,8 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g return false; // Wasn't response to what we expected, try receiving next packet. } return true; - }); + }, + interrupt); struct in_addr external_addr; if (recv_res) { @@ -361,13 +371,26 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g return false; // Wasn't response to what we expected, try receiving next packet. } return true; - }); + }, + interrupt); if (recv_res) { const std::span response = *recv_res; Assume(response.size() >= NATPMP_MAP_RESPONSE_SIZE); uint16_t result_code = ReadBE16(response.data() + NATPMP_RESPONSE_HDR_RESULT_OFS); + static bool already_warned_for_unauthorized{false}; + if (result_code == NATPMP_RESULT_NOT_AUTHORIZED) { + if (already_warned_for_unauthorized && !g_pcp_warn_for_unauthorized) { + // NOT_AUTHORIZED is expected on many routers that don't support port mapping. 
+ LogDebug(BCLog::NET, "natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + return MappingError::PROTOCOL_ERROR; + } else { + already_warned_for_unauthorized = true; + } + } else { + already_warned_for_unauthorized = false; + } if (result_code != NATPMP_RESULT_SUCCESS) { LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); if (result_code == NATPMP_RESULT_NO_RESOURCES) { @@ -384,7 +407,7 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g } } -std::variant PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, int num_tries, std::chrono::milliseconds timeout_per_try) +std::variant PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, CThreadInterrupt& interrupt, int num_tries, std::chrono::milliseconds timeout_per_try) { struct sockaddr_storage dest_addr, bind_addr; socklen_t dest_addrlen = sizeof(struct sockaddr_storage), bind_addrlen = sizeof(struct sockaddr_storage); @@ -484,7 +507,8 @@ std::variant PCPRequestPortMap(const PCPMappingNonc return false; // Wasn't response to what we expected, try receiving next packet. 
} return true; - }); + }, + interrupt); if (!recv_res) { return MappingError::NETWORK_ERROR; @@ -501,6 +525,18 @@ std::variant PCPRequestPortMap(const PCPMappingNonc uint32_t lifetime_ret = ReadBE32(response.data() + PCP_HDR_LIFETIME_OFS); uint16_t external_port = ReadBE16(response.data() + PCP_HDR_SIZE + PCP_MAP_EXTERNAL_PORT_OFS); CNetAddr external_addr{PCPUnwrapAddress(response.subspan(PCP_HDR_SIZE + PCP_MAP_EXTERNAL_IP_OFS, ADDR_IPV6_SIZE))}; + static bool already_warned_for_unauthorized{false}; + if (result_code == PCP_RESULT_NOT_AUTHORIZED) { + if (already_warned_for_unauthorized && !g_pcp_warn_for_unauthorized) { + // NOT_AUTHORIZED is expected on many routers that don't support port mapping. + LogDebug(BCLog::NET, "pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + return MappingError::PROTOCOL_ERROR; + } else { + already_warned_for_unauthorized = true; + } + } else { + already_warned_for_unauthorized = false; + } if (result_code != PCP_RESULT_SUCCESS) { LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Mapping failed with result %s\n", PCPResultString(result_code)); if (result_code == PCP_RESULT_NO_RESOURCES) { diff --git a/src/common/pcp.h b/src/common/pcp.h index 44f9285c27..3d2a7ea7f7 100644 --- a/src/common/pcp.h +++ b/src/common/pcp.h @@ -6,6 +6,7 @@ #define BITCOIN_COMMON_PCP_H #include +#include #include @@ -43,6 +44,8 @@ struct MappingResult { std::string ToString() const; }; +extern bool g_pcp_warn_for_unauthorized; + //! Try to open a port using RFC 6886 NAT-PMP. IPv4 only. //! //! * gateway: Destination address for PCP requests (usually the default gateway). @@ -51,7 +54,7 @@ struct MappingResult { //! * num_tries: Number of tries in case of no response. //! //! Returns the external_ip:external_port of the mapping if successful, otherwise a MappingError. 
-std::variant NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000)); +std::variant NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, CThreadInterrupt& interrupt, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000)); //! Try to open a port using RFC 6887 Port Control Protocol (PCP). Handles IPv4 and IPv6. //! @@ -63,6 +66,6 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g //! * num_tries: Number of tries in case of no response. //! //! Returns the external_ip:external_port of the mapping if successful, otherwise a MappingError. -std::variant PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000)); +std::variant PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, CThreadInterrupt& interrupt, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000)); #endif // BITCOIN_COMMON_PCP_H diff --git a/src/common/system.cpp b/src/common/system.cpp index 7af792db44..590bfd2abb 100644 --- a/src/common/system.cpp +++ b/src/common/system.cpp @@ -30,9 +30,6 @@ using util::ReplaceAll; -// Application startup time (used for uptime calculation) -const int64_t nStartupTime = GetTime(); - #ifndef WIN32 std::string ShellEscape(const std::string& arg) { @@ -51,8 +48,9 @@ void runCommand(const std::string& strCommand) #else int nErr = ::_wsystem(std::wstring_convert,wchar_t>().from_bytes(strCommand).c_str()); #endif - if (nErr) - LogPrintf("runCommand error: system(%s) returned %d\n", strCommand, nErr); + if (nErr) { + LogWarning("runCommand error: system(%s) returned %d", strCommand, nErr); + } } #endif @@ -105,8 
+103,8 @@ int GetNumCores() return std::thread::hardware_concurrency(); } -// Obtain the application startup time (used for uptime calculation) -int64_t GetStartupTime() -{ - return nStartupTime; -} +namespace { + const auto g_startup_time{SteadyClock::now()}; +} // namespace + +SteadyClock::duration GetUptime() { return SteadyClock::now() - g_startup_time; } diff --git a/src/common/system.h b/src/common/system.h index a4b56be9ac..21841b789b 100644 --- a/src/common/system.h +++ b/src/common/system.h @@ -7,12 +7,14 @@ #define BITCOIN_COMMON_SYSTEM_H #include // IWYU pragma: keep +#include +#include #include #include -// Application startup time (used for uptime calculation) -int64_t GetStartupTime(); +/// Monotonic uptime (not affected by system time changes). +SteadyClock::duration GetUptime(); void SetupEnvironment(); [[nodiscard]] bool SetupNetworking(); diff --git a/src/consensus/consensus.h b/src/consensus/consensus.h index cffe9cdafd..b02773f490 100644 --- a/src/consensus/consensus.h +++ b/src/consensus/consensus.h @@ -34,4 +34,7 @@ static constexpr unsigned int LOCKTIME_VERIFY_SEQUENCE = (1 << 0); */ static constexpr int64_t MAX_TIMEWARP = 600; +static constexpr unsigned int MAX_OUTPUT_SCRIPT_SIZE{34}; +static constexpr unsigned int MAX_OUTPUT_DATA_SIZE{83}; + #endif // BITCOIN_CONSENSUS_CONSENSUS_H diff --git a/src/consensus/params.h b/src/consensus/params.h index dd29b9408e..c2ac24bfb5 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -32,6 +32,7 @@ constexpr bool ValidDeployment(BuriedDeployment dep) { return dep <= DEPLOYMENT_ enum DeploymentPos : uint16_t { DEPLOYMENT_TESTDUMMY, DEPLOYMENT_TAPROOT, // Deployment of Schnorr/Taproot (BIPs 340-342) + DEPLOYMENT_REDUCED_DATA, // ReducedData Temporary Softfork (RDTS) // NOTE: Also add new deployments to VersionBitsDeploymentInfo in deploymentinfo.cpp MAX_VERSION_BITS_DEPLOYMENTS }; @@ -52,6 +53,16 @@ struct BIP9Deployment { * boundary. 
*/ int min_activation_height{0}; + /** Maximum height for activation. If less than INT_MAX, the deployment will activate + * at this height regardless of signaling (similar to BIP8 flag day). + * std::numeric_limits::max() means no maximum (activation only via signaling). */ + int max_activation_height{std::numeric_limits::max()}; + /** For temporary softforks: number of blocks the deployment remains active after activation. + * std::numeric_limits::max() means permanent (never expires). */ + int active_duration{std::numeric_limits::max()}; + /** Per-deployment activation threshold. If 0, uses the global nRuleChangeActivationThreshold. + * Otherwise, specifies the number of blocks required for this specific deployment. */ + int threshold{0}; /** Constant for nTimeout very far in the future. */ static constexpr int64_t NO_TIMEOUT = std::numeric_limits::max(); diff --git a/src/consensus/tx_verify.cpp b/src/consensus/tx_verify.cpp index 95466b759c..84558503c7 100644 --- a/src/consensus/tx_verify.cpp +++ b/src/consensus/tx_verify.cpp @@ -161,7 +161,18 @@ int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& i return nSigOps; } -bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee) +bool Consensus::CheckOutputSizes(const CTransaction& tx, TxValidationState& state) +{ + for (const auto& txout : tx.vout) { + if (txout.scriptPubKey.empty()) continue; + if (txout.scriptPubKey.size() > ((txout.scriptPubKey[0] == OP_RETURN) ? MAX_OUTPUT_DATA_SIZE : MAX_OUTPUT_SCRIPT_SIZE)) { + return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "bad-txns-vout-script-toolarge"); + } + } + return true; +} + +bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee, const CheckTxInputsRules rules) { // are the actual inputs available? 
if (!inputs.HaveInputs(tx)) { @@ -169,6 +180,11 @@ bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, strprintf("%s: inputs missing/spent", __func__)); } + // NOTE: CheckTransaction is arguably the more logical place to do this, but it's context-independent, so this is probably the next best place for now + if (rules.test(CheckTxInputsRules::OutputSizeLimit) && !CheckOutputSizes(tx, state)) { + return false; + } + CAmount nValueIn = 0; for (unsigned int i = 0; i < tx.vin.size(); ++i) { const COutPoint &prevout = tx.vin[i].prevout; diff --git a/src/consensus/tx_verify.h b/src/consensus/tx_verify.h index d2cf792cf3..8111a5b77a 100644 --- a/src/consensus/tx_verify.h +++ b/src/consensus/tx_verify.h @@ -17,14 +17,45 @@ class TxValidationState; /** Transaction validation functions */ +class CheckTxInputsRules { + using underlying_type = unsigned int; + underlying_type m_flags; + constexpr explicit CheckTxInputsRules(underlying_type flags) noexcept : m_flags(flags) {} + + enum class Rule { + None = 0, + OutputSizeLimit = 1 << 0, + }; + +public: + using enum Rule; + + constexpr CheckTxInputsRules(Rule rule) noexcept : m_flags(static_cast(rule)) {} + + [[nodiscard]] constexpr bool test(CheckTxInputsRules rules) const noexcept { + return (m_flags & rules.m_flags) == rules.m_flags; + } + + [[nodiscard]] constexpr CheckTxInputsRules operator|(const CheckTxInputsRules other) const noexcept { + return CheckTxInputsRules{m_flags | other.m_flags}; + } +}; + namespace Consensus { +/** + * Check whether all outputs of this transaction satisfy size limits. + * Regular outputs must be <= MAX_OUTPUT_SCRIPT_SIZE (34 bytes). + * OP_RETURN outputs must be <= MAX_OUTPUT_DATA_SIZE (83 bytes). + */ +bool CheckOutputSizes(const CTransaction& tx, TxValidationState& state); + /** * Check whether all inputs of this transaction are valid (no double spends and amounts) * This does not modify the UTXO set. This does not check scripts and sigs. 
* @param[out] txfee Set to the transaction fee if successful. * Preconditions: tx.IsCoinBase() is false. */ -[[nodiscard]] bool CheckTxInputs(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee); +[[nodiscard]] bool CheckTxInputs(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee, CheckTxInputsRules rules); } // namespace Consensus /** Auxiliary functions for transaction validation (ideally should not be exposed) */ diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp index 611ff7056c..4c466d7934 100644 --- a/src/crypto/sha256.cpp +++ b/src/crypto/sha256.cpp @@ -636,7 +636,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem Transform = sha256_x86_shani::Transform; TransformD64 = TransformD64Wrapper; TransformD64_2way = sha256d64_x86_shani::Transform_2way; - ret = "x86_shani(1way,2way)"; + ret = "x86_shani(1way;2way)"; have_sse4 = false; // Disable SSE4/AVX2; have_avx2 = false; } @@ -650,14 +650,14 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem #endif #if defined(ENABLE_SSE41) TransformD64_4way = sha256d64_sse41::Transform_4way; - ret += ",sse41(4way)"; + ret += ";sse41(4way)"; #endif } #if defined(ENABLE_AVX2) if (have_avx2 && have_avx && enabled_avx) { TransformD64_8way = sha256d64_avx2::Transform_8way; - ret += ",avx2(8way)"; + ret += ";avx2(8way)"; } #endif #elif (defined(__linux__)) && defined(ENABLE_POWER8) @@ -697,7 +697,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem Transform = sha256_arm_shani::Transform; TransformD64 = TransformD64Wrapper; TransformD64_2way = sha256d64_arm_shani::Transform_2way; - ret = "arm_shani(1way,2way)"; + ret = "arm_shani(1way;2way)"; } #endif #endif // DISABLE_OPTIMIZED_SHA256 diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp index ba92f5cd96..14242fe8e5 100644 --- a/src/dbwrapper.cpp +++ 
b/src/dbwrapper.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -48,8 +49,8 @@ static void HandleError(const leveldb::Status& status) if (status.ok()) return; const std::string errmsg = "Fatal LevelDB error: " + status.ToString(); - LogPrintf("%s\n", errmsg); - LogPrintf("You can use -debug=leveldb to get more complete diagnostic messages\n"); + LogError("%s", errmsg); + LogInfo("You can use -debug=leveldb to get more complete diagnostic messages"); throw dbwrapper_error(errmsg); } @@ -296,8 +297,7 @@ CDBWrapper::CDBWrapper(const DBParams& params) LogPrintf("Finished database compaction of %s\n", fs::PathToString(params.path)); } - // The base-case obfuscation key, which is a noop. - obfuscate_key = std::vector(OBFUSCATE_KEY_NUM_BYTES, '\000'); + assert(!obfuscate_key); // Needed for unobfuscated Read()/Write() below bool key_exists = Read(OBFUSCATE_KEY_KEY, obfuscate_key); @@ -308,12 +308,11 @@ CDBWrapper::CDBWrapper(const DBParams& params) // Write `new_key` so we don't obfuscate the key with itself Write(OBFUSCATE_KEY_KEY, new_key); - obfuscate_key = new_key; + Read(CDBWrapper::OBFUSCATE_KEY_KEY, obfuscate_key); - LogPrintf("Wrote new obfuscate key for %s: %s\n", fs::PathToString(params.path), HexStr(obfuscate_key)); + LogInfo("Wrote new obfuscation key for %s: %s", fs::PathToString(params.path), obfuscate_key.HexKey()); } - - LogPrintf("Using obfuscation key for %s: %s\n", fs::PathToString(params.path), HexStr(obfuscate_key)); + LogInfo("Using obfuscation key for %s: %s", fs::PathToString(params.path), obfuscate_key.HexKey()); } CDBWrapper::~CDBWrapper() @@ -364,16 +363,13 @@ size_t CDBWrapper::DynamicMemoryUsage() const // past the null-terminator. const std::string CDBWrapper::OBFUSCATE_KEY_KEY("\000obfuscate_key", 14); -const unsigned int CDBWrapper::OBFUSCATE_KEY_NUM_BYTES = 8; - /** * Returns a string (consisting of 8 random bytes) suitable for use as an * obfuscating XOR key. 
*/ std::vector CDBWrapper::CreateObfuscateKey() const { - std::vector ret(OBFUSCATE_KEY_NUM_BYTES); - GetRandBytes(ret); + auto ret = FastRandomContext{}.randbytes(Obfuscation::KEY_SIZE); return ret; } @@ -385,7 +381,7 @@ std::optional CDBWrapper::ReadImpl(Span key) const if (!status.ok()) { if (status.IsNotFound()) return std::nullopt; - LogPrintf("LevelDB read failure: %s\n", status.ToString()); + LogError("LevelDB read failure: %s", status.ToString()); HandleError(status); } return strValue; @@ -400,7 +396,7 @@ bool CDBWrapper::ExistsImpl(Span key) const if (!status.ok()) { if (status.IsNotFound()) return false; - LogPrintf("LevelDB read failure: %s\n", status.ToString()); + LogError("LevelDB read failure: %s", status.ToString()); HandleError(status); } return true; @@ -460,7 +456,7 @@ void CDBIterator::Next() { m_impl_iter->iter->Next(); } namespace dbwrapper_private { -const std::vector& GetObfuscateKey(const CDBWrapper &w) +const Obfuscation& GetObfuscateKey(const CDBWrapper& w) { return w.obfuscate_key; } diff --git a/src/dbwrapper.h b/src/dbwrapper.h index 89d0f17763..e721e17166 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -70,7 +70,7 @@ namespace dbwrapper_private { * Database obfuscation should be considered an implementation detail of the * specific database. */ -const std::vector& GetObfuscateKey(const CDBWrapper &w); +const Obfuscation& GetObfuscateKey(const CDBWrapper&); }; // namespace dbwrapper_private @@ -190,7 +190,7 @@ struct LevelDBContext; class CDBWrapper { - friend const std::vector& dbwrapper_private::GetObfuscateKey(const CDBWrapper &w); + friend const Obfuscation& dbwrapper_private::GetObfuscateKey(const CDBWrapper&); private: //! holds all leveldb-specific fields of this class std::unique_ptr m_db_context; @@ -198,15 +198,12 @@ class CDBWrapper //! the name of this database std::string m_name; - //! a key used for optional XOR-obfuscation of the database - std::vector obfuscate_key; + //! 
optional XOR-obfuscation of the database + Obfuscation obfuscate_key; //! the key under which the obfuscation key is stored static const std::string OBFUSCATE_KEY_KEY; - //! the length of the obfuscate key in number of bytes - static const unsigned int OBFUSCATE_KEY_NUM_BYTES; - std::vector CreateObfuscateKey() const; //! path to filesystem storage diff --git a/src/deploymentinfo.cpp b/src/deploymentinfo.cpp index 185a7dcb54..200f5fd263 100644 --- a/src/deploymentinfo.cpp +++ b/src/deploymentinfo.cpp @@ -17,6 +17,10 @@ const struct VBDeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_B /*.name =*/ "taproot", /*.gbt_force =*/ true, }, + { + /*.name =*/ "reduced_data", + /*.gbt_force =*/ true, + }, }; std::string DeploymentName(Consensus::BuriedDeployment dep) diff --git a/src/deploymentstatus.h b/src/deploymentstatus.h index 03d3c531cc..26c66e8be7 100644 --- a/src/deploymentstatus.h +++ b/src/deploymentstatus.h @@ -20,7 +20,15 @@ inline bool DeploymentActiveAfter(const CBlockIndex* pindexPrev, const Consensus inline bool DeploymentActiveAfter(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos dep, VersionBitsCache& versionbitscache) { assert(Consensus::ValidDeployment(dep)); - return ThresholdState::ACTIVE == versionbitscache.State(pindexPrev, params, dep); + if (ThresholdState::ACTIVE != versionbitscache.State(pindexPrev, params, dep)) return false; + + const auto& deployment = params.vDeployments[dep]; + // Permanent deployment (never expires) + if (deployment.active_duration == std::numeric_limits::max()) return true; + + const int activation_height = versionbitscache.StateSinceHeight(pindexPrev, params, dep); + const int height = pindexPrev == nullptr ? 
0 : pindexPrev->nHeight + 1; + return height < activation_height + deployment.active_duration; } /** Determine if a deployment is active for this block */ @@ -49,4 +57,17 @@ inline bool DeploymentEnabled(const Consensus::Params& params, Consensus::Deploy return params.vDeployments[dep].nStartTime != Consensus::BIP9Deployment::NEVER_ACTIVE; } +/** Determine if mandatory signaling is required for a deployment at the next block */ +inline bool DeploymentMustSignalAfter(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos dep, ThresholdState state) +{ + assert(Consensus::ValidDeployment(dep)); + const auto& deployment = params.vDeployments[dep]; + if (deployment.max_activation_height >= std::numeric_limits::max()) return false; + if (state != ThresholdState::STARTED) return false; // If must_signal height is reached before start time, abstain from enforcement + const int nPeriod = params.nMinerConfirmationWindow; + const int nHeight = pindexPrev == nullptr ? 
0 : pindexPrev->nHeight + 1; + return nHeight >= deployment.max_activation_height - (2 * nPeriod) + && nHeight < deployment.max_activation_height - nPeriod; +} + #endif // BITCOIN_DEPLOYMENTSTATUS_H diff --git a/src/flatfile.cpp b/src/flatfile.cpp index df6596e940..33d8baf44f 100644 --- a/src/flatfile.cpp +++ b/src/flatfile.cpp @@ -41,11 +41,11 @@ FILE* FlatFileSeq::Open(const FlatFilePos& pos, bool read_only) const if (!file && !read_only) file = fsbridge::fopen(path, "wb+"); if (!file) { - LogPrintf("Unable to open file %s\n", fs::PathToString(path)); + LogError("Unable to open file %s", fs::PathToString(path)); return nullptr; } if (pos.nPos && fseek(file, pos.nPos, SEEK_SET)) { - LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, fs::PathToString(path)); + LogError("Unable to seek to position %u of %s", pos.nPos, fs::PathToString(path)); if (fclose(file) != 0) { LogError("Unable to close file %s", fs::PathToString(path)); } diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 4766c32cdb..db0b9ff6b3 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -168,7 +168,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) jreq.context = context; jreq.peerAddr = req->GetPeer().ToStringAddrPort(); if (!RPCAuthorized(authHeader.second, jreq.authUser, jreq.m_wallet_restriction)) { - LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", jreq.peerAddr); + LogWarning("ThreadRPCServer incorrect password attempt from %s", jreq.peerAddr); /* Deter brute-forcing If this results in a DoS the user really @@ -192,7 +192,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) UniValue reply; bool user_has_whitelist = g_rpc_whitelist.count(jreq.authUser); if (!user_has_whitelist && g_rpc_whitelist_default) { - LogPrintf("RPC User %s not allowed to call any methods\n", jreq.authUser); + LogWarning("RPC User %s not allowed to call any methods", jreq.authUser); req->WriteReply(HTTP_FORBIDDEN); return false; @@ -200,7 
+200,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) } else if (valRequest.isObject()) { jreq.parse(valRequest); if (user_has_whitelist && !g_rpc_whitelist[jreq.authUser].count(jreq.strMethod)) { - LogPrintf("RPC User %s not allowed to call method %s\n", jreq.authUser, jreq.strMethod); + LogWarning("RPC User %s not allowed to call method %s", jreq.authUser, jreq.strMethod); req->WriteReply(HTTP_FORBIDDEN); return false; } @@ -230,7 +230,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) // Parse method std::string strMethod = request.find_value("method").get_str(); if (!g_rpc_whitelist[jreq.authUser].count(strMethod)) { - LogPrintf("RPC User %s not allowed to call method %s\n", jreq.authUser, strMethod); + LogWarning("RPC User %s not allowed to call method %s", jreq.authUser, strMethod); req->WriteReply(HTTP_FORBIDDEN); return false; } @@ -365,7 +365,7 @@ static bool InitRPCAuthentication() for (const std::string& rpcauth : gArgs.GetArgs("-rpcauth")) { if (rpcauth.empty()) continue; if (!AddRPCAuth(rpcauth)) { - LogPrintf("Invalid -rpcauth argument.\n"); + LogWarning("Invalid -rpcauth argument."); return false; } } diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 613a1d3d4f..a477e63854 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -330,7 +330,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg) if (g_work_queue->Enqueue(item.get())) { item.release(); /* if true, queue took ownership */ } else { - LogPrintf("WARNING: request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting\n"); + LogWarning("Request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting"); item->req->WriteReply(HTTP_SERVICE_UNAVAILABLE, "Work queue depth exceeded"); } } else { @@ -454,10 +454,10 @@ static bool HTTPBindAddresses(struct evhttp* http) endpoints.emplace_back("127.0.0.1", http_port); is_default = 
true; if (!gArgs.GetArgs("-rpcallowip").empty()) { - LogPrintf("WARNING: option -rpcallowip was specified without -rpcbind; this doesn't usually make sense\n"); + LogWarning("Option -rpcallowip was specified without -rpcbind; this doesn't usually make sense"); } if (!gArgs.GetArgs("-rpcbind").empty()) { - LogPrintf("WARNING: option -rpcbind was ignored because -rpcallowip was not specified, refusing to allow everyone to connect\n"); + InitWarning(_("Option -rpcbind was ignored because -rpcallowip was not specified, refusing to allow everyone to connect\n")); } } else { // Specific bind addresses for (const std::string& strRPCBind : gArgs.GetArgs("-rpcbind")) { @@ -480,7 +480,7 @@ static bool HTTPBindAddresses(struct evhttp* http) if (bind_handle) { const std::optional addr{LookupHost(i->first, false)}; if (i->first.empty() || (addr.has_value() && addr->IsBindAny())) { - LogPrintf("WARNING: the RPC server is not safe to expose to untrusted networks such as the public internet\n"); + LogWarning("The RPC server is not safe to expose to untrusted networks such as the public internet"); } // Set the no-delay option (disable Nagle's algorithm) on the TCP socket. evutil_socket_t fd = evhttp_bound_socket_get_fd(bind_handle); @@ -492,7 +492,7 @@ static bool HTTPBindAddresses(struct evhttp* http) } else { int err = EVUTIL_SOCKET_ERROR(); if (!is_default || (err != EADDRNOTAVAIL && err != ENOENT && err != EOPNOTSUPP && !ignorable_error)) { - LogPrintf("Binding RPC on address %s port %i failed (Error: %s).\n", i->first, i->second, NetworkErrorString(err)); + LogWarning("Binding RPC on address %s port %i failed (Error: %s).", i->first, i->second, NetworkErrorString(err)); num_fail += 1; } else { // Don't count failure if binding was not explicitly configured @@ -563,7 +563,7 @@ bool InitHTTPServer(const util::SignalInterrupt& interrupt) raii_evhttp http_ctr = obtain_evhttp(base_ctr.get()); struct evhttp* http = http_ctr.get(); if (!http) { - LogPrintf("couldn't create evhttp. 
Exiting.\n"); + LogError("Couldn't create evhttp. Exiting."); return false; } @@ -573,7 +573,7 @@ bool InitHTTPServer(const util::SignalInterrupt& interrupt) evhttp_set_gencb(http, http_request_cb, (void*)&interrupt); if (!HTTPBindAddresses(http)) { - LogPrintf("Unable to bind all endpoints for RPC server\n"); + LogError("Unable to bind all endpoints for RPC server"); return false; } @@ -703,7 +703,7 @@ HTTPRequest::~HTTPRequest() { if (!replySent) { // Keep track of whether reply was sent to avoid request leaks - LogPrintf("%s: Unhandled request\n", __func__); + LogWarning("Unhandled HTTP request"); WriteReply(HTTP_INTERNAL_SERVER_ERROR, "Unhandled request"); } // evhttpd cleans up the request, as long as a reply was sent. diff --git a/src/init.cpp b/src/init.cpp index 5feb90921b..23ab427b1c 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -83,6 +84,7 @@ #include #include #include +#include #include #include #include @@ -206,7 +208,7 @@ static void RemovePidFile(const ArgsManager& args) const auto pid_path{GetPidFile(args)}; if (std::error_code error; !fs::remove(pid_path, error)) { std::string msg{error ? error.message() : "File does not exist"}; - LogPrintf("Unable to remove PID file (%s): %s\n", fs::PathToString(pid_path), msg); + LogWarning("Unable to remove PID file (%s): %s", fs::PathToString(pid_path), msg); } } @@ -274,6 +276,8 @@ void Interrupt(NodeContext& node) #if HAVE_SYSTEM ShutdownNotify(*node.args); #endif + // Wake any threads that may be waiting for the tip to change. 
+ if (node.notifications) WITH_LOCK(node.notifications->m_tip_block_mutex, node.notifications->m_tip_block_cv.notify_all()); InterruptHTTPServer(); InterruptHTTPRPC(); InterruptRPC(); @@ -670,7 +674,7 @@ void SetupServerArgs(ArgsManager& argsman, bool can_listen_ipc) argsman.AddArg("-checkpoints", strprintf("Enable rejection of any forks from the known historical chain until block %s (default: %u)", defaultChainParams->Checkpoints().GetHeight(), DEFAULT_CHECKPOINTS_ENABLED), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-deprecatedrpc=", "Allows deprecated RPC method(s) to be used", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u). 
Blocks after target height may be processed during shutdown.", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-limitancestorcount=", strprintf("Do not accept transactions if number of in-mempool ancestors is or more (default: %u)", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-limitancestorsize=", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT_KVB), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-limitdescendantcount=", strprintf("Do not accept transactions if any ancestor would have or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); @@ -712,7 +716,8 @@ void SetupServerArgs(ArgsManager& argsman, bool can_listen_ipc) argsman.AddArg("-datacarriercost", strprintf("Treat extra data in transactions as at least N vbytes per actual byte (default: %s)", DEFAULT_WEIGHT_PER_DATA_BYTE / 4.0), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); argsman.AddArg("-datacarrierfullcount", strprintf("Apply datacarriersize limit to all known datacarrier methods (default: %u)", DEFAULT_DATACARRIER_FULLCOUNT), ArgsManager::ALLOW_ANY | (DEFAULT_DATACARRIER_FULLCOUNT ? 
uint32_t{ArgsManager::DEBUG_ONLY} : 0), OptionsCategory::NODE_RELAY); argsman.AddArg("-datacarriersize", - strprintf("Maximum size of data in data carrier transactions we relay and mine, in bytes (default: %u)", + strprintf("Maximum size of data in data carrier transactions we relay and mine, in bytes (maximum %s, default: %u)", + MAX_OUTPUT_DATA_SIZE, MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); argsman.AddArg("-maxscriptsize", strprintf("Maximum size of scripts (including the entire witness stack) we relay and mine, in bytes (default: %s)", DEFAULT_SCRIPT_SIZE_POLICY_LIMIT), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); @@ -837,6 +842,9 @@ static bool AppInitServers(NodeContext& node) // Parameter interaction based on rules void InitParameterInteraction(ArgsManager& args) { + // Before any SoftSetArg so we get the actual user-set value + g_pcp_warn_for_unauthorized = args.GetBoolArg("-natpmp", false); + if (args.GetBoolArg("-corepolicy", DEFAULT_COREPOLICY)) { args.SoftSetArg("-incrementalrelayfee", FormatMoney(CORE_INCREMENTAL_RELAY_FEE)); if (!args.IsArgSet("-minrelaytxfee")) { @@ -853,7 +861,6 @@ void InitParameterInteraction(ArgsManager& args) args.SoftSetArg("-rejectparasites", "0"); args.SoftSetArg("-datacarriercost", "0.25"); args.SoftSetArg("-datacarrierfullcount", "0"); - args.SoftSetArg("-datacarriersize", "83"); args.SoftSetArg("-maxtxlegacysigops", strprintf("%s", std::numeric_limits::max())); args.SoftSetArg("-maxscriptsize", strprintf("%s", std::numeric_limits::max())); args.SoftSetArg("-mempooltruc", "enforce"); @@ -903,9 +910,12 @@ void InitParameterInteraction(ArgsManager& args) if (!args.GetBoolArg("-listen", DEFAULT_LISTEN)) { // do not map ports or try to retrieve public IP when not listening (pointless) - if (args.SoftSetBoolArg("-upnp", false)) + if (args.GetBoolArg("-upnp", DEFAULT_UPNP)) { + args.ForceSetArg("-upnp", "0"); LogInfo("parameter interaction: -listen=0 -> setting -upnp=0\n"); - if 
(args.SoftSetBoolArg("-natpmp", false)) { + } + if (args.GetBoolArg("-natpmp", DEFAULT_NATPMP)) { + args.ForceSetArg("-natpmp", "0"); LogInfo("parameter interaction: -listen=0 -> setting -natpmp=0\n"); } if (args.SoftSetBoolArg("-discover", false)) @@ -965,7 +975,7 @@ namespace { // Variables internal to initialization process only int nMaxConnections; int available_fds; -ServiceFlags g_local_services = ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS); +ServiceFlags g_local_services = ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA); int64_t peer_connect_timeout; std::set g_enabled_filter_types; @@ -1458,10 +1468,6 @@ static ChainstateLoadResult InitAndLoadChainstate( LogPrintf("* Flushing caches if available system memory drops below %s MiB\n", g_low_memory_threshold / 1024 / 1024); } - if (mempool_opts.rbf_policy == RBFPolicy::Always) { - g_local_services = ServiceFlags(g_local_services | NODE_REPLACE_BY_FEE); - } - ChainstateManager::Options chainman_opts{ .chainparams = chainparams, .datadir = args.GetDataDirNet(), @@ -1513,7 +1519,7 @@ static ChainstateLoadResult InitAndLoadChainstate( index->Interrupt(); index->Stop(); if (!(index->Init() && index->StartBackgroundSync())) { - LogPrintf("[snapshot] WARNING failed to restart index %s on snapshot chain\n", index->GetName()); + LogWarning("[snapshot] Failed to restart index %s on snapshot chain", index->GetName()); } } }; @@ -1577,11 +1583,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // Warn about relative -datadir path. if (args.IsArgSet("-datadir") && !args.GetPathArg("-datadir").is_absolute()) { - LogPrintf("Warning: relative datadir option '%s' specified, which will be interpreted relative to the " - "current working directory '%s'. This is fragile, because if bitcoin is started in the future " - "from a different location, it will be unable to locate the current data files. 
There could " - "also be data loss if bitcoin is started while in a temporary directory.\n", - args.GetArg("-datadir", ""), fs::PathToString(fs::current_path())); + LogWarning("Relative datadir option '%s' specified, which will be interpreted relative to the " + "current working directory '%s'. This is fragile, because if bitcoin is started in the future " + "from a different location, it will be unable to locate the current data files. There could " + "also be data loss if bitcoin is started while in a temporary directory.", + args.GetArg("-datadir", ""), fs::PathToString(fs::current_path())); } assert(!node.scheduler); @@ -2011,7 +2017,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // requested to kill the GUI during the last operation. If so, exit. if (ShutdownRequested(node)) { LogPrintf("Shutdown requested. Exiting.\n"); - return false; + return true; } ChainstateManager& chainman = *Assert(node.chainman); @@ -2066,7 +2072,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } else { // Prior to setting NODE_NETWORK, check if we can provide historical blocks. if (!WITH_LOCK(chainman.GetMutex(), return chainman.BackgroundSyncInProgress())) { - LogPrintf("Setting NODE_NETWORK on non-prune mode\n"); + LogInfo("Setting NODE_NETWORK in non-prune mode"); g_local_services = ServiceFlags(g_local_services | NODE_NETWORK); } else { LogPrintf("Running node in NODE_NETWORK_LIMITED mode until snapshot background sync completes\n"); @@ -2100,7 +2106,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) "Approximately %u GB of data will be stored in this directory." 
), fs::quoted(fs::PathToString(args.GetBlocksDirPath())), - additional_bytes_needed / 1'000'000'000 + CeilDiv(additional_bytes_needed, 1'000'000'000) )); } } @@ -2130,6 +2136,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) ScheduleBatchPriority(); // Import blocks and ActivateBestChain() ImportBlocks(chainman, vImportFiles); + WITH_LOCK(::cs_main, chainman.UpdateIBDStatus()); if (args.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) { LogPrintf("Stopping after block import\n"); if (!(Assert(node.shutdown_request))()) { @@ -2172,7 +2179,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } if (ShutdownRequested(node)) { - return false; + return true; } // ********************************************************* Step 12: start node @@ -2213,6 +2220,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) connOptions.m_peer_connect_timeout = peer_connect_timeout; connOptions.whitelist_forcerelay = args.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY); connOptions.whitelist_relay = args.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY); + connOptions.m_capture_messages = args.GetBoolArg("-capturemessages", false); connOptions.disable_v1conn_clearnet = args.GetBoolArg("-v2onlyclearnet", false); // Port to bind to if `-bind=addr` is provided without a `:port` suffix. 
@@ -2278,22 +2286,46 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } } + connOptions.listenonion = args.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION); + CService onion_service_target; if (!connOptions.onion_binds.empty()) { onion_service_target = connOptions.onion_binds.front(); } else if (!connOptions.vBinds.empty()) { onion_service_target = connOptions.vBinds.front(); + if (connOptions.listenonion) { + std::string alternate_connections{"clearnet"}, only_from_localhost; + if (onion_service_target.IsBindAny()) { + only_from_localhost = " from localhost"; + } else if (onion_service_target.IsLocal()) { + alternate_connections = "local"; + } + InitWarning(strprintf(_("You are using a common listening port (%s) for both Tor and %s connections. All connections to this port%s will be assumed to be Tor connections, and will be denied any whitelist permissions. If this is not your intent, setup a separate -bind=[:]=onion configuration, or set -listenonion=0."), + onion_service_target.ToStringAddrPort(), + alternate_connections, + only_from_localhost)); + } } else { onion_service_target = DefaultOnionServiceTarget(default_bind_port_onion); connOptions.onion_binds.push_back(onion_service_target); } - if (args.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) { + if (connOptions.listenonion) { if (connOptions.onion_binds.size() > 1) { InitWarning(strprintf(_("More than one onion bind address is provided. 
Using %s " "for the automatically created Tor onion service."), onion_service_target.ToStringAddrPort())); } + if (onion_service_target.IsBindAny()) { + CNetAddr loopback_addr = onion_service_target; + // NOTE: GetNetwork is not_publicly_routable here + if (onion_service_target.ToStringAddr() == "0.0.0.0") { + loopback_addr = LookupHost("127.0.0.1", /*fAllowLookup=*/false).value(); + } else { + loopback_addr = LookupHost("[::1]", /*fAllowLookup=*/false).value(); + } + onion_service_target.SetIP(loopback_addr); + } StartTorControl(onion_service_target); } diff --git a/src/interfaces/mining.h b/src/interfaces/mining.h index 676487a50e..b0950e364d 100644 --- a/src/interfaces/mining.h +++ b/src/interfaces/mining.h @@ -84,7 +84,7 @@ class Mining * @param[in] timeout how long to wait for a new tip * @returns Hash and height of the current chain tip after this call. */ - virtual BlockRef waitTipChanged(uint256 current_tip, MillisecondsDouble timeout = MillisecondsDouble::max()) = 0; + virtual std::optional waitTipChanged(uint256 current_tip, MillisecondsDouble timeout = MillisecondsDouble::max()) = 0; /** * Construct a new block template. For the createNewBlock variant, subclass options (if any) are silently lost and overridden by any config args. For createNewBlock2, the options are assumed to be complete. 
diff --git a/src/ipc/process.cpp b/src/ipc/process.cpp index bdc541b654..07957d90ce 100644 --- a/src/ipc/process.cpp +++ b/src/ipc/process.cpp @@ -114,7 +114,7 @@ int ProcessImpl::connect(const fs::path& data_dir, } int connect_error = errno; if (::close(fd) != 0) { - LogPrintf("Error closing file descriptor %i '%s': %s\n", fd, address, SysErrorString(errno)); + LogWarning("Error closing file descriptor %i '%s': %s", fd, address, SysErrorString(errno)); } throw std::system_error(connect_error, std::system_category()); } @@ -145,7 +145,7 @@ int ProcessImpl::bind(const fs::path& data_dir, const std::string& exe_name, std } int bind_error = errno; if (::close(fd) != 0) { - LogPrintf("Error closing file descriptor %i: %s\n", fd, SysErrorString(errno)); + LogWarning("Error closing file descriptor %i: %s", fd, SysErrorString(errno)); } throw std::system_error(bind_error, std::system_category()); } diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 47fe82fa91..b418139ef9 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -117,6 +117,15 @@ class CMainParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1628640000; // August 11th, 2021 consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 709632; // Approximately November 12th, 2021 + // ReducedData Temporary Softfork (RDTS) + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = 1764547200; // December 1st, 2025 + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].max_activation_height = 965664; // ~September 1st, 2026 + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].active_duration = 52416; // ~1 year + 
consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].threshold = 1109; // 55% of 2016 + consensus.nMinimumChainWork = uint256{"0000000000000000000000000000000000000000dee8e2a309ad8a9820433c68"}; consensus.defaultAssumeValid = uint256{"00000000000000000000611fd22f2df7c8fbd0688745c3a6c3bb5109cc2a12cb"}; // 912683 @@ -146,7 +155,8 @@ class CMainParams : public CChainParams { // release ASAP to avoid it where possible. vSeeds.emplace_back("seed.bitcoin.sipa.be."); // Pieter Wuille, only supports x1, x5, x9, and xd vSeeds.emplace_back("dnsseed.bluematt.me."); // Matt Corallo, only supports x9 - vSeeds.emplace_back("dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us."); // Luke Dashjr + vSeeds.emplace_back("dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us."); // Luke Dashjr, support BIP110 seeding (x8000009) + vSeeds.emplace_back("seed.bitcoin.haf.ovh."); // Léo Haf, support BIP110 seeding (x8000009) vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch."); // Jonas Schnelli, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.btc.petertodd.net."); // Peter Todd, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors Provoost @@ -279,6 +289,14 @@ class CTestNetParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1628640000; // August 11th, 2021 consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay + // ReducedData Temporary Softfork (RDTS) + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = 1764547200; // December 1st, 2025 + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].active_duration = 52416; // ~1 year + 
consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].threshold = 1109; // 55% of 2016 + consensus.nMinimumChainWork = uint256{"0000000000000000000000000000000000000000000015f5e0c9f13455b0eb17"}; consensus.defaultAssumeValid = uint256{"00000000000003fc7967410ba2d0a8a8d50daedc318d43e8baf1a9782c236a57"}; // 3974606 @@ -378,6 +396,14 @@ class CTestNet4Params : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay + // ReducedData Temporary Softfork (RDTS) + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = 1764547200; // December 1st, 2025 + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].active_duration = 52416; // ~1 year + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].threshold = 1109; // 55% of 2016 + consensus.nMinimumChainWork = uint256{"0000000000000000000000000000000000000000000001d6dce8651b6094e4c1"}; consensus.defaultAssumeValid = uint256{"0000000000003ed4f08dbdf6f7d6b271a6bcffce25675cb40aa9fa43179a89f3"}; // 72600 @@ -516,6 +542,11 @@ class SigNetParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; 
+ consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + // message start is defined as the first 4 bytes of the sha256d of the block script HashWriter h{}; h << consensus.signet_challenge; @@ -591,6 +622,11 @@ class CRegTestParams : public CChainParams consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + consensus.nMinimumChainWork = uint256{}; consensus.defaultAssumeValid = uint256{}; @@ -627,6 +663,13 @@ class CRegTestParams : public CChainParams consensus.vDeployments[deployment_pos].nStartTime = version_bits_params.start_time; consensus.vDeployments[deployment_pos].nTimeout = version_bits_params.timeout; consensus.vDeployments[deployment_pos].min_activation_height = version_bits_params.min_activation_height; + consensus.vDeployments[deployment_pos].max_activation_height = version_bits_params.max_activation_height; + consensus.vDeployments[deployment_pos].active_duration = version_bits_params.active_duration; + // Validated here rather than in src/chainparams.cpp because nMinerConfirmationWindow is not yet available at parse time + if (version_bits_params.active_duration != std::numeric_limits::max() && version_bits_params.active_duration % consensus.nMinerConfirmationWindow != 0) { + throw std::runtime_error(strprintf("active_duration (%d) must be a multiple of nMinerConfirmationWindow (%d)", version_bits_params.active_duration, consensus.nMinerConfirmationWindow)); + } + 
consensus.vDeployments[deployment_pos].threshold = version_bits_params.threshold; } genesis = CreateGenesisBlock(1296688602, 2, 0x207fffff, 1, 50 * COIN); diff --git a/src/kernel/chainparams.h b/src/kernel/chainparams.h index a30097ca96..09d2ddfee5 100644 --- a/src/kernel/chainparams.h +++ b/src/kernel/chainparams.h @@ -153,6 +153,9 @@ class CChainParams int64_t start_time; int64_t timeout; int min_activation_height; + int max_activation_height{std::numeric_limits::max()}; + int active_duration{std::numeric_limits::max()}; + int threshold{0}; // 0 means use global nRuleChangeActivationThreshold }; /** diff --git a/src/kernel/coinstats.cpp b/src/kernel/coinstats.cpp index 81c496ab34..a0b7e6cd9c 100644 --- a/src/kernel/coinstats.cpp +++ b/src/kernel/coinstats.cpp @@ -93,7 +93,7 @@ static void ApplyHash(T& hash_obj, const Txid& hash, const std::mapfirst); - Coin coin = it->second; + const Coin& coin = it->second; ApplyCoinHash(hash_obj, outpoint, coin); } } diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc index 65e31724bc..10f523257c 100644 --- a/src/leveldb/db/db_impl.cc +++ b/src/leveldb/db/db_impl.cc @@ -803,6 +803,7 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) { pending_outputs_.insert(file_number); CompactionState::Output out; out.number = file_number; + out.file_size = 0; out.smallest.Clear(); out.largest.Clear(); compact->outputs.push_back(out); diff --git a/src/logging.cpp b/src/logging.cpp index 2ed6835197..5decd8af58 100644 --- a/src/logging.cpp +++ b/src/logging.cpp @@ -532,7 +532,7 @@ void BCLog::Logger::ShrinkDebugFile() // Restart the file with some of the end std::vector vch(RECENT_DEBUG_HISTORY_SIZE, 0); if (fseek(file, -((long)vch.size()), SEEK_END)) { - LogPrintf("Failed to shrink debug log file: fseek(...) failed\n"); + LogWarning("Failed to shrink debug log file: fseek(...) 
failed"); fclose(file); return; } diff --git a/src/mapport.cpp b/src/mapport.cpp index 43d17d7bfc..7c45832ae1 100644 --- a/src/mapport.cpp +++ b/src/mapport.cpp @@ -88,11 +88,11 @@ static bool ProcessPCP() // Open a port mapping on whatever local address we have toward the gateway. struct in_addr inaddr_any; inaddr_any.s_addr = htonl(INADDR_ANY); - auto res = PCPRequestPortMap(pcp_nonce, *gateway4, CNetAddr(inaddr_any), private_port, requested_lifetime); + auto res = PCPRequestPortMap(pcp_nonce, *gateway4, CNetAddr(inaddr_any), private_port, requested_lifetime, g_mapport_interrupt); MappingError* pcp_err = std::get_if(&res); if (pcp_err && *pcp_err == MappingError::UNSUPP_VERSION) { LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "portmap: Got unsupported PCP version response, falling back to NAT-PMP\n"); - res = NATPMPRequestPortMap(*gateway4, private_port, requested_lifetime); + res = NATPMPRequestPortMap(*gateway4, private_port, requested_lifetime, g_mapport_interrupt); } handle_mapping(res); } @@ -107,7 +107,7 @@ static bool ProcessPCP() // Try to open pinholes for all routable local IPv6 addresses. for (const auto &addr: GetLocalAddresses()) { if (!addr.IsRoutable() || !addr.IsIPv6()) continue; - auto res = PCPRequestPortMap(pcp_nonce, *gateway6, addr, private_port, requested_lifetime); + auto res = PCPRequestPortMap(pcp_nonce, *gateway6, addr, private_port, requested_lifetime, g_mapport_interrupt); handle_mapping(res); } } @@ -283,6 +283,11 @@ static void MapPortProtoSetEnabled(MapPortProtoFlag proto, bool enabled) } } +bool MapPortIsProtoEnabled(const MapPortProtoFlag proto) +{ + return g_mapport_enabled_protos & proto; +} + void StartMapPort(bool use_upnp, bool use_pcp) { MapPortProtoSetEnabled(MapPortProtoFlag::UPNP, use_upnp); diff --git a/src/mapport.h b/src/mapport.h index 56808ee419..9cd8681067 100644 --- a/src/mapport.h +++ b/src/mapport.h @@ -15,6 +15,8 @@ enum MapPortProtoFlag : unsigned int { PCP = 0x02, // PCP with NAT-PMP fallback. 
}; +bool MapPortIsProtoEnabled(MapPortProtoFlag); + void StartMapPort(bool use_upnp, bool use_pcp); void InterruptMapPort(); void StopMapPort(); diff --git a/src/net.cpp b/src/net.cpp index 96bd5d3f41..6ba9c033ae 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -115,7 +115,7 @@ const std::string NET_MESSAGE_TYPE_OTHER = "*other*"; static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("netgroup")[0:8] static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // SHA256("localhostnonce")[0:8] -static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL; // SHA256("addrcache")[0:8] +static const uint64_t RANDOMIZER_ID_NETWORKKEY = 0x0e8a2b136c592a7dULL; // SHA256("networkkey")[0:8] // // Global state variables // @@ -228,7 +228,7 @@ CService GetLocalAddress(const CNode& peer) return GetLocal(peer).value_or(CService{CNetAddr(), GetListenPort()}); } -static int GetnScore(const CService& addr) +int GetnScore(const CService& addr) { LOCK(g_maplocalhost_mutex); const auto it = mapLocalHost.find(addr); @@ -330,7 +330,9 @@ bool SeenLocal(const CService& addr) LOCK(g_maplocalhost_mutex); const auto it = mapLocalHost.find(addr); if (it == mapLocalHost.end()) return false; - ++it->second.nScore; + if (it->second.nScore < std::numeric_limits::max()) { + ++it->second.nScore; + } return true; } @@ -543,6 +545,13 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo if (!addr_bind.IsValid()) { addr_bind = GetBindAddress(*sock); } + uint64_t network_id = GetDeterministicRandomizer(RANDOMIZER_ID_NETWORKKEY) + .Write(target_addr.GetNetClass()) + .Write(addr_bind.GetAddrBytes()) + // For outbound connections, the port of the bound address is randomly + // assigned by the OS and would therefore not be useful for seeding. 
+ .Write(0) + .Finalize(); CNode* pnode = new CNode(id, std::move(sock), target_addr, @@ -552,6 +561,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo pszDest ? pszDest : "", conn_type, /*inbound_onion=*/false, + network_id, CNodeOptions{ .permission_flags = permission_flags, .i2p_sam_session = std::move(i2p_transient_session), @@ -587,9 +597,9 @@ void CNode::CloseSocketDisconnect() m_i2p_sam_session.reset(); } -void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector& ranges) const { +void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, std::optional addr, const std::vector& ranges) const { for (const auto& subnet : ranges) { - if (subnet.m_subnet.Match(addr)) { + if (addr.has_value() && subnet.m_subnet.Match(addr.value())) { NetPermissions::AddFlag(flags, subnet.m_flags); } } @@ -1783,7 +1793,30 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr&& sock, { int nInbound = 0; - AddWhitelistPermissionFlags(permission_flags, addr, vWhitelistedRangeIncoming); + const bool inbound_onion = [this, &addr, &addr_bind]{ + if (m_onion_binds.empty()) { + if (!m_listenonion) { + // If -listenonion=0, assume we do not have inbound Tor connections on non-onion listeners + return false; + } + // Tor connections are coming in on the first -bind + if ((!m_normal_binds.empty()) && addr_bind == m_normal_binds.front()) { + if (addr_bind.IsBindAny()) { + // Tor connections should have a source IP that is local + return addr.IsLocal(); + } + // Otherwise, the source IP is unpredictable, so assume anything could be onion + return true; + } + return false; + } else { + return std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); + } + }(); + + // Tor inbound connections do not reveal the peer's actual network address. + // Therefore do not apply address-based whitelist permissions to them. 
+ AddWhitelistPermissionFlags(permission_flags, inbound_onion ? std::optional{} : addr, vWhitelistedRangeIncoming); { LOCK(m_nodes_mutex); @@ -1844,12 +1877,16 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr&& sock, NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); - const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); // The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is // detected, so use it whenever we signal NODE_P2P_V2. ServiceFlags local_services = GetLocalServices(); const bool use_v2transport(local_services & NODE_P2P_V2); + uint64_t network_id = GetDeterministicRandomizer(RANDOMIZER_ID_NETWORKKEY) + .Write(inbound_onion ? NET_ONION : addr.GetNetClass()) + .Write(addr_bind.GetAddrBytes()) + .Write(addr_bind.GetPort()) // inbound connections use bind port + .Finalize(); CNode* pnode = new CNode(id, std::move(sock), CAddress{addr, NODE_NONE}, @@ -1859,6 +1896,7 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr&& sock, /*addrNameIn=*/"", ConnectionType::INBOUND, inbound_onion, + network_id, CNodeOptions{ .permission_flags = permission_flags, .prefer_evict = discouraged, @@ -2015,16 +2053,15 @@ void CConnman::NotifyNumConnectionsChanged() } } -bool CConnman::ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const +bool CConnman::ShouldRunInactivityChecks(const CNode& node, std::chrono::microseconds now) const { return node.m_connected + m_peer_connect_timeout < now; } -bool CConnman::InactivityCheck(const CNode& node) const +bool CConnman::InactivityCheck(const CNode& node, std::chrono::microseconds now) const { // Tests that see disconnects after using mocktime can start nodes with a // large timeout. For example, -peertimeout=999999999. 
- const auto now{GetTime()}; const auto last_send{node.m_last_send.load()}; const auto last_recv{node.m_last_recv.load()}; @@ -2048,7 +2085,7 @@ bool CConnman::InactivityCheck(const CNode& node) const if (now > last_send + TIMEOUT_INTERVAL) { LogDebug(BCLog::NET, - "socket sending timeout: %is, %s\n", count_seconds(now - last_send), + "socket sending timeout: %is, %s\n", Ticks(now - last_send), node.DisconnectMsg(fLogIPs) ); return true; @@ -2056,7 +2093,7 @@ bool CConnman::InactivityCheck(const CNode& node) const if (now > last_recv + TIMEOUT_INTERVAL) { LogDebug(BCLog::NET, - "socket receive timeout: %is, %s\n", count_seconds(now - last_recv), + "socket receive timeout: %is, %s\n", Ticks(now - last_recv), node.DisconnectMsg(fLogIPs) ); return true; @@ -2138,6 +2175,8 @@ void CConnman::SocketHandlerConnected(const std::vector& nodes, { AssertLockNotHeld(m_total_bytes_sent_mutex); + auto now = GetTime(); + for (CNode* pnode : nodes) { if (interruptNet) return; @@ -2228,7 +2267,7 @@ void CConnman::SocketHandlerConnected(const std::vector& nodes, } } - if (InactivityCheck(*pnode)) pnode->fDisconnect = true; + if (InactivityCheck(*pnode, now)) pnode->fDisconnect = true; } } @@ -2285,7 +2324,7 @@ void CConnman::ThreadDNSAddressSeed() break; } - outbound_connection_count = GetFullOutboundConnCount(); + outbound_connection_count = GetBIP110FullOutboundConnCount(); if (outbound_connection_count >= SEED_OUTBOUND_CONNECTION_THRESHOLD) { LogPrintf("P2P peers available. Finished fetching data from seed nodes.\n"); break; @@ -2340,7 +2379,7 @@ void CConnman::ThreadDNSAddressSeed() if (!interruptNet.sleep_for(w)) return; to_wait -= w; - if (GetFullOutboundConnCount() >= SEED_OUTBOUND_CONNECTION_THRESHOLD) { + if (GetBIP110FullOutboundConnCount() >= SEED_OUTBOUND_CONNECTION_THRESHOLD) { if (found > 0) { LogPrintf("%d addresses found from DNS seeds\n", found); LogPrintf("P2P peers available. 
Finished DNS seeding.\n"); @@ -2453,14 +2492,15 @@ void CConnman::StartExtraBlockRelayPeers() m_start_extra_block_relay_peers = true; } -// Return the number of outbound connections that are full relay (not blocks only) -int CConnman::GetFullOutboundConnCount() const +// Return the number of BIP110 outbound connections that are full relay (not blocks only). +// Non-BIP110 outbound peers are excluded as they are "additional" and don't count toward limits. +int CConnman::GetBIP110FullOutboundConnCount() const { int nRelevant = 0; { LOCK(m_nodes_mutex); for (const CNode* pnode : m_nodes) { - if (pnode->fSuccessfullyConnected && pnode->IsFullOutboundConn()) ++nRelevant; + if (pnode->fSuccessfullyConnected && pnode->IsFullOutboundConn() && !pnode->m_is_non_bip110_outbound) ++nRelevant; } } return nRelevant; @@ -2668,7 +2708,8 @@ void CConnman::ThreadOpenConnections(const std::vector connect, Spa { LOCK(m_nodes_mutex); for (const CNode* pnode : m_nodes) { - if (pnode->IsFullOutboundConn()) nOutboundFullRelay++; + // Non-BIP110 outbound peers are "additional" - don't count toward limits + if (pnode->IsFullOutboundConn() && !pnode->m_is_non_bip110_outbound) nOutboundFullRelay++; if (pnode->IsBlockOnlyConn()) nOutboundBlockRelay++; // Make sure our persistent outbound slots to ipv4/ipv6 peers belong to different netgroups. 
@@ -3508,6 +3549,8 @@ void CConnman::StopThreads() void CConnman::StopNodes() { + AssertLockNotHeld(m_reconnections_mutex); + if (fAddressesInitialized) { DumpAddresses(); fAddressesInitialized = false; @@ -3535,6 +3578,7 @@ void CConnman::StopNodes() DeleteNode(pnode); } m_nodes_disconnected.clear(); + WITH_LOCK(m_reconnections_mutex, m_reconnections.clear()); vhListenSocket.clear(); semOutbound.reset(); semAddnode.reset(); @@ -3567,15 +3611,9 @@ std::vector CConnman::GetAddresses(size_t max_addresses, size_t max_pc std::vector CConnman::GetAddresses(CNode& requestor, size_t max_addresses, size_t max_pct) { auto local_socket_bytes = requestor.addrBind.GetAddrBytes(); - uint64_t cache_id = GetDeterministicRandomizer(RANDOMIZER_ID_ADDRCACHE) - .Write(requestor.ConnectedThroughNetwork()) - .Write(local_socket_bytes) - // For outbound connections, the port of the bound address is randomly - // assigned by the OS and would therefore not be useful for seeding. - .Write(requestor.IsInboundConn() ? requestor.addrBind.GetPort() : 0) - .Finalize(); + uint64_t network_id = requestor.m_network_key; const auto current_time = GetTime(); - auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{}); + auto r = m_addr_response_caches.emplace(network_id, CachedAddrResponse{}); CachedAddrResponse& cache_entry = r.first->second; if (cache_entry.m_cache_entry_expiration < current_time) { // If emplace() added new one it has expiration 0. 
cache_entry.m_addrs_response_cache = GetAddresses(max_addresses, max_pct, /*network=*/std::nullopt); @@ -3864,6 +3902,7 @@ CNode::CNode(NodeId idIn, const std::string& addrNameIn, ConnectionType conn_type_in, bool inbound_onion, + uint64_t network_key, CNodeOptions&& node_opts) : m_transport{MakeTransport(idIn, node_opts.use_v2transport, conn_type_in == ConnectionType::INBOUND)}, m_permission_flags{node_opts.permission_flags}, @@ -3877,6 +3916,7 @@ CNode::CNode(NodeId idIn, m_prefer_evict{node_opts.prefer_evict}, m_forced_inbound{node_opts.forced_inbound}, nKeyedNetGroup{nKeyedNetGroupIn}, + m_network_key{network_key}, m_conn_type{conn_type_in}, id{idIn}, nLocalHostNonce{nLocalHostNonceIn}, @@ -3938,7 +3978,7 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) AssertLockNotHeld(m_total_bytes_sent_mutex); size_t nMessageSize = msg.data.size(); LogDebug(BCLog::NET, "sending %s (%d bytes) peer=%d\n", msg.m_type, nMessageSize, pnode->GetId()); - if (gArgs.GetBoolArg("-capturemessages", false)) { + if (m_capture_messages) { CaptureMessage(pnode->addr, msg.m_type, msg.data, /*is_incoming=*/false); } diff --git a/src/net.h b/src/net.h index 7f13b4fe61..7184567d2a 100644 --- a/src/net.h +++ b/src/net.h @@ -97,6 +97,9 @@ static constexpr bool DEFAULT_V2_TRANSPORT{true}; typedef int64_t NodeId; +/** Get the score of a local address. */ +int GetnScore(const CService& addr); + struct AddedNodeParams { std::string m_added_node; bool m_use_v2transport; @@ -744,6 +747,10 @@ class CNode std::atomic_bool fPauseRecv{false}; std::atomic_bool fPauseSend{false}; + /** Network key used to prevent fingerprinting our node across networks. + * Influenced by the network and the bind address (+ bind port for inbounds) */ + const uint64_t m_network_key; + const ConnectionType m_conn_type; /** Move all messages from the received queue to the processing queue. */ @@ -858,6 +865,10 @@ class CNode /** Whether this peer provides all services that we want. 
Used for eviction decisions */ std::atomic_bool m_has_all_wanted_services{false}; + /** Whether this is a non-BIP110 outbound peer (lacks NODE_REDUCED_DATA). + * Used to exclude from outbound connection counts. Limited to 2 such peers. */ + std::atomic_bool m_is_non_bip110_outbound{false}; + /** Whether we should relay transactions to this peer. This only changes * from false to true. It will never change back to false. */ std::atomic_bool m_relays_txs{false}; @@ -895,6 +906,7 @@ class CNode const std::string& addrNameIn, ConnectionType conn_type_in, bool inbound_onion, + uint64_t network_key, CNodeOptions&& node_opts = {}); CNode(const CNode&) = delete; CNode& operator=(const CNode&) = delete; @@ -1099,6 +1111,7 @@ class CConnman std::vector vWhiteBinds; std::vector vBinds; std::vector onion_binds; + bool listenonion{false}; /// True if the user did not specify -bind= or -whitebind= and thus /// we should bind on `0.0.0.0` (IPv4) and `::` (IPv6). bool bind_on_any; @@ -1108,6 +1121,7 @@ class CConnman bool m_i2p_accept_incoming; bool whitelist_forcerelay = DEFAULT_WHITELISTFORCERELAY; bool whitelist_relay = DEFAULT_WHITELISTRELAY; + bool m_capture_messages = false; bool disable_v1conn_clearnet = false; }; @@ -1143,12 +1157,18 @@ class CConnman m_added_node_params.push_back({added_node, use_v2transport}); } } + m_normal_binds = connOptions.vBinds; m_onion_binds = connOptions.onion_binds; + m_listenonion = connOptions.listenonion; whitelist_forcerelay = connOptions.whitelist_forcerelay; whitelist_relay = connOptions.whitelist_relay; + m_capture_messages = connOptions.m_capture_messages; disable_v1conn_clearnet = connOptions.disable_v1conn_clearnet; } + // test only + void SetCaptureMessages(bool cap) { m_capture_messages = cap; } + CConnman(uint64_t seed0, uint64_t seed1, AddrMan& addrman, const NetGroupManager& netgroupman, const CChainParams& params, bool network_active = true); @@ -1157,9 +1177,10 @@ class CConnman bool Start(CScheduler& scheduler, const Options& 
options) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !m_added_nodes_mutex, !m_addr_fetches_mutex, !mutexMsgProc); void StopThreads(); - void StopNodes(); - void Stop() + void StopNodes() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex); + void Stop() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex) { + AssertLockNotHeld(m_reconnections_mutex); StopThreads(); StopNodes(); }; @@ -1223,8 +1244,8 @@ class CConnman void StartExtraBlockRelayPeers(); - // Count the number of full-relay peer we have. - int GetFullOutboundConnCount() const; + // Count the number of BIP110 full-relay peers we have (excludes non-BIP110 peers). + int GetBIP110FullOutboundConnCount() const; // Return the number of outbound peers we have in excess of our target (eg, // if we previously called SetTryNewOutboundPeer(true), and have since set // to false, we may have extra peers that we wish to disconnect). This may @@ -1302,7 +1323,7 @@ class CConnman void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc); /** Return true if we should disconnect the peer for failing an inactivity check. */ - bool ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const; + bool ShouldRunInactivityChecks(const CNode& node, std::chrono::microseconds now) const; bool MultipleManualOrFullOutboundConns(Network net) const EXCLUSIVE_LOCKS_REQUIRED(m_nodes_mutex); @@ -1355,7 +1376,7 @@ class CConnman void DisconnectNodes() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex, !m_nodes_mutex); void NotifyNumConnectionsChanged(); /** Return true if the peer is inactive and should be disconnected. */ - bool InactivityCheck(const CNode& node) const; + bool InactivityCheck(const CNode& node, std::chrono::microseconds now) const; /** * Generate a collection of sockets to check for IO readiness. 
@@ -1408,7 +1429,7 @@ class CConnman bool AttemptToEvictConnection(bool force); CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); - void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector& ranges) const; + void AddWhitelistPermissionFlags(NetPermissionFlags& flags, std::optional addr, const std::vector& ranges) const; void DeleteNode(CNode* pnode); @@ -1617,11 +1638,14 @@ class CConnman */ std::atomic_bool m_start_extra_block_relay_peers{false}; + std::vector m_normal_binds; + /** * A vector of -bind=
:=onion arguments each of which is * an address and port that are designated for incoming Tor connections. */ std::vector m_onion_binds; + bool m_listenonion; /** * flag for adding 'forcerelay' permission to whitelisted inbound @@ -1635,6 +1659,11 @@ class CConnman */ bool whitelist_relay; + /** + * flag for whether messages are captured + */ + bool m_capture_messages{false}; + /** * option for disabling outbound v1 connections on IPV4 and IPV6. * outbound connections on IPV4/IPV6 need to be v2 connections. diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 86e9980884..ff15ad22bd 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -556,12 +556,6 @@ class PeerManagerImpl final : public PeerManager bool via_compact_block, const std::string& message = "") EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - /** - * Potentially disconnect and discourage a node based on the contents of a TxValidationState object - */ - void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - /** Maybe disconnect a peer and discourage future connections from its address. * * @param[in] pnode The node to check. @@ -786,7 +780,7 @@ class PeerManagerImpl final : public PeerManager uint32_t GetFetchFlags(const Peer& peer) const; - std::atomic m_next_inv_to_inbounds{0us}; + std::map m_next_inv_to_inbounds_per_network_key GUARDED_BY(g_msgproc_mutex); /** Number of nodes with fSyncStarted. */ int nSyncStarted GUARDED_BY(cs_main) = 0; @@ -805,6 +799,9 @@ class PeerManagerImpl final : public PeerManager /** Number of peers with wtxid relay. */ std::atomic m_wtxid_relay_peers{0}; + /** Number of outbound peers without NODE_REDUCED_DATA (BIP-110). Limited to 2. */ + std::atomic m_num_non_bip110_outbound{0}; + /** Number of outbound peers with m_chain_sync.m_protect. 
*/ int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; @@ -816,12 +813,14 @@ class PeerManagerImpl final : public PeerManager /** * For sending `inv`s to inbound peers, we use a single (exponentially - * distributed) timer for all peers. If we used a separate timer for each + * distributed) timer for all peers with the same network key. If we used a separate timer for each * peer, a spy node could make multiple inbound connections to us to - * accurately determine when we received the transaction (and potentially - * determine the transaction's origin). */ + * accurately determine when we received a transaction (and potentially + * determine the transaction's origin). Each network key has its own timer + * to make fingerprinting harder. */ std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, - std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); + std::chrono::seconds average_interval, + uint64_t network_key) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); // All of the following cache a recent block, and are protected by m_most_recent_block_mutex @@ -1124,15 +1123,15 @@ static bool CanServeWitnesses(const Peer& peer) } std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now, - std::chrono::seconds average_interval) + std::chrono::seconds average_interval, + uint64_t network_key) { - if (m_next_inv_to_inbounds.load() < now) { - // If this function were called from multiple threads simultaneously - // it would possible that both update the next send variable, and return a different result to their caller. - // This is not possible in practice as only the net processing thread invokes this function. 
- m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval); + auto [it, inserted] = m_next_inv_to_inbounds_per_network_key.try_emplace(network_key, 0us); + auto& timer{it->second}; + if (timer < now) { + timer = now + m_rng.rand_exp_duration(average_interval); } - return m_next_inv_to_inbounds; + return timer; } bool PeerManagerImpl::IsBlockRequested(const uint256& hash) @@ -1598,6 +1597,11 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) assert(peer != nullptr); m_wtxid_relay_peers -= peer->m_wtxid_relay; assert(m_wtxid_relay_peers >= 0); + // Decrement non-BIP110 counter if this was a non-BIP110 outbound peer + if (node.m_is_non_bip110_outbound) { + --m_num_non_bip110_outbound; + assert(m_num_non_bip110_outbound >= 0); + } } CNodeState *state = State(nodeid); assert(state != nullptr); @@ -1866,32 +1870,6 @@ void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati } } -void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state) -{ - PeerRef peer{GetPeerRef(nodeid)}; - switch (state.GetResult()) { - case TxValidationResult::TX_RESULT_UNSET: - break; - // The node is providing invalid data: - case TxValidationResult::TX_CONSENSUS: - HandleDoSPunishment(m_connman, nodeid, 100, "transaction"); - return; - // Conflicting (but not necessarily invalid) data or different policy: - case TxValidationResult::TX_INPUTS_NOT_STANDARD: - case TxValidationResult::TX_NOT_STANDARD: - case TxValidationResult::TX_MISSING_INPUTS: - case TxValidationResult::TX_PREMATURE_SPEND: - case TxValidationResult::TX_WITNESS_MUTATED: - case TxValidationResult::TX_WITNESS_STRIPPED: - case TxValidationResult::TX_CONFLICT: - case TxValidationResult::TX_MEMPOOL_POLICY: - case TxValidationResult::TX_NO_MEMPOOL: - case TxValidationResult::TX_RECONSIDERABLE: - case TxValidationResult::TX_UNKNOWN: - break; - } -} - bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex) { AssertLockHeld(cs_main); @@ -3065,8 +3043,6 
@@ std::optional PeerManagerImpl::ProcessInvalidTx(NodeId if (peer) AddKnownTx(*peer, parent_txid); } - MaybePunishNodeForTx(nodeid, state); - return package_to_validate; } @@ -3393,7 +3369,21 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl } PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; - ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn); + + if (partialBlock.header.IsNull()) { + // It is possible for the header to be empty if a previous call to FillBlock wiped the header, but left + // the PartiallyDownloadedBlock pointer around (i.e. did not call RemoveBlockRequest). In this case, we + // should not call LookupBlockIndex below. + RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); + Misbehaving(peer, "previous compact block reconstruction attempt failed"); + LogDebug(BCLog::NET, "Peer %d sent compact block transactions multiple times", pfrom.GetId()); + return; + } + + // We should not have gotten this far in compact block processing unless it's attached to a known header + const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))}; + ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn, + /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); if (status == READ_STATUS_INVALID) { RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect Misbehaving(peer, "invalid compact block/non-matching block transactions"); @@ -3401,6 +3391,9 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl } else if (status == READ_STATUS_FAILED) { if (first_in_flight) { // Might have collided, fall back to getdata now :( + // We keep the failed partialBlock to disallow processing another compact block announcement from the same + // peer 
for the same block. We let the full block download below continue under the same m_downloading_since + // timer. std::vector invs; invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash); MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs); @@ -3410,23 +3403,7 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl return; } } else { - // Block is either okay, or possibly we received - // READ_STATUS_CHECKBLOCK_FAILED. - // Note that CheckBlock can only fail for one of a few reasons: - // 1. bad-proof-of-work (impossible here, because we've already - // accepted the header) - // 2. merkleroot doesn't match the transactions given (already - // caught in FillBlock with READ_STATUS_FAILED, so - // impossible here) - // 3. the block is otherwise invalid (eg invalid coinbase, - // block is too big, too many legacy sigops, etc). - // So if CheckBlock failed, #3 is the only possibility. - // Under BIP 152, we don't discourage the peer unless proof of work is - // invalid (we don't require all the stateless checks to have - // been run). This is handled below, so just treat this as - // though the block was successfully read, and rely on the - // handling in ProcessNewBlock to ensure the block index is - // updated, etc. + // Block is okay for further processing RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer fBlockRead = true; // mapBlockSource is used for potentially punishing peers and @@ -3566,6 +3543,19 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices); + // BIP-110: Allow up to 2 non-BIP110 outbound peers. 
+ if (pfrom.ExpectServicesFromConn() && !(nServices & NODE_REDUCED_DATA)) { + if (m_num_non_bip110_outbound >= 2) { + LogDebug(BCLog::NET, "peer lacks NODE_REDUCED_DATA and already have 2 non-BIP110 outbound peers, %s\n", + pfrom.DisconnectMsg(fLogIPs)); + pfrom.fDisconnect = true; + return; + } + ++m_num_non_bip110_outbound; + pfrom.m_is_non_bip110_outbound = true; + LogDebug(BCLog::NET, "connected to non-BIP110 outbound peer (%d/2), %s\n", + m_num_non_bip110_outbound.load(), pfrom.ConnectionTypeAsString()); + } peer->m_their_services = nServices; pfrom.SetAddrLocal(addrMe); peer->m_starting_height = starting_height; @@ -3653,10 +3643,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; - LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n", + LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s%s\n", SanitizeString(cleanSubVer, SAFE_CHARS_DEFAULT, true), pfrom.nVersion, peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(), - pfrom.LogIP(fLogIPs), (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); + fLogIPs ? "," : "", pfrom.LogIP(fLogIPs), + (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); peer->m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now(); if (!pfrom.IsInboundConn()) { @@ -3696,11 +3687,12 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // can be triggered by an attacker at high rate. 
if (!pfrom.IsInboundConn() || LogAcceptCategory(BCLog::NET, BCLog::Level::Debug)) { const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; - LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n", + LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s%s\n", pfrom.ConnectionTypeAsString(), TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type), pfrom.nVersion.load(), peer->m_starting_height, - pfrom.GetId(), pfrom.LogIP(fLogIPs), + pfrom.GetId(), + fLogIPs ? "," : "", pfrom.LogIP(fLogIPs), (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); } @@ -4541,7 +4533,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } std::vector dummy; - status = tempBlock.FillBlock(*pblock, dummy); + const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock))}; + status = tempBlock.FillBlock(*pblock, dummy, + /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); if (status == READ_STATUS_OK) { fBlockReconstructed = true; } @@ -4981,13 +4975,13 @@ bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer) if (pnode.HasPermission(NetPermissionFlags::NoBan)) { // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission - LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id); + LogWarning("Not punishing noban peer %d!", peer.m_id); return false; } if (pnode.IsManualConn()) { // We never disconnect or discourage manual peers for bad behavior - LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id); + LogWarning("Not punishing manually connected peer %d!", peer.m_id); return false; } @@ -5759,7 +5753,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (tx_relay->m_next_inv_send_time < current_time) { fSendTrickle = true; if (pto->IsInboundConn()) { - tx_relay->m_next_inv_send_time = 
NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); + tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL, pto->m_network_key); } else { tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL); } diff --git a/src/netbase.cpp b/src/netbase.cpp index 3212f8ee7c..56a848f723 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -103,7 +103,7 @@ enum Network ParseNetwork(const std::string& net_in) { if (net == "ipv6") return NET_IPV6; if (net == "onion") return NET_ONION; if (net == "tor") { - LogPrintf("Warning: net name 'tor' is deprecated and will be removed in the future. You should use 'onion' instead.\n"); + LogWarning("Net name 'tor' is deprecated and will be removed in the future. You should use 'onion' instead."); return NET_ONION; } if (net == "i2p") { diff --git a/src/node/blockmanager_args.cpp b/src/node/blockmanager_args.cpp index 0d96198c25..127e940175 100644 --- a/src/node/blockmanager_args.cpp +++ b/src/node/blockmanager_args.cpp @@ -49,7 +49,11 @@ util::Result ApplyArgsManOptions(const ArgsManager& args, BlockManager::Op if (const auto prune_during_init{args.GetIntArg("-pruneduringinit")}) { if (*prune_during_init == -1) { opts.prune_target_during_init = -1; - } else if (const auto prune_parsed = ParsePruneOption(*prune_during_init, "-pruneduringinit")) { + } else if (auto prune_parsed = ParsePruneOption(*prune_during_init, "-pruneduringinit")) { + if (!*prune_parsed) { + // We don't actually disable pruning, just treat it as manual until sync completes + *prune_parsed = BlockManager::PRUNE_TARGET_MANUAL; + } // NOTE: PRUNE_TARGET_MANUAL is >int64 max opts.prune_target_during_init = std::min(std::numeric_limits::max(), (int64_t)*prune_parsed); } else { diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 54eb6edbaa..438bfb2a19 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -32,6 +32,7 
@@ #include #include #include +#include #include #include #include @@ -193,21 +194,19 @@ namespace node { bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const { // First sort by most total work, ... - if (pa->nChainWork > pb->nChainWork) return false; - if (pa->nChainWork < pb->nChainWork) return true; + if (pa->nChainWork != pb->nChainWork) { + return pa->nChainWork < pb->nChainWork; + } // ... then by earliest activatable time, ... - if (pa->nSequenceId < pb->nSequenceId) return false; - if (pa->nSequenceId > pb->nSequenceId) return true; + if (pa->nSequenceId != pb->nSequenceId) { + return pa->nSequenceId > pb->nSequenceId; + } // Use pointer address as tie breaker (should only happen with blocks // loaded from disk, as those share the same id: 0 for blocks on the // best chain, 1 for all others). - if (pa < pb) return false; - if (pa > pb) return true; - - // Identical blocks. - return false; + return pa > pb; } bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const @@ -1287,7 +1286,7 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) } else { // Create initial or missing xor key file AutoFile xor_key_file{fsbridge::fopen(xor_key_path, -#ifdef __MINGW64__ +#if 0 "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210 #else "wbx" @@ -1309,7 +1308,7 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) }; } LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(xor_key)); - return std::vector{xor_key.begin(), xor_key.end()}; + return Obfuscation{xor_key}; } BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts) @@ -1394,7 +1393,7 @@ void ImportBlocks(ChainstateManager& chainman, std::span import_ return; } } else { - LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path)); + LogWarning("Could not open blocks file 
%s", fs::PathToString(path)); } } diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 4e95299e47..110180570d 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -253,7 +253,7 @@ class BlockManager const bool m_prune_mode; - const std::vector m_xor_key; + const Obfuscation m_xor_key; /** Dirty block index entries. */ std::set m_dirty_blockindex; diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp index c88bd5bad2..39f6943b5e 100644 --- a/src/node/chainstate.cpp +++ b/src/node/chainstate.cpp @@ -151,7 +151,7 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize } LogPrintf("Setting nMinimumChainWork=%s\n", chainman.MinimumChainWork().GetHex()); if (chainman.MinimumChainWork() < UintToArith256(chainman.GetConsensus().nMinimumChainWork)) { - LogPrintf("Warning: nMinimumChainWork set below default value of %s\n", chainman.GetConsensus().nMinimumChainWork.GetHex()); + LogWarning("nMinimumChainWork set below default value of %s", chainman.GetConsensus().nMinimumChainWork.GetHex()); } if (chainman.m_blockman.GetPruneTarget() == BlockManager::PRUNE_TARGET_MANUAL) { LogPrintf("Block pruning enabled. 
Use RPC call pruneblockchain(height) to manually prune block and undo files.\n"); diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index 5f58fe6a50..7d46831d1e 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -188,7 +189,13 @@ class NodeImpl : public Node }); args().WriteSettingsFile(); } - void mapPort(bool use_upnp, bool use_pcp) override { StartMapPort(use_upnp, use_pcp); } + void mapPort(bool use_upnp, bool use_pcp) override { + if (use_pcp && !MapPortIsProtoEnabled(MapPortProtoFlag::PCP)) { + // Explicitly enabling PCP + g_pcp_warn_for_unauthorized = true; + } + StartMapPort(use_upnp, use_pcp); + } bool getProxy(Network net, Proxy& proxy_info) override { return GetProxy(net, proxy_info); } size_t getNodeCount(ConnectionDirection flags) override { @@ -1003,24 +1010,39 @@ class MinerImpl : public Mining return BlockRef{tip->GetBlockHash(), tip->nHeight}; } - BlockRef waitTipChanged(uint256 current_tip, MillisecondsDouble timeout) override + std::optional waitTipChanged(uint256 current_tip, MillisecondsDouble timeout) override { if (timeout > std::chrono::years{100}) timeout = std::chrono::years{100}; // Upper bound to avoid UB in std::chrono + auto deadline{std::chrono::steady_clock::now() + timeout}; { WAIT_LOCK(notifications().m_tip_block_mutex, lock); - notifications().m_tip_block_cv.wait_for(lock, timeout, [&]() EXCLUSIVE_LOCKS_REQUIRED(notifications().m_tip_block_mutex) { - // We need to wait for m_tip_block to be set AND for the value - // to differ from the current_tip value. - return (notifications().TipBlock() && notifications().TipBlock() != current_tip) || chainman().m_interrupt; + // For callers' convenience, wait longer than the provided timeout + // during startup for the tip to be non-null. That way this function + // always returns valid tip information when possible and only + // returns null when shutting down, not when timing out. 
+ notifications().m_tip_block_cv.wait(lock, [&]() EXCLUSIVE_LOCKS_REQUIRED(notifications().m_tip_block_mutex) { + return notifications().TipBlock() || chainman().m_interrupt; + }); + if (chainman().m_interrupt) return {}; + // At this point TipBlock is set, so continue to wait until it is + // different from `current_tip` provided by caller. + notifications().m_tip_block_cv.wait_until(lock, deadline, [&]() EXCLUSIVE_LOCKS_REQUIRED(notifications().m_tip_block_mutex) { + return Assume(notifications().TipBlock()) != current_tip || chainman().m_interrupt; }); } - // Must release m_tip_block_mutex before locking cs_main, to avoid deadlocks. - LOCK(::cs_main); - return BlockRef{chainman().ActiveChain().Tip()->GetBlockHash(), chainman().ActiveChain().Tip()->nHeight}; + + if (chainman().m_interrupt) return {}; + + // Must release m_tip_block_mutex before getTip() locks cs_main, to + // avoid deadlocks. + return getTip(); } std::unique_ptr createNewBlock(const BlockCreateOptions& options) override { + // Ensure m_tip_block is set so consumers of BlockTemplate can rely on that. 
+ if (!waitTipChanged(uint256::ZERO, MillisecondsDouble::max())) return {}; + BlockAssembler::Options assemble_options{options}; ApplyArgsManOptions(*Assert(m_node.args), assemble_options); return createNewBlock2(assemble_options); diff --git a/src/node/mempool_args.cpp b/src/node/mempool_args.cpp index 1d82d8f442..f2f36931cf 100644 --- a/src/node/mempool_args.cpp +++ b/src/node/mempool_args.cpp @@ -207,6 +207,10 @@ util::Result ApplyArgsManOptions(const ArgsManager& argsman, const CChainP if (argsman.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER)) { mempool_opts.max_datacarrier_bytes = argsman.GetIntArg("-datacarriersize", MAX_OP_RETURN_RELAY); + if (mempool_opts.max_datacarrier_bytes.value() > MAX_OUTPUT_DATA_SIZE) { + LogWarning("Limiting datacarriersize to %s", MAX_OUTPUT_DATA_SIZE); + mempool_opts.max_datacarrier_bytes = MAX_OUTPUT_DATA_SIZE; + } } else { mempool_opts.max_datacarrier_bytes = std::nullopt; } diff --git a/src/node/mempool_persist.cpp b/src/node/mempool_persist.cpp index eefa9215d9..9d451a49ee 100644 --- a/src/node/mempool_persist.cpp +++ b/src/node/mempool_persist.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -93,7 +94,7 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active try { uint64_t version; file >> version; - std::vector xor_key; + Obfuscation xor_key{}; if (version == MEMPOOL_DUMP_VERSION_NO_XOR_KEY) { // Leave XOR-key empty } else if (version == MEMPOOL_DUMP_VERSION) { @@ -224,9 +225,9 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock const uint64_t version{pool.m_opts.persist_v1_dat ? 
MEMPOOL_DUMP_VERSION_NO_XOR_KEY : MEMPOOL_DUMP_VERSION}; file << version; - std::vector xor_key(8); + Obfuscation xor_key{}; if (!pool.m_opts.persist_v1_dat) { - FastRandomContext{}.fillrand(xor_key); + xor_key = Obfuscation{FastRandomContext{}.randbytes()}; file << xor_key; } file.SetXor(xor_key); diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 0b2247a775..8038ac83a4 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -455,8 +455,8 @@ void BlockAssembler::addPackageTxs(const CTxMemPool& mempool, int& nPackagesSele ++nConsecutiveFailed; - if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight > - m_options.nBlockMaxWeight - BLOCK_FULL_ENOUGH_WEIGHT_DELTA) { + if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight + + BLOCK_FULL_ENOUGH_WEIGHT_DELTA > m_options.nBlockMaxWeight) { // Give up if we're close to full and haven't succeeded in a while break; } diff --git a/src/node/mini_miner.cpp b/src/node/mini_miner.cpp index d7d15554b3..8b15565e26 100644 --- a/src/node/mini_miner.cpp +++ b/src/node/mini_miner.cpp @@ -211,8 +211,7 @@ void MiniMiner::DeleteAncestorPackage(const std::setsecond) { - // If these fail, we must be double-deducting. - Assume(descendant->second.GetModFeesWithAncestors() >= anc->second.GetModifiedFee()); + // If this fails, we must be double-deducting. Don't check fees because negative is possible. 
Assume(descendant->second.GetSizeWithAncestors() >= anc->second.GetTxSize()); descendant->second.UpdateAncestorState(-anc->second.GetTxSize(), -anc->second.GetModifiedFee()); } @@ -236,10 +235,9 @@ void MiniMiner::SanityCheck() const // m_entries, m_entries_by_txid, and m_descendant_set_by_txid all same size Assume(m_entries.size() == m_entries_by_txid.size()); Assume(m_entries.size() == m_descendant_set_by_txid.size()); - // Cached ancestor values should be at least as large as the transaction's own fee and size + // Cached ancestor values should be at least as large as the transaction's own size Assume(std::all_of(m_entries.begin(), m_entries.end(), [](const auto& entry) { - return entry->second.GetSizeWithAncestors() >= entry->second.GetTxSize() && - entry->second.GetModFeesWithAncestors() >= entry->second.GetModifiedFee();})); + return entry->second.GetSizeWithAncestors() >= entry->second.GetTxSize();})); // None of the entries should be to-be-replaced transactions Assume(std::all_of(m_to_be_replaced.begin(), m_to_be_replaced.end(), [&](const auto& txid){return m_entries_by_txid.find(txid) == m_entries_by_txid.end();})); diff --git a/src/node/utxo_snapshot.cpp b/src/node/utxo_snapshot.cpp index ca5491bdc2..d6e878ae7e 100644 --- a/src/node/utxo_snapshot.cpp +++ b/src/node/utxo_snapshot.cpp @@ -32,14 +32,14 @@ bool WriteSnapshotBaseBlockhash(Chainstate& snapshot_chainstate) FILE* file{fsbridge::fopen(write_to, "wb")}; AutoFile afile{file}; if (afile.IsNull()) { - LogPrintf("[snapshot] failed to open base blockhash file for writing: %s\n", + LogError("[snapshot] failed to open base blockhash file for writing: %s", fs::PathToString(write_to)); return false; } afile << *snapshot_chainstate.m_from_snapshot_blockhash; if (afile.fclose() != 0) { - LogPrintf("[snapshot] failed to close base blockhash file %s after writing\n", + LogError("[snapshot] failed to close base blockhash file %s after writing", fs::PathToString(write_to)); return false; } @@ -49,16 +49,16 @@ 
bool WriteSnapshotBaseBlockhash(Chainstate& snapshot_chainstate) std::optional ReadSnapshotBaseBlockhash(fs::path chaindir) { if (!fs::exists(chaindir)) { - LogPrintf("[snapshot] cannot read base blockhash: no chainstate dir " - "exists at path %s\n", fs::PathToString(chaindir)); + LogWarning("[snapshot] cannot read base blockhash: no chainstate dir " + "exists at path %s", fs::PathToString(chaindir)); return std::nullopt; } const fs::path read_from = chaindir / node::SNAPSHOT_BLOCKHASH_FILENAME; const std::string read_from_str = fs::PathToString(read_from); if (!fs::exists(read_from)) { - LogPrintf("[snapshot] snapshot chainstate dir is malformed! no base blockhash file " - "exists at path %s. Try deleting %s and calling loadtxoutset again?\n", + LogWarning("[snapshot] snapshot chainstate dir is malformed! no base blockhash file " + "exists at path %s. Try deleting %s and calling loadtxoutset again?", fs::PathToString(chaindir), read_from_str); return std::nullopt; } @@ -67,7 +67,7 @@ std::optional ReadSnapshotBaseBlockhash(fs::path chaindir) FILE* file{fsbridge::fopen(read_from, "rb")}; AutoFile afile{file}; if (afile.IsNull()) { - LogPrintf("[snapshot] failed to open base blockhash file for reading: %s\n", + LogWarning("[snapshot] failed to open base blockhash file for reading: %s", read_from_str); return std::nullopt; } @@ -76,7 +76,7 @@ std::optional ReadSnapshotBaseBlockhash(fs::path chaindir) int64_t position = afile.tell(); afile.seek(0, SEEK_END); if (position != afile.tell()) { - LogPrintf("[snapshot] warning: unexpected trailing data in %s\n", read_from_str); + LogWarning("[snapshot] unexpected trailing data in %s", read_from_str); } return base_blockhash; } diff --git a/src/policy/coin_age_priority.cpp b/src/policy/coin_age_priority.cpp index 41025b2feb..fe9d7994d3 100644 --- a/src/policy/coin_age_priority.cpp +++ b/src/policy/coin_age_priority.cpp @@ -108,7 +108,7 @@ CTxMemPoolEntry::GetPriority(unsigned int currentHeight) const // This will only 
return accurate results when currentHeight >= the heights // at which all the in-chain inputs of the tx were included in blocks. // Typical usage of GetPriority with chainActive.Height() will ensure this. - int heightDiff = currentHeight - cachedHeight; + int heightDiff = int(currentHeight) - int(cachedHeight); double deltaPriority = ((double)heightDiff*inChainInputValue)/nModSize; double dResult = cachedPriority + deltaPriority; if (dResult < 0) // This should only happen if it was called with an invalid height diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index 01d031a48e..82448342f2 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -423,7 +423,6 @@ void TxConfirmStats::Read(AutoFile& filein, size_t numBuckets) // Read data file and do some very basic sanity checking // buckets and bucketMap are not updated yet, so don't access them // If there is a read failure, we'll just discard this entire object anyway - size_t maxConfirms, maxPeriods; // The current version will store the decay with each individual TxConfirmStats and also keep a scale factor filein >> Using(decay); @@ -444,10 +443,9 @@ void TxConfirmStats::Read(AutoFile& filein, size_t numBuckets) throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count"); } filein >> Using>>(confAvg); - maxPeriods = confAvg.size(); - maxConfirms = scale * maxPeriods; + const size_t maxPeriods = confAvg.size(); - if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) { // one week + if (maxPeriods == 0 || scale > (6 * 24 * 7) / maxPeriods) { // one week throw std::runtime_error("Corrupt estimates file. 
Must maintain estimates for between 1 and 1008 (one week) confirms"); } for (unsigned int i = 0; i < maxPeriods; i++) { @@ -470,6 +468,7 @@ void TxConfirmStats::Read(AutoFile& filein, size_t numBuckets) // to match the number of confirms and buckets resizeInMemoryCounters(numBuckets); + const size_t maxConfirms = scale * maxPeriods; LogDebug(BCLog::ESTIMATEFEE, "Reading estimates: %u buckets counting confirms up to %u blocks\n", numBuckets, maxConfirms); } @@ -567,12 +566,12 @@ CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath std::chrono::hours file_age = GetFeeEstimatorFileAge(); if (file_age > MAX_FILE_AGE && !read_stale_estimates) { - LogPrintf("Fee estimation file %s too old (age=%lld > %lld hours) and will not be used to avoid serving stale estimates.\n", fs::PathToString(m_estimation_filepath), Ticks(file_age), Ticks(MAX_FILE_AGE)); + LogWarning("Fee estimation file %s too old (age=%lld > %lld hours) and will not be used to avoid serving stale estimates.", fs::PathToString(m_estimation_filepath), Ticks(file_age), Ticks(MAX_FILE_AGE)); return; } if (!Read(est_file)) { - LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath)); + LogWarning("Failed to read fee estimates from %s. Continue anyway.", fs::PathToString(m_estimation_filepath)); } } @@ -955,11 +954,11 @@ bool CBlockPolicyEstimator::FlushFeeEstimates() const { AutoFile est_file{fsbridge::fopen(m_estimation_filepath, "wb")}; if (est_file.IsNull() || !Write(est_file)) { - LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath)); + LogWarning("Failed to write fee estimates to %s. Continue anyway.", fs::PathToString(m_estimation_filepath)); (void)est_file.fclose(); return false; } else if (est_file.fclose() != 0) { - LogError("Failed to close fee estimates file %s: %s. 
Continuing anyway.", fs::PathToString(m_estimation_filepath), SysErrorString(errno)); + LogWarning("Failed to close fee estimates file %s: %s. Continuing anyway.", fs::PathToString(m_estimation_filepath), SysErrorString(errno)); return false; } else { LogPrintf("Flushed fee estimates to %s.\n", fs::PathToString(m_estimation_filepath.filename())); diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index f1a4bc5bc1..589ba4c8c7 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -462,6 +462,42 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs, return true; } +bool SpendsNonAnchorWitnessProg(const CTransaction& tx, const CCoinsViewCache& prevouts) +{ + if (tx.IsCoinBase()) { + return false; + } + + int version; + std::vector program; + for (const auto& txin: tx.vin) { + const auto& prev_spk{prevouts.AccessCoin(txin.prevout).out.scriptPubKey}; + + // Note this includes not-yet-defined witness programs. + if (prev_spk.IsWitnessProgram(version, program) && !prev_spk.IsPayToAnchor(version, program)) { + return true; + } + + // For P2SH extract the redeem script and check if it spends a non-Taproot witness program. Note + // this is fine to call EvalScript (as done in AreInputsStandard/IsWitnessStandard) because this + // function is only ever called after IsStandardTx, which checks the scriptsig is pushonly. + if (prev_spk.IsPayToScriptHash()) { + // If EvalScript fails or results in an empty stack, the transaction is invalid by consensus. 
+ std::vector > stack; + if (!EvalScript(stack, txin.scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker{}, SigVersion::BASE) + || stack.empty()) { + continue; + } + const CScript redeem_script{stack.back().begin(), stack.back().end()}; + if (redeem_script.IsWitnessProgram(version, program)) { + return true; + } + } + } + + return false; +} + int64_t GetVirtualTransactionSize(int64_t nWeight, int64_t nSigOpCost, unsigned int bytes_per_sigop) { return (std::max(nWeight, nSigOpCost * bytes_per_sigop) + WITNESS_SCALE_FACTOR - 1) / WITNESS_SCALE_FACTOR; diff --git a/src/policy/policy.h b/src/policy/policy.h index a9c557279e..908c2346e0 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -115,12 +115,12 @@ static constexpr unsigned int DEFAULT_DESCENDANT_SIZE_LIMIT_KVB{101}; /** Default for -datacarrier */ static const bool DEFAULT_ACCEPT_DATACARRIER = true; /** - * Default setting for -datacarriersize. 40 bytes of data, +1 for OP_RETURN, - * +1 for the pushdata opcode. + * Default setting for -datacarriersize. 80 bytes of data, +1 for OP_RETURN, + * +2 for the pushdata opcodes. */ /** Default for -permitbaredatacarrier */ static const bool DEFAULT_PERMITBAREDATACARRIER{false}; -static constexpr unsigned int MAX_OP_RETURN_RELAY{42}; +static const unsigned int MAX_OP_RETURN_RELAY = 83; /** Default for -datacarrierfullcount */ static constexpr bool DEFAULT_DATACARRIER_FULLCOUNT{true}; /** @@ -170,7 +170,8 @@ static constexpr unsigned int STANDARD_SCRIPT_VERIFY_FLAGS{MANDATORY_SCRIPT_VERI SCRIPT_VERIFY_CONST_SCRIPTCODE | SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION | SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS | - SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE}; + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE | + REDUCED_DATA_MANDATORY_VERIFY_FLAGS}; /** For convenience, standard but not mandatory verify flags. 
*/ static constexpr unsigned int STANDARD_NOT_MANDATORY_VERIFY_FLAGS{STANDARD_SCRIPT_VERIFY_FLAGS & ~MANDATORY_SCRIPT_VERIFY_FLAGS}; @@ -215,6 +216,11 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs, * Also enforce a maximum stack item size limit and no annexes for tapscript spends. */ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs, const std::string& reason_prefix, std::string& out_reason, const ignore_rejects_type& ignore_rejects=empty_ignore_rejects); +/** + * Check whether this transaction spends any witness program but P2A, including not-yet-defined ones. + * May return `false` early for consensus-invalid transactions. + */ +bool SpendsNonAnchorWitnessProg(const CTransaction& tx, const CCoinsViewCache& prevouts); /** Compute the virtual transaction size (weight reinterpreted as bytes). */ int64_t GetVirtualTransactionSize(int64_t nWeight, int64_t nSigOpCost, unsigned int bytes_per_sigop); diff --git a/src/protocol.cpp b/src/protocol.cpp index 589ff53efb..9a6093678b 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -103,6 +103,7 @@ static std::string serviceFlagToStr(size_t bit) case NODE_UTREEXO_ARCHIVE: return "UTREEXO_ARCHIVE"; case NODE_UTREEXO_TMP: return "UTREEXO_TMP?"; case NODE_REPLACE_BY_FEE: return "REPLACE_BY_FEE?"; + case NODE_REDUCED_DATA: return "REDUCED_DATA?"; case NODE_MALICIOUS: return "MALICIOUS?"; // Not using default, so we get warned when a case is missing } diff --git a/src/protocol.h b/src/protocol.h index ebfd139c25..47a6a330b3 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -347,6 +347,9 @@ enum ServiceFlags : uint64_t { NODE_REPLACE_BY_FEE = (1 << 26), + // NODE_REDUCED_DATA means the node enforces ReducedData rules as applicable + NODE_REDUCED_DATA = (1 << 27), + NODE_MALICIOUS = (1 << 29), }; @@ -363,7 +366,7 @@ std::vector serviceFlagsToStr(uint64_t flags); * should be updated appropriately to filter for nodes with * desired service flags (compatible with 
our new flags). */ -constexpr ServiceFlags SeedsServiceFlags() { return ServiceFlags(NODE_NETWORK | NODE_WITNESS); } +constexpr ServiceFlags SeedsServiceFlags() { return ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA); } /** * Checks if a peer with the given service flags may be capable of having a diff --git a/src/psbt.cpp b/src/psbt.cpp index 19d855e4c7..7e754e9ed7 100644 --- a/src/psbt.cpp +++ b/src/psbt.cpp @@ -298,7 +298,7 @@ bool PSBTInputSigned(const PSBTInput& input) bool PSBTInputSignedAndVerified(const PartiallySignedTransaction psbt, unsigned int input_index, const PrecomputedTransactionData* txdata) { CTxOut utxo; - assert(psbt.inputs.size() >= input_index); + assert(input_index < psbt.inputs.size()); const PSBTInput& input = psbt.inputs[input_index]; if (input.non_witness_utxo) { diff --git a/src/psbt.h b/src/psbt.h index 6d49864b3c..88a22a84ae 100644 --- a/src/psbt.h +++ b/src/psbt.h @@ -872,7 +872,7 @@ struct PSBTOutput s_tree >> depth; s_tree >> leaf_ver; s_tree >> script; - if (depth > TAPROOT_CONTROL_MAX_NODE_COUNT) { + if (depth > TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED) { throw std::ios_base::failure("Output Taproot tree has as leaf greater than Taproot maximum depth"); } if ((leaf_ver & ~TAPROOT_LEAF_MASK) != 0) { diff --git a/src/qt/CMakeLists.txt b/src/qt/CMakeLists.txt index 6282bd4552..4d1bd8ee1c 100644 --- a/src/qt/CMakeLists.txt +++ b/src/qt/CMakeLists.txt @@ -79,15 +79,20 @@ set(ICO_SPECS 256 64 48 32 24 20 16 ) set(ICNS_SPECS - 1024 512 256 128 32 16 + 1024 ) set(EXTRA_ICON_SPECS 290 256 ) +if(CMAKE_SYSTEM_NAME STREQUAL "Darwin") + set(bitcoin_svg "res/src/bitcoin-mac.svg") +else() + set(bitcoin_svg "res/src/bitcoin.svg") +endif() + set(ICON_SPECS ${ICO_SPECS} ${ICNS_SPECS} ${EXTRA_ICON_SPECS}) list(REMOVE_DUPLICATES ICON_SPECS) -set(bitcoin_svg "res/src/bitcoin.svg") file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${ICON_PATH}") foreach(size IN LISTS ICON_SPECS) set(png "${ICON_PATH}/bitcoin${size}.png") @@ -145,9 +150,9 
@@ add_custom_command( ) add_custom_target(generate_ico DEPENDS "${bitcoin_ico}" "${testnet_ico}") -set(logo_svg "res/src/bitcoinknots-logo.svg") -set(png "${ICON_PATH}/bitcoinknots-logo.png") +set(logo_svg "res/src/bitcoin.svg") set(size 960) +set(png "${ICON_PATH}/bitcoin${size}.png") add_custom_command( OUTPUT ${png} COMMAND "${RSVG_CONVERT}" -f png -d ${size} -p ${size} --output "${png}" "${CMAKE_CURRENT_SOURCE_DIR}/${logo_svg}" diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index e2b0304c28..cfadfc924d 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -394,7 +394,7 @@ void BitcoinApplication::initializeResult(bool success, interfaces::BlockAndHead { qDebug() << __func__ << ": Initialization result: " << success; - if (success) { + if (success && !m_node->shutdownRequested()) { delete m_splash; m_splash = nullptr; diff --git a/src/qt/bitcoin_rendered.qrc b/src/qt/bitcoin_rendered.qrc index 05622f51aa..b5656c395b 100644 --- a/src/qt/bitcoin_rendered.qrc +++ b/src/qt/bitcoin_rendered.qrc @@ -1,6 +1,6 @@ res/rendered_icons/bitcoin256.png - res/rendered_icons/bitcoinknots-logo.png + res/rendered_icons/bitcoin960.png diff --git a/src/qt/bitcoinamountfield.cpp b/src/qt/bitcoinamountfield.cpp index 10d5364295..52ada09b57 100644 --- a/src/qt/bitcoinamountfield.cpp +++ b/src/qt/bitcoinamountfield.cpp @@ -277,10 +277,11 @@ bool BitcoinAmountField::validate() void BitcoinAmountField::setValid(bool valid) { - if (valid) - amount->setStyleSheet(""); - else - amount->setStyleSheet(STYLE_INVALID); + const QString style = valid ? 
QString() : QStringLiteral(STYLE_INVALID); + if (amount->styleSheet() != style) { + // CAUTION: Some Qt styles (Breeze in particular) add event handlers in setStyleSheet, which causes the eventFilter to run infinitely; use a QueuedConnection to change it outside of the eventFilter instead + QMetaObject::invokeMethod(amount, "setStyleSheet", Qt::QueuedConnection, Q_ARG(QString, style)); + } } bool BitcoinAmountField::eventFilter(QObject *object, QEvent *event) diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index fb2922afc4..f4f099e2b5 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -472,7 +472,11 @@ void BitcoinGUI::createActions() //: Label of the input field where the name of the wallet is entered. QString label = tr("Wallet Name"); QString wallet_name = QInputDialog::getText(this, title, label, QLineEdit::Normal, "", &wallet_name_ok); - if (!wallet_name_ok || wallet_name.isEmpty()) return; + if (!wallet_name_ok) return; + if (wallet_name.isEmpty()) { + QMessageBox::critical(nullptr, tr("Invalid Wallet Name"), tr("Wallet name cannot be empty")); + return; + } auto activity = new RestoreWalletActivity(m_wallet_controller, this); connect(activity, &RestoreWalletActivity::restored, this, &BitcoinGUI::setCurrentWallet, Qt::QueuedConnection); diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp index ca462062cf..cc04d09407 100644 --- a/src/qt/clientmodel.cpp +++ b/src/qt/clientmodel.cpp @@ -210,7 +210,7 @@ bool ClientModel::isReleaseVersion() const QString ClientModel::formatClientStartupTime() const { - return QDateTime::fromSecsSinceEpoch(GetStartupTime()).toString(); + return QDateTime::currentDateTime().addSecs(-TicksSeconds(GetUptime())).toString(); } QString ClientModel::dataDir() const diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui index 6450680c7c..576b872f25 100644 --- a/src/qt/forms/debugwindow.ui +++ b/src/qt/forms/debugwindow.ui @@ -573,7 +573,7 @@ - + 0 @@ -1856,6 +1856,10 @@ clear() + + 
PlainCopyTextEdit + QTextEdit + diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui index f992a304c3..c2f9f53665 100644 --- a/src/qt/forms/optionsdialog.ui +++ b/src/qt/forms/optionsdialog.ui @@ -385,23 +385,13 @@ - - - - Automatically open the Bitcoin client port on the router. This only works when your router supports UPnP and it is enabled. - - - Map port using &UPnP - - - Automatically open the Bitcoin client port on the router. This only works when your router supports PCP or NAT-PMP and it is enabled. The external port could be random. - Map port using PCP or NA&T-PMP + Automatically configure router(s) that support PCP or NA&T-PMP diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index 5c2addd82a..cdc2a50bab 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -516,17 +516,22 @@ bool isObscured(QWidget *w) void bringToFront(QWidget* w) { +#ifdef Q_OS_MACOS + ForceActivation(); +#endif + if (w) { +#if (QT_VERSION < QT_VERSION_CHECK(6, 3, 2)) if (QGuiApplication::platformName() == "wayland") { + // Workaround for bug fixed in https://codereview.qt-project.org/c/qt/qtwayland/+/421125 auto flags = w->windowFlags(); w->setWindowFlags(flags|Qt::WindowStaysOnTopHint); w->show(); w->setWindowFlags(flags); w->show(); - } else { -#ifdef Q_OS_MACOS - ForceActivation(); + } else #endif + { // activateWindow() (sometimes) helps with keyboard focus on Windows if (w->isMinimized()) { w->showNormal(); diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index a469dc1ce9..21fef145e3 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -100,6 +100,7 @@ void OptionsDialog::FixTabOrder(QWidget * const o) struct CreateOptionUIOpts { QBoxLayout *horizontal_layout{nullptr}; int stretch{1}; + int insert_at{-1}; int indent{0}; }; @@ -153,7 +154,7 @@ void OptionsDialog::CreateOptionUI(QBoxLayout * const layout, const QString& tex if (opts.stretch) horizontalLayout->addStretch(opts.stretch); - 
layout->addLayout(horizontalLayout); + layout->insertLayout(opts.insert_at, horizontalLayout); for (auto& o : objs) { o->setProperty("L", QVariant::fromValue((QLayout*)horizontalLayout)); @@ -266,10 +267,6 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) connect(ui->networkPort, SIGNAL(textChanged(const QString&)), this, SLOT(checkLineEdit())); /* Network elements init */ -#ifndef USE_UPNP - ui->mapPortUpnp->setEnabled(false); -#endif - ui->proxyIp->setEnabled(false); ui->proxyPort->setEnabled(false); ui->proxyPort->setValidator(new QIntValidator(1, 65535, this)); @@ -298,8 +295,29 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) ui->verticalLayout_Wallet->insertWidget(0, walletrbf); FixTabOrder(walletrbf); + QStyleOptionButton styleoptbtn; + const auto checkbox_indent = ui->allowIncoming->style()->subElementRect(QStyle::SE_CheckBoxIndicator, &styleoptbtn, ui->allowIncoming).width(); + /* Network tab */ QLayoutItem *spacer = ui->verticalLayout_Network->takeAt(ui->verticalLayout_Network->count() - 1); + + prevwidget = ui->allowIncoming; + ui->verticalLayout_Network->removeWidget(ui->mapPortNatpmp); + int insert_at = ui->verticalLayout_Network->indexOf(ui->connectSocks); + // NOTE: Re-inserted in bottom-to-top order + CreateOptionUI(ui->verticalLayout_Network, QStringLiteral("%1"), {ui->mapPortNatpmp}, { .insert_at=insert_at, .indent=checkbox_indent, }); + upnp = new QCheckBox(ui->tabNetwork); + upnp->setText(tr("Automatically configure router(s) that support &UPnP")); + upnp->setToolTip(tr("Automatically open the Bitcoin client port on the router. 
This only works when your router supports UPnP and it is enabled.")); +#ifndef USE_UPNP + upnp->setEnabled(false); +#endif + CreateOptionUI(ui->verticalLayout_Network, QStringLiteral("%1"), {upnp}, { .insert_at=insert_at, .indent=checkbox_indent, }); + connect(ui->allowIncoming, &QPushButton::toggled, upnp, &QWidget::setEnabled); + connect(ui->allowIncoming, &QPushButton::toggled, ui->mapPortNatpmp, &QWidget::setEnabled); + upnp->setEnabled(ui->allowIncoming->isChecked()); + ui->mapPortNatpmp->setEnabled(ui->allowIncoming->isChecked()); + prevwidget = dynamic_cast(ui->verticalLayout_Network->itemAt(ui->verticalLayout_Network->count() - 1))->widget(); blockreconstructionextratxn = new QSpinBox(ui->tabNetwork); @@ -378,7 +396,7 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) rejectunknownwitness = new QCheckBox(groupBox_Spamfiltering); rejectunknownwitness->setText(tr("Reject unknown witness script versions")); - rejectunknownwitness->setToolTip(tr("Some attempts to spam Bitcoin intentionally use undefined witness script formats reserved for future use. By enabling this option, your node will reject transactions using these undefined/future versions. Note that if you send to many addressses in a single transaction, the entire transaction may be rejected if any single one of them attempts to use an undefined format.")); + rejectunknownwitness->setToolTip(tr("Some attempts to spam Bitcoin intentionally use undefined witness script formats reserved for future use. By enabling this option, your node will reject transactions using these undefined/future versions. 
Note that if you send to many addresses in a single transaction, the entire transaction may be rejected if any single one of them attempts to use an undefined format.")); verticalLayout_Spamfiltering->addWidget(rejectunknownwitness); FixTabOrder(rejectunknownwitness); @@ -552,9 +570,6 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) dustdynamic_multiplier->setValue(DEFAULT_DUST_RELAY_MULTIPLIER / 1000.0); CreateOptionUI(verticalLayout_Spamfiltering, tr("%1 Automatically adjust the dust limit upward to %2 times:"), {dustdynamic_enable, dustdynamic_multiplier}); - QStyleOptionButton styleoptbtn; - const auto checkbox_indent = dustdynamic_enable->style()->subElementRect(QStyle::SE_CheckBoxIndicator, &styleoptbtn, dustdynamic_enable).width(); - dustdynamic_target = new QRadioButton(groupBox_Spamfiltering); dustdynamic_target_blocks = new QSpinBox(groupBox_Spamfiltering); dustdynamic_target_blocks->setMinimum(2); @@ -851,7 +866,7 @@ void OptionsDialog::setMapper() /* Network */ mapper->addMapping(ui->networkPort, OptionsModel::NetworkPort); - mapper->addMapping(ui->mapPortUpnp, OptionsModel::MapPortUPnP); + mapper->addMapping(upnp, OptionsModel::MapPortUPnP); mapper->addMapping(ui->mapPortNatpmp, OptionsModel::MapPortNatpmp); mapper->addMapping(ui->allowIncoming, OptionsModel::Listen); mapper->addMapping(ui->enableServer, OptionsModel::Server); diff --git a/src/qt/optionsdialog.h b/src/qt/optionsdialog.h index 231de3aa4a..f64603fa36 100644 --- a/src/qt/optionsdialog.h +++ b/src/qt/optionsdialog.h @@ -123,6 +123,7 @@ private Q_SLOTS: QCheckBox *walletrbf; + QCheckBox *upnp; QSpinBox *blockreconstructionextratxn; QDoubleSpinBox *blockreconstructionextratxnsize; diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp index dc5eea348f..5afd127ddd 100644 --- a/src/qt/optionsmodel.cpp +++ b/src/qt/optionsmodel.cpp @@ -851,13 +851,17 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value, const std:: case MapPortUPnP: // core option - 
can be changed on-the-fly if (changed()) { update(value.toBool()); - node().mapPort(value.toBool(), getOption(MapPortNatpmp).toBool()); + if (gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) { + node().mapPort(value.toBool(), getOption(MapPortNatpmp).toBool()); + } } break; case MapPortNatpmp: // core option - can be changed on-the-fly if (changed()) { update(value.toBool()); - node().mapPort(getOption(MapPortUPnP).toBool(), value.toBool()); + if (gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) { + node().mapPort(getOption(MapPortUPnP).toBool(), value.toBool()); + } } break; case MinimizeOnClose: diff --git a/src/qt/res/src/bitcoin-mac.svg b/src/qt/res/src/bitcoin-mac.svg new file mode 100644 index 0000000000..a8162fd75c --- /dev/null +++ b/src/qt/res/src/bitcoin-mac.svg @@ -0,0 +1,37 @@ + + + + + + + + + + + + + + + + + + + + + + 110 + + + + + + diff --git a/src/qt/res/src/nsis-header.svg b/src/qt/res/src/nsis-header.svg index 88cda9a691..c98d251864 100644 --- a/src/qt/res/src/nsis-header.svg +++ b/src/qt/res/src/nsis-header.svg @@ -17,6 +17,6 @@ - Bitcoin - Knots + PyBLOCK + BIP110 diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index 64a5e5dea2..62fcfaf3c7 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -92,6 +92,9 @@ namespace { // don't add private key handling cmd's to the history const QStringList historyFilter = QStringList() + << "createwallet" + << "createwalletdescriptor" + << "migratewallet" << "importprivkey" << "importmulti" << "sethdseed" diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h index 969c204ff2..0be4923bcf 100644 --- a/src/qt/rpcconsole.h +++ b/src/qt/rpcconsole.h @@ -15,6 +15,9 @@ #include #include +#include +#include +#include #include #include @@ -219,4 +222,20 @@ private Q_SLOTS: void updateAlerts(const QString& warnings); }; +/** + * A version of QTextEdit that only populates plaintext mime data from a + * selection, this avoids some bad behavior in QT's HTML->Markdown conversion. 
+ */ +class PlainCopyTextEdit : public QTextEdit { + Q_OBJECT +public: + using QTextEdit::QTextEdit; +protected: + QMimeData* createMimeDataFromSelection() const override { + auto md = new QMimeData(); + md->setText(textCursor().selection().toPlainText()); + return md; + } +}; + #endif // BITCOIN_QT_RPCCONSOLE_H diff --git a/src/qt/splashscreen.cpp b/src/qt/splashscreen.cpp index ce516d87d0..8f26ede832 100644 --- a/src/qt/splashscreen.cpp +++ b/src/qt/splashscreen.cpp @@ -55,12 +55,12 @@ SplashScreen::SplashScreen(const NetworkStyle* networkStyle) pixmap.setDevicePixelRatio(devicePixelRatio); QPainter pixPaint(&pixmap); - pixPaint.setPen(QColor(0x17, 0x17, 0x17)); + pixPaint.setPen(QColor(0xF7, 0x93, 0x1A)); // draw a slightly radial gradient QRadialGradient gradient(QPoint(0,0), splashSize.width()/devicePixelRatio); gradient.setColorAt(0, networkStyle->AdjustColour(QColor(0xff, 0xff, 0xff))); - gradient.setColorAt(1, networkStyle->AdjustColour(QColor(0xff, 0xff, 0xff))); + gradient.setColorAt(1, networkStyle->AdjustColour(QColor(0x0f, 0x0f, 0x0f))); QRect rGradient(QPoint(0,0), splashSize); pixPaint.fillRect(rGradient, gradient); @@ -200,7 +200,7 @@ static void InitMessage(SplashScreen *splash, const std::string &message) Qt::QueuedConnection, Q_ARG(QString, QString::fromStdString(message)), Q_ARG(int, Qt::AlignBottom|Qt::AlignHCenter), - Q_ARG(QColor, QColor(55,55,55))); + Q_ARG(QColor, QColor(220,220,220))); assert(invoked); } diff --git a/src/qt/transactionfilterproxy.cpp b/src/qt/transactionfilterproxy.cpp index 173fd326a3..a17d70130c 100644 --- a/src/qt/transactionfilterproxy.cpp +++ b/src/qt/transactionfilterproxy.cpp @@ -58,28 +58,65 @@ bool TransactionFilterProxy::filterAcceptsRow(int sourceRow, const QModelIndex & void TransactionFilterProxy::setDateRange(const std::optional& from, const std::optional& to) { +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + beginFilterChange(); +#endif + dateFrom = from; dateTo = to; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 
10, 0) + endFilterChange(QSortFilterProxyModel::Direction::Rows); +#else invalidateFilter(); +#endif } void TransactionFilterProxy::setSearchString(const QString &search_string) { if (m_search_string == search_string) return; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + beginFilterChange(); +#endif + m_search_string = search_string; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + endFilterChange(QSortFilterProxyModel::Direction::Rows); +#else invalidateFilter(); +#endif } void TransactionFilterProxy::setTypeFilter(quint32 modes) { +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + beginFilterChange(); +#endif + this->typeFilter = modes; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + endFilterChange(QSortFilterProxyModel::Direction::Rows); +#else invalidateFilter(); +#endif } void TransactionFilterProxy::setMinAmount(const CAmount& minimum) { +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + beginFilterChange(); +#endif + this->minAmount = minimum; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + endFilterChange(QSortFilterProxyModel::Direction::Rows); +#else invalidateFilter(); +#endif } void TransactionFilterProxy::setWatchOnlyFilter(WatchOnlyFilter filter) @@ -90,6 +127,15 @@ void TransactionFilterProxy::setWatchOnlyFilter(WatchOnlyFilter filter) void TransactionFilterProxy::setShowInactive(bool _showInactive) { +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + beginFilterChange(); +#endif + this->showInactive = _showInactive; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + endFilterChange(QSortFilterProxyModel::Direction::Rows); +#else invalidateFilter(); +#endif } diff --git a/src/qt/walletcontroller.cpp b/src/qt/walletcontroller.cpp index cc9e8e1b92..afce912f37 100644 --- a/src/qt/walletcontroller.cpp +++ b/src/qt/walletcontroller.cpp @@ -474,7 +474,7 @@ void MigrateWalletActivity::migrate(const std::string& name) auto res{node().walletLoader().migrateWallet(name, passphrase)}; if (res) { - m_success_message = tr("The wallet '%1' was migrated 
successfully.").arg(GUIUtil::HtmlEscape(GUIUtil::WalletDisplayName(res->wallet->getWalletName()))); + m_success_message = tr("The wallet '%1' was migrated successfully.").arg(GUIUtil::HtmlEscape(GUIUtil::WalletDisplayName(name))); if (res->watchonly_wallet_name) { m_success_message += QChar(' ') + tr("Watchonly scripts have been migrated to a new wallet named '%1'.").arg(GUIUtil::HtmlEscape(GUIUtil::WalletDisplayName(res->watchonly_wallet_name.value()))); } diff --git a/src/random.h b/src/random.h index 203678b17c..a83733b773 100644 --- a/src/random.h +++ b/src/random.h @@ -301,6 +301,15 @@ class RandomMixin return ret; } + /** Generate fixed-size random bytes. */ + template + std::array randbytes() noexcept + { + std::array ret; + Impl().fillrand(MakeWritableByteSpan(ret)); + return ret; + } + /** Generate a random 32-bit integer. */ uint32_t rand32() noexcept { return Impl().template randbits<32>(); } diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 242ad6ce7e..05890057d1 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -82,6 +82,7 @@ using kernel::CCoinsStats; using kernel::CoinStatsHashType; +using interfaces::BlockRef; using interfaces::Mining; using node::BlockManager; using node::NodeContext; @@ -186,7 +187,7 @@ UniValue blockheaderToJSON(const CBlockIndex& tip, const CBlockIndex& blockindex result.pushKV("mediantime", blockindex.GetMedianTimePast()); result.pushKV("nonce", blockindex.nNonce); result.pushKV("bits", strprintf("%08x", blockindex.nBits)); - result.pushKV("target", GetTarget(tip, pow_limit).GetHex()); + result.pushKV("target", GetTarget(blockindex, pow_limit).GetHex()); result.pushKV("difficulty", GetDifficulty(blockindex)); result.pushKV("chainwork", blockindex.nChainWork.GetHex()); result.pushKV("nTx", blockindex.nTx); @@ -322,11 +323,13 @@ static RPCHelpMan waitfornewblock() ? 
block.hash : ParseHashV(request.params[1], "current_tip")}; - if (IsRPCRunning()) { - // If the user provided an invalid current_tip then this call immediately - // returns the current tip. - block = timeout ? miner.waitTipChanged(tip_hash, std::chrono::milliseconds(timeout)) : miner.waitTipChanged(tip_hash); - } + // If the user provided an invalid current_tip then this call immediately + // returns the current tip. + std::optional new_block = timeout ? miner.waitTipChanged(tip_hash, std::chrono::milliseconds(timeout)) : + miner.waitTipChanged(tip_hash); + + // Return current block upon shutdown + if (new_block) block = *new_block; UniValue ret(UniValue::VOBJ); ret.pushKV("hash", block.hash.GetHex()); @@ -371,15 +374,19 @@ static RPCHelpMan waitforblock() auto block{CHECK_NONFATAL(miner.getTip()).value()}; const auto deadline{std::chrono::steady_clock::now() + 1ms * timeout}; - while (IsRPCRunning() && block.hash != hash) { + while (block.hash != hash) { + std::optional new_block; if (timeout) { auto now{std::chrono::steady_clock::now()}; if (now >= deadline) break; const MillisecondsDouble remaining{deadline - now}; - block = miner.waitTipChanged(block.hash, remaining); + new_block = miner.waitTipChanged(block.hash, remaining); } else { - block = miner.waitTipChanged(block.hash); + new_block = miner.waitTipChanged(block.hash); } + // Return current block upon shutdown + if (!new_block) break; + block = *new_block; } UniValue ret(UniValue::VOBJ); @@ -427,15 +434,19 @@ static RPCHelpMan waitforblockheight() auto block{CHECK_NONFATAL(miner.getTip()).value()}; const auto deadline{std::chrono::steady_clock::now() + 1ms * timeout}; - while (IsRPCRunning() && block.height < height) { + while (block.height < height) { + std::optional new_block; if (timeout) { auto now{std::chrono::steady_clock::now()}; if (now >= deadline) break; const MillisecondsDouble remaining{deadline - now}; - block = miner.waitTipChanged(block.hash, remaining); + new_block = 
miner.waitTipChanged(block.hash, remaining); } else { - block = miner.waitTipChanged(block.hash); + new_block = miner.waitTipChanged(block.hash); } + // Return current block on shutdown + if (!new_block) break; + block = *new_block; } UniValue ret(UniValue::VOBJ); @@ -1668,6 +1679,7 @@ static void SoftForkDescPushBack(const CBlockIndex* blockindex, UniValue& softfo case ThresholdState::LOCKED_IN: return "locked_in"; case ThresholdState::ACTIVE: return "active"; case ThresholdState::FAILED: return "failed"; + case ThresholdState::EXPIRED: return "expired"; } return "invalid"; }; @@ -1686,6 +1698,9 @@ static void SoftForkDescPushBack(const CBlockIndex* blockindex, UniValue& softfo bip9.pushKV("start_time", chainman.GetConsensus().vDeployments[id].nStartTime); bip9.pushKV("timeout", chainman.GetConsensus().vDeployments[id].nTimeout); bip9.pushKV("min_activation_height", chainman.GetConsensus().vDeployments[id].min_activation_height); + if (chainman.GetConsensus().vDeployments[id].max_activation_height < std::numeric_limits::max()) { + bip9.pushKV("max_activation_height", chainman.GetConsensus().vDeployments[id].max_activation_height); + } // BIP9 status bip9.pushKV("status", get_state_name(current_state)); @@ -1718,7 +1733,13 @@ static void SoftForkDescPushBack(const CBlockIndex* blockindex, UniValue& softfo UniValue rv(UniValue::VOBJ); rv.pushKV("type", "bip9"); if (ThresholdState::ACTIVE == next_state) { - rv.pushKV("height", chainman.m_versionbitscache.StateSinceHeight(blockindex, chainman.GetConsensus(), id)); + const int activation_height = chainman.m_versionbitscache.StateSinceHeight(blockindex, chainman.GetConsensus(), id); + rv.pushKV("height", activation_height); + // Add height_end for temporary softforks + const auto& deployment = chainman.GetConsensus().vDeployments[id]; + if (deployment.active_duration < std::numeric_limits::max()) { + rv.pushKV("height_end", activation_height + deployment.active_duration - 1); + } } rv.pushKV("active", 
ThresholdState::ACTIVE == next_state); rv.pushKV("bip9", std::move(bip9)); @@ -1815,7 +1836,8 @@ RPCHelpMan getblockchaininfo() namespace { const std::vector RPCHelpForDeployment{ {RPCResult::Type::STR, "type", "one of \"buried\", \"bip9\""}, - {RPCResult::Type::NUM, "height", /*optional=*/true, "height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)"}, + {RPCResult::Type::NUM, "height", /*optional=*/true, "height of the first block which enforces the rules (only for \"buried\" type, or \"bip9\" type with \"active\" status)"}, + {RPCResult::Type::NUM, "height_end", /*optional=*/true, "height of the last block which enforces the rules (only for \"bip9\" type with \"active\" status and temporary deployments)"}, {RPCResult::Type::BOOL, "active", "true if the rules are enforced for the mempool and the next block"}, {RPCResult::Type::OBJ, "bip9", /*optional=*/true, "status of bip9 softforks (only for \"bip9\" type)", { @@ -1823,7 +1845,8 @@ const std::vector RPCHelpForDeployment{ {RPCResult::Type::NUM_TIME, "start_time", "the minimum median time past of a block at which the bit gains its meaning"}, {RPCResult::Type::NUM_TIME, "timeout", "the median time past of a block at which the deployment is considered failed if not yet locked in"}, {RPCResult::Type::NUM, "min_activation_height", "minimum height of blocks for which the rules may be enforced"}, - {RPCResult::Type::STR, "status", "status of deployment at specified block (one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\")"}, + {RPCResult::Type::NUM, "max_activation_height", /*optional=*/true, "height at which the deployment will unconditionally activate (absent for miner-vetoable deployments)"}, + {RPCResult::Type::STR, "status", "status of deployment at specified block (one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\", \"expired\")"}, {RPCResult::Type::NUM, "since", "height of the first block to 
which the status applies"}, {RPCResult::Type::STR, "status_next", "status of deployment at the next block"}, {RPCResult::Type::OBJ, "statistics", /*optional=*/true, "numeric statistics about signalling for a softfork (only for \"started\" and \"locked_in\" status)", @@ -1849,6 +1872,7 @@ UniValue DeploymentInfo(const CBlockIndex* blockindex, const ChainstateManager& SoftForkDescPushBack(blockindex, softforks, chainman, Consensus::DEPLOYMENT_SEGWIT); SoftForkDescPushBack(blockindex, softforks, chainman, Consensus::DEPLOYMENT_TESTDUMMY); SoftForkDescPushBack(blockindex, softforks, chainman, Consensus::DEPLOYMENT_TAPROOT); + SoftForkDescPushBack(blockindex, softforks, chainman, Consensus::DEPLOYMENT_REDUCED_DATA); return softforks; } } // anon namespace diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index e26de95b08..43a0b01a0e 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -179,6 +180,7 @@ static RPCHelpMan testmempoolaccept() {RPCResult::Type::BOOL, "allowed", /*optional=*/true, "Whether this tx would be accepted to the mempool and pass client-specified maxfeerate. " "If not present, the tx was not fully validated due to a failure in another tx in the list."}, {RPCResult::Type::NUM, "vsize", /*optional=*/true, "Virtual transaction size as defined in BIP 141. 
This is different from actual serialized size for witness transactions as witness data is discounted (only present when 'allowed' is true)"}, + {RPCResult::Type::NUM, "usage", "Memory usage of transaction for this node"}, {RPCResult::Type::OBJ, "fees", /*optional=*/true, "Transaction fees (only present if 'allowed' is true)", { {RPCResult::Type::STR_AMOUNT, "base", "transaction fee in " + CURRENCY_UNIT}, @@ -254,6 +256,7 @@ static RPCHelpMan testmempoolaccept() UniValue result_inner(UniValue::VOBJ); result_inner.pushKV("txid", tx->GetHash().GetHex()); result_inner.pushKV("wtxid", tx->GetWitnessHash().GetHex()); + result_inner.pushKV("usage", RecursiveDynamicUsage(tx)); if (package_result.m_state.GetResult() == PackageValidationResult::PCKG_POLICY) { result_inner.pushKV("package-error", package_result.m_state.ToString()); } diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 152aa836ab..cf9f5cf10f 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -48,6 +48,7 @@ #include #include +using interfaces::BlockRef; using interfaces::BlockTemplate; using interfaces::Mining; using node::BlockAssembler; @@ -861,7 +862,10 @@ static RPCHelpMan getblocktemplate() { MillisecondsDouble checktxtime{std::chrono::minutes(1)}; while (tip == hashWatchedChain && IsRPCRunning()) { - tip = miner.waitTipChanged(hashWatchedChain, checktxtime).hash; + std::optional maybe_tip{miner.waitTipChanged(hashWatchedChain, checktxtime)}; + // Node is shutting down + if (!maybe_tip) break; + tip = maybe_tip->hash; // Timeout: Check transactions for update // without holding the mempool lock to avoid deadlocks if (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) @@ -1016,12 +1020,14 @@ static UniValue TemplateToJSON(const Consensus::Params& consensusParams, const C } UniValue vbavailable(UniValue::VOBJ); + uint32_t vbrequired = 0; for (int j = 0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { Consensus::DeploymentPos pos = Consensus::DeploymentPos(j); 
ThresholdState state = chainman.m_versionbitscache.State(pindexPrev, consensusParams, pos); switch (state) { case ThresholdState::DEFINED: case ThresholdState::FAILED: + case ThresholdState::EXPIRED: // Not exposed to GBT at all break; case ThresholdState::LOCKED_IN: @@ -1032,6 +1038,9 @@ static UniValue TemplateToJSON(const Consensus::Params& consensusParams, const C { const struct VBDeploymentInfo& vbinfo = VersionBitsDeploymentInfo[pos]; vbavailable.pushKV(gbt_vb_name(pos), consensusParams.vDeployments[pos].bit); + if (DeploymentMustSignalAfter(pindexPrev, consensusParams, pos, state)) { + vbrequired |= chainman.m_versionbitscache.Mask(consensusParams, pos); + } if (setClientRules.find(vbinfo.name) == setClientRules.end()) { if (!vbinfo.gbt_force) { // If the client doesn't support this, don't indicate it in the [default] version @@ -1058,7 +1067,7 @@ static UniValue TemplateToJSON(const Consensus::Params& consensusParams, const C result.pushKV("version", block_header.nVersion); result.pushKV("rules", std::move(aRules)); result.pushKV("vbavailable", std::move(vbavailable)); - result.pushKV("vbrequired", int(0)); + result.pushKV("vbrequired", vbrequired); result.pushKV("previousblockhash", block.hashPrevBlock.GetHex()); result.pushKV("transactions", std::move(transactions)); diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 8a903b4452..4d2b69b8c4 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -1057,7 +1057,7 @@ static RPCHelpMan addpeeraddress() if (net_addr.has_value()) { CService service{net_addr.value(), port}; - CAddress address{MaybeFlipIPv6toCJDNS(service), ServiceFlags{NODE_NETWORK | NODE_WITNESS}}; + CAddress address{MaybeFlipIPv6toCJDNS(service), ServiceFlags{NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA}}; address.nTime = Now(); // The source address is set equal to the address. This is equivalent to the peer // announcing itself. 
diff --git a/src/rpc/node.cpp b/src/rpc/node.cpp index c52f15ada7..a13a90bd52 100644 --- a/src/rpc/node.cpp +++ b/src/rpc/node.cpp @@ -224,7 +224,7 @@ static RPCHelpMan getgeneralinfo() obj.pushKV("useragent", strSubVersion); obj.pushKV("datadir", fs::PathToString(args.GetDataDirNet())); obj.pushKV("blocksdir", fs::PathToString(args.GetBlocksDirPath())); - obj.pushKV("startuptime", GetStartupTime()); + obj.pushKV("startuptime", TicksSinceEpoch(NodeClock::now() - GetUptime())); return obj; }, }; diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index e464d9d021..adae441aa3 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -1525,7 +1525,7 @@ static RPCHelpMan finalizepsbt() return RPCHelpMan{"finalizepsbt", "Finalize the inputs of a PSBT. If the transaction is fully signed, it will produce a\n" "network serialized transaction which can be broadcast with sendrawtransaction. Otherwise a PSBT will be\n" - "created which has the final_scriptSig and final_scriptWitness fields filled for inputs that are complete.\n" + "created which has the final_scriptSig and final_scriptwitness fields filled for inputs that are complete.\n" "Implements the Finalizer and Extractor roles.\n", { {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"}, diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index f3a1eceee3..4507dc90a5 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -200,7 +200,7 @@ static RPCHelpMan uptime() }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - return GetTime() - GetStartupTime(); + return TicksSeconds(GetUptime()); } }; } diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 88966b1a3c..391780034b 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -1967,8 +1967,8 @@ std::vector> ParseScript(uint32_t& key_exp_index // First process all open braces. 
while (Const("{", expr)) { branches.push_back(false); // new left branch - if (branches.size() > TAPROOT_CONTROL_MAX_NODE_COUNT) { - error = strprintf("tr() supports at most %i nesting levels", TAPROOT_CONTROL_MAX_NODE_COUNT); + if (branches.size() > TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED) { + error = strprintf("tr() supports at most %i nesting levels", TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED); return {}; } } diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 360f6fd9a0..eab89bbdbc 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -433,6 +433,8 @@ bool EvalScript(std::vector >& stack, const CScript& execdata.m_codeseparator_pos = 0xFFFFFFFFUL; execdata.m_codeseparator_pos_init = true; + const unsigned int max_element_size = (flags & SCRIPT_VERIFY_REDUCED_DATA) ? MAX_SCRIPT_ELEMENT_SIZE_REDUCED : MAX_SCRIPT_ELEMENT_SIZE; + try { for (; pc < pend; ++opcode_pos) { @@ -443,7 +445,7 @@ bool EvalScript(std::vector >& stack, const CScript& // if (!script.GetOp(pc, opcode, vchPushValue)) return set_error(serror, SCRIPT_ERR_BAD_OPCODE); - if (vchPushValue.size() > MAX_SCRIPT_ELEMENT_SIZE) + if (vchPushValue.size() > max_element_size) return set_error(serror, SCRIPT_ERR_PUSH_SIZE); if (sigversion == SigVersion::BASE || sigversion == SigVersion::WITNESS_V0) { @@ -616,6 +618,11 @@ bool EvalScript(std::vector >& stack, const CScript& if (vch.size() > 1 || (vch.size() == 1 && vch[0] != 1)) { return set_error(serror, SCRIPT_ERR_TAPSCRIPT_MINIMALIF); } + // REDUCED_DATA bans OP_IF/OP_NOTIF entirely in tapscript; + // reuses MINIMALIF error code as this is a stricter form of the same restriction + if (flags & SCRIPT_VERIFY_REDUCED_DATA) { + return set_error(serror, SCRIPT_ERR_TAPSCRIPT_MINIMALIF); + } } // Under witness v0 rules it is only a policy rule, enabled through SCRIPT_VERIFY_MINIMALIF. 
if (sigversion == SigVersion::WITNESS_V0 && (flags & SCRIPT_VERIFY_MINIMALIF)) { @@ -1564,11 +1571,57 @@ bool SignatureHashSchnorr(uint256& hash_out, ScriptExecutionData& execdata, cons return true; } +int SigHashCache::CacheIndex(int32_t hash_type) const noexcept +{ + // Note that we do not distinguish between BASE and WITNESS_V0 to determine the cache index, + // because no input can simultaneously use both. + return 3 * !!(hash_type & SIGHASH_ANYONECANPAY) + + 2 * ((hash_type & 0x1f) == SIGHASH_SINGLE) + + 1 * ((hash_type & 0x1f) == SIGHASH_NONE); +} + +bool SigHashCache::Load(int32_t hash_type, const CScript& script_code, HashWriter& writer) const noexcept +{ + auto& entry = m_cache_entries[CacheIndex(hash_type)]; + if (entry.has_value()) { + if (script_code == entry->first) { + writer = HashWriter(entry->second); + return true; + } + } + return false; +} + +void SigHashCache::Store(int32_t hash_type, const CScript& script_code, const HashWriter& writer) noexcept +{ + auto& entry = m_cache_entries[CacheIndex(hash_type)]; + entry.emplace(script_code, writer); +} + template -uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache) +uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache, SigHashCache* sighash_cache) { assert(nIn < txTo.vin.size()); + if (sigversion != SigVersion::WITNESS_V0) { + // Check for invalid use of SIGHASH_SINGLE + if ((nHashType & 0x1f) == SIGHASH_SINGLE) { + if (nIn >= txTo.vout.size()) { + // nOut out of range + return uint256::ONE; + } + } + } + + HashWriter ss{}; + + // Try to compute using cached SHA256 midstate. + if (sighash_cache && sighash_cache->Load(nHashType, scriptCode, ss)) { + // Add sighash type and hash. 
+ ss << nHashType; + return ss.GetHash(); + } + if (sigversion == SigVersion::WITNESS_V0) { uint256 hashPrevouts; uint256 hashSequence; @@ -1583,16 +1636,14 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn hashSequence = cacheready ? cache->hashSequence : SHA256Uint256(GetSequencesSHA256(txTo)); } - if ((nHashType & 0x1f) != SIGHASH_SINGLE && (nHashType & 0x1f) != SIGHASH_NONE) { hashOutputs = cacheready ? cache->hashOutputs : SHA256Uint256(GetOutputsSHA256(txTo)); } else if ((nHashType & 0x1f) == SIGHASH_SINGLE && nIn < txTo.vout.size()) { - HashWriter ss{}; - ss << txTo.vout[nIn]; - hashOutputs = ss.GetHash(); + HashWriter inner_ss{}; + inner_ss << txTo.vout[nIn]; + hashOutputs = inner_ss.GetHash(); } - HashWriter ss{}; // Version ss << txTo.version; // Input prevouts/nSequence (none/all, depending on flags) @@ -1609,26 +1660,21 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn ss << hashOutputs; // Locktime ss << txTo.nLockTime; - // Sighash type - ss << nHashType; + } else { + // Wrapper to serialize only the necessary parts of the transaction being signed + CTransactionSignatureSerializer txTmp(txTo, scriptCode, nIn, nHashType); - return ss.GetHash(); + // Serialize + ss << txTmp; } - // Check for invalid use of SIGHASH_SINGLE - if ((nHashType & 0x1f) == SIGHASH_SINGLE) { - if (nIn >= txTo.vout.size()) { - // nOut out of range - return uint256::ONE; - } + // If a cache object was provided, store the midstate there. + if (sighash_cache != nullptr) { + sighash_cache->Store(nHashType, scriptCode, ss); } - // Wrapper to serialize only the necessary parts of the transaction being signed - CTransactionSignatureSerializer txTmp(txTo, scriptCode, nIn, nHashType); - - // Serialize and hash - HashWriter ss{}; - ss << txTmp << nHashType; + // Add sighash type and hash. 
+ ss << nHashType; return ss.GetHash(); } @@ -1665,7 +1711,7 @@ bool GenericTransactionSignatureChecker::CheckECDSASignature(const std::vecto // Witness sighashes need the amount. if (sigversion == SigVersion::WITNESS_V0 && amount < 0) return HandleMissingData(m_mdb); - uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata); + uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata, &m_sighash_cache); if (!VerifyECDSASignature(vchSig, pubkey, sighash)) return false; @@ -1819,8 +1865,9 @@ static bool ExecuteWitnessScript(const Span& stack_span, const CS } // Disallow stack item size > MAX_SCRIPT_ELEMENT_SIZE in witness stack + const unsigned int max_element_size = (flags & SCRIPT_VERIFY_REDUCED_DATA) ? MAX_SCRIPT_ELEMENT_SIZE_REDUCED : MAX_SCRIPT_ELEMENT_SIZE; for (const valtype& elem : stack) { - if (elem.size() > MAX_SCRIPT_ELEMENT_SIZE) return set_error(serror, SCRIPT_ERR_PUSH_SIZE); + if (elem.size() > max_element_size) return set_error(serror, SCRIPT_ERR_PUSH_SIZE); } // Run the script interpreter. 
@@ -1914,6 +1961,9 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, if (stack.size() >= 2 && !stack.back().empty() && stack.back()[0] == ANNEX_TAG) { // Drop annex (this is non-standard; see IsWitnessStandard) const valtype& annex = SpanPopBack(stack); + if (flags & SCRIPT_VERIFY_REDUCED_DATA) { + return set_error(serror, SCRIPT_ERR_PUSH_SIZE); + } execdata.m_annex_hash = (HashWriter{} << annex).GetSHA256(); execdata.m_annex_present = true; } else { @@ -1930,7 +1980,8 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, // Script path spending (stack size is >1 after removing optional annex) const valtype& control = SpanPopBack(stack); const valtype& script = SpanPopBack(stack); - if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > TAPROOT_CONTROL_MAX_SIZE || ((control.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) { + const unsigned int max_control_size = (flags & SCRIPT_VERIFY_REDUCED_DATA) ? 
TAPROOT_CONTROL_MAX_SIZE_REDUCED : TAPROOT_CONTROL_MAX_SIZE; + if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > max_control_size || ((control.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) { return set_error(serror, SCRIPT_ERR_TAPROOT_WRONG_CONTROL_SIZE); } execdata.m_tapleaf_hash = ComputeTapleafHash(control[0] & TAPROOT_LEAF_MASK, script); @@ -1950,7 +2001,7 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, } return set_success(serror); } - } else if (!is_p2sh && CScript::IsPayToAnchor(witversion, program)) { + } else if (stack.empty() && !is_p2sh && CScript::IsPayToAnchor(witversion, program)) { return true; } else { if (flags & SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM) { @@ -1979,6 +2030,12 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C // scriptSig and scriptPubKey must be evaluated sequentially on the same stack // rather than being simply concatenated (see CVE-2010-5141) std::vector > stack, stackCopy; + if (scriptPubKey.IsPayToScriptHash()) { + // Disable SCRIPT_VERIFY_REDUCED_DATA for pushing the P2SH redeemScript + if (!EvalScript(stack, scriptSig, flags & ~SCRIPT_VERIFY_REDUCED_DATA, checker, SigVersion::BASE, serror)) + // serror is set + return false; + } else if (!EvalScript(stack, scriptSig, flags, checker, SigVersion::BASE, serror)) // serror is set return false; @@ -2030,6 +2087,15 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C CScript pubKey2(pubKeySerialized.begin(), pubKeySerialized.end()); popstack(stack); + if (flags & SCRIPT_VERIFY_REDUCED_DATA) { + // We bypassed the reduced data check above to exempt redeemScript + // Now enforce it on the rest of the stack items here + // This is sufficient because P2SH requires scriptSig to be push-only + for (const valtype& elem : stack) { + if (elem.size() > MAX_SCRIPT_ELEMENT_SIZE_REDUCED) return set_error(serror, SCRIPT_ERR_PUSH_SIZE); + } + } 
+ if (!EvalScript(stack, pubKey2, flags, checker, SigVersion::BASE, serror)) // serror is set return false; diff --git a/src/script/interpreter.h b/src/script/interpreter.h index 5b20a78d98..392914b71f 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -143,11 +143,25 @@ enum : uint32_t { // Making unknown public key versions (in BIP 342 scripts) non-standard SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE = (1U << 20), + // Enforce MAX_SCRIPT_ELEMENT_SIZE_REDUCED instead of MAX_SCRIPT_ELEMENT_SIZE + // The P2SH redeemScript push is exempted + // Taproot control blocks are limited to TAPROOT_CONTROL_MAX_SIZE_REDUCED + // Taproot annex is also invalid + // OP_IF is also forbidden inside Tapscript + SCRIPT_VERIFY_REDUCED_DATA = (1U << 21), + // Constants to point to the highest flag in use. Add new flags above this line. // SCRIPT_VERIFY_END_MARKER }; +static constexpr unsigned int REDUCED_DATA_MANDATORY_VERIFY_FLAGS{0 + | SCRIPT_VERIFY_REDUCED_DATA + | SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM + | SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION + | SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS +}; + bool CheckSignatureEncoding(const std::vector &vchSig, unsigned int flags, ScriptError* serror); struct PrecomputedTransactionData @@ -234,13 +248,34 @@ static constexpr size_t TAPROOT_CONTROL_BASE_SIZE = 33; static constexpr size_t TAPROOT_CONTROL_NODE_SIZE = 32; static constexpr size_t TAPROOT_CONTROL_MAX_NODE_COUNT = 128; static constexpr size_t TAPROOT_CONTROL_MAX_SIZE = TAPROOT_CONTROL_BASE_SIZE + TAPROOT_CONTROL_NODE_SIZE * TAPROOT_CONTROL_MAX_NODE_COUNT; +static constexpr size_t TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED = 7; +static constexpr size_t TAPROOT_CONTROL_MAX_SIZE_REDUCED = TAPROOT_CONTROL_BASE_SIZE + TAPROOT_CONTROL_NODE_SIZE * TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED; extern const HashWriter HASHER_TAPSIGHASH; //!< Hasher with tag "TapSighash" pre-fed to it. 
extern const HashWriter HASHER_TAPLEAF; //!< Hasher with tag "TapLeaf" pre-fed to it. extern const HashWriter HASHER_TAPBRANCH; //!< Hasher with tag "TapBranch" pre-fed to it. +/** Data structure to cache SHA256 midstates for the ECDSA sighash calculations + * (bare, P2SH, P2WPKH, P2WSH). */ +class SigHashCache +{ + /** For each sighash mode (ALL, SINGLE, NONE, ALL|ANYONE, SINGLE|ANYONE, NONE|ANYONE), + * optionally store a scriptCode which the hash is for, plus a midstate for the SHA256 + * computation just before adding the hash_type itself. */ + std::optional> m_cache_entries[6]; + + /** Given a hash_type, find which of the 6 cache entries is to be used. */ + int CacheIndex(int32_t hash_type) const noexcept; + +public: + /** Load into writer the SHA256 midstate if found in this cache. */ + [[nodiscard]] bool Load(int32_t hash_type, const CScript& script_code, HashWriter& writer) const noexcept; + /** Store into this cache object the provided SHA256 midstate. */ + void Store(int32_t hash_type, const CScript& script_code, const HashWriter& writer) noexcept; +}; + template -uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr); +uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr, SigHashCache* sighash_cache = nullptr); class BaseSignatureChecker { @@ -289,6 +324,7 @@ class GenericTransactionSignatureChecker : public BaseSignatureChecker unsigned int nIn; const CAmount amount; const PrecomputedTransactionData* txdata; + mutable SigHashCache m_sighash_cache; protected: virtual bool VerifyECDSASignature(const std::vector& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const; diff --git a/src/script/script.h b/src/script/script.h index f38d158119..2e532f9c55 100644 --- 
a/src/script/script.h +++ b/src/script/script.h @@ -26,6 +26,7 @@ // Maximum number of bytes pushable to the stack static const unsigned int MAX_SCRIPT_ELEMENT_SIZE = 520; +static const unsigned int MAX_SCRIPT_ELEMENT_SIZE_REDUCED = 256; // Maximum number of non-push operations per script static const int MAX_OPS_PER_SCRIPT = 201; diff --git a/src/script/signingprovider.cpp b/src/script/signingprovider.cpp index d029ee1a96..e2f85adfa0 100644 --- a/src/script/signingprovider.cpp +++ b/src/script/signingprovider.cpp @@ -365,7 +365,7 @@ void TaprootBuilder::Insert(TaprootBuilder::NodeInfo&& node, int depth) // as what Insert() performs on the m_branch variable. Instead of // storing a NodeInfo object, just remember whether or not there is one // at that depth. - if (depth < 0 || (size_t)depth > TAPROOT_CONTROL_MAX_NODE_COUNT) return false; + if (depth < 0 || (size_t)depth > TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED) return false; if ((size_t)depth + 1 < branch.size()) return false; while (branch.size() > (size_t)depth && branch[depth]) { branch.pop_back(); @@ -478,7 +478,7 @@ std::optional, int>>> Inf // Skip script records with nonsensical leaf version. if (leaf_ver < 0 || leaf_ver >= 0x100 || leaf_ver & 1) continue; // Skip script records with invalid control block sizes. - if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > TAPROOT_CONTROL_MAX_SIZE || + if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > TAPROOT_CONTROL_MAX_SIZE_REDUCED || ((control.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) continue; // Skip script records that don't match the control block. 
if ((control[0] & TAPROOT_LEAF_MASK) != leaf_ver) continue; diff --git a/src/secp256k1/cmake/CheckX86_64Assembly.cmake b/src/secp256k1/cmake/CheckX86_64Assembly.cmake index ae82cd476e..ca18919e06 100644 --- a/src/secp256k1/cmake/CheckX86_64Assembly.cmake +++ b/src/secp256k1/cmake/CheckX86_64Assembly.cmake @@ -4,10 +4,11 @@ function(check_x86_64_assembly) check_c_source_compiles(" #include - int main() + int main(void) { - uint64_t a = 11, tmp; + uint64_t a = 11, tmp = 0; __asm__ __volatile__(\"movq $0x100000000,%1; mulq %%rsi\" : \"+a\"(a) : \"S\"(tmp) : \"cc\", \"%rdx\"); + return 0; } " HAVE_X86_64_ASM) set(HAVE_X86_64_ASM ${HAVE_X86_64_ASM} PARENT_SCOPE) diff --git a/src/secp256k1/src/modules/schnorrsig/main_impl.h b/src/secp256k1/src/modules/schnorrsig/main_impl.h index 82bba2f597..78c57e553c 100644 --- a/src/secp256k1/src/modules/schnorrsig/main_impl.h +++ b/src/secp256k1/src/modules/schnorrsig/main_impl.h @@ -189,6 +189,7 @@ static int secp256k1_schnorrsig_sign_internal(const secp256k1_context* ctx, unsi secp256k1_scalar_clear(&k); secp256k1_scalar_clear(&sk); secp256k1_memclear(seckey, sizeof(seckey)); + secp256k1_memclear(buf, sizeof(buf)); secp256k1_gej_clear(&rj); return ret; diff --git a/src/streams.cpp b/src/streams.cpp index 4165419332..b9fb0b7562 100644 --- a/src/streams.cpp +++ b/src/streams.cpp @@ -7,11 +7,11 @@ #include #include #include +#include #include -AutoFile::AutoFile(std::FILE* file, std::vector data_xor) - : m_file{file}, m_xor{std::move(data_xor)} +AutoFile::AutoFile(std::FILE* file, const Obfuscation& obfuscation) : m_file{file}, m_obfuscation{obfuscation} { if (!IsNull()) { auto pos{std::ftell(m_file)}; @@ -23,9 +23,9 @@ std::size_t AutoFile::detail_fread(Span dst) { if (!m_file) throw std::ios_base::failure("AutoFile::read: file handle is nullptr"); size_t ret = std::fread(dst.data(), 1, dst.size(), m_file); - if (!m_xor.empty()) { + if (m_obfuscation) { if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::read: 
position unknown"); - util::Xor(dst.subspan(0, ret), m_xor, *m_position); + m_obfuscation(dst.subspan(0, ret), *m_position); } if (m_position.has_value()) *m_position += ret; return ret; @@ -82,7 +82,7 @@ void AutoFile::ignore(size_t nSize) void AutoFile::write(Span src) { if (!m_file) throw std::ios_base::failure("AutoFile::write: file handle is nullptr"); - if (m_xor.empty()) { + if (!m_obfuscation) { if (std::fwrite(src.data(), 1, src.size(), m_file) != src.size()) { throw std::ios_base::failure("AutoFile::write: write failed"); } @@ -102,9 +102,9 @@ void AutoFile::write(Span src) void AutoFile::write_buffer(std::span src) { if (!m_file) throw std::ios_base::failure("AutoFile::write_buffer: file handle is nullptr"); - if (m_xor.size()) { + if (m_obfuscation) { if (!m_position) throw std::ios_base::failure("AutoFile::write_buffer: obfuscation position unknown"); - util::Xor(src, m_xor, *m_position); // obfuscate in-place + m_obfuscation(src, *m_position); // obfuscate in-place } if (std::fwrite(src.data(), 1, src.size(), m_file) != src.size()) { throw std::ios_base::failure("AutoFile::write_buffer: write failed"); diff --git a/src/streams.h b/src/streams.h index 66bc23771d..26996b1493 100644 --- a/src/streams.h +++ b/src/streams.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -28,27 +29,6 @@ #include #include -namespace util { -inline void Xor(Span write, Span key, size_t key_offset = 0) -{ - if (key.size() == 0) { - return; - } - key_offset %= key.size(); - - for (size_t i = 0, j = key_offset; i != write.size(); i++) { - write[i] ^= key[j++]; - - // This potentially acts on very many bytes of data, so it's - // important that we calculate `j`, i.e. the `key` index in this - // way instead of doing a %, which would effectively be a division - // for each byte Xor'd -- much slower than need be. 
- if (j == key.size()) - j = 0; - } -} -} // namespace util - /* Minimal stream for overwriting and/or appending to an existing byte vector * * The referenced vector will grow as necessary @@ -277,9 +257,9 @@ class DataStream * * @param[in] key The key used to XOR the data in this stream. */ - void Xor(const std::vector& key) + void Xor(const Obfuscation& key) { - util::Xor(MakeWritableByteSpan(*this), MakeByteSpan(key)); + key(*this); } /** Compute total memory usage of this object (own memory + any dynamic memory). */ @@ -402,12 +382,12 @@ class AutoFile { protected: std::FILE* m_file; - std::vector m_xor; + Obfuscation m_obfuscation; std::optional m_position; bool m_was_written{false}; public: - explicit AutoFile(std::FILE* file, std::vector data_xor={}); + explicit AutoFile(std::FILE* file, const Obfuscation& obfuscation = {}); ~AutoFile() { @@ -455,7 +435,7 @@ class AutoFile bool IsNull() const { return m_file == nullptr; } /** Continue with a different XOR key */ - void SetXor(std::vector data_xor) { m_xor = data_xor; } + void SetXor(const Obfuscation& obfuscation) { m_obfuscation = obfuscation; } /** Implementation detail, only used internally. */ std::size_t detail_fread(Span dst); diff --git a/src/support/allocators/pool.h b/src/support/allocators/pool.h index 873e260b65..9e153fdfad 100644 --- a/src/support/allocators/pool.h +++ b/src/support/allocators/pool.h @@ -153,7 +153,7 @@ class PoolResource final void AllocateChunk() { // if there is still any available memory left, put it into the freelist. 
- size_t remaining_available_bytes = std::distance(m_available_memory_it, m_available_memory_end); + size_t remaining_available_bytes = m_available_memory_end - m_available_memory_it; if (0 != remaining_available_bytes) { PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]); } diff --git a/src/sync.cpp b/src/sync.cpp index 93c9194541..212e4e36d2 100644 --- a/src/sync.cpp +++ b/src/sync.cpp @@ -90,8 +90,8 @@ LockData& GetLockData() { static void potential_deadlock_detected(const LockPair& mismatch, const LockStack& s1, const LockStack& s2) { - LogPrintf("POTENTIAL DEADLOCK DETECTED\n"); - LogPrintf("Previous lock order was:\n"); + LogError("POTENTIAL DEADLOCK DETECTED"); + LogError("Previous lock order was:"); for (const LockStackItem& i : s1) { std::string prefix{}; if (i.first == mismatch.first) { @@ -100,11 +100,11 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac if (i.first == mismatch.second) { prefix = " (2)"; } - LogPrintf("%s %s\n", prefix, i.second.ToString()); + LogError("%s %s", prefix, i.second.ToString()); } std::string mutex_a, mutex_b; - LogPrintf("Current lock order is:\n"); + LogError("Current lock order is:"); for (const LockStackItem& i : s2) { std::string prefix{}; if (i.first == mismatch.first) { @@ -115,7 +115,7 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac prefix = " (2)"; mutex_b = i.second.Name(); } - LogPrintf("%s %s\n", prefix, i.second.ToString()); + LogError("%s %s", prefix, i.second.ToString()); } if (g_debug_lockorder_abort) { tfm::format(std::cerr, "Assertion failed: detected inconsistent lock order for %s, details in debug log.\n", s2.back().second.ToString()); @@ -126,14 +126,14 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac static void double_lock_detected(const void* mutex, const LockStack& lock_stack) { - LogPrintf("DOUBLE LOCK DETECTED\n"); - LogPrintf("Lock order:\n"); + 
LogError("DOUBLE LOCK DETECTED"); + LogError("Lock order:"); for (const LockStackItem& i : lock_stack) { std::string prefix{}; if (i.first == mutex) { prefix = " (*)"; } - LogPrintf("%s %s\n", prefix, i.second.ToString()); + LogError("%s %s", prefix, i.second.ToString()); } if (g_debug_lockorder_abort) { tfm::format(std::cerr, @@ -225,10 +225,10 @@ void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, c } } - LogPrintf("INCONSISTENT LOCK ORDER DETECTED\n"); - LogPrintf("Current lock order (least recent first) is:\n"); + LogError("INCONSISTENT LOCK ORDER DETECTED"); + LogError("Current lock order (least recent first) is:"); for (const LockStackItem& i : lock_stack) { - LogPrintf(" %s\n", i.second.ToString()); + LogError(" %s", i.second.ToString()); } if (g_debug_lockorder_abort) { tfm::format(std::cerr, "%s:%s %s was not most recent critical section locked, details in debug log.\n", file, line, guardname); diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index ed95a8831e..d40a0a94ae 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -95,21 +95,21 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) CBlock block2; { PartiallyDownloadedBlock tmp = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions + BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions partialBlock = tmp; } // Wrong transaction { PartiallyDownloadedBlock tmp = partialBlock; - partialBlock.FillBlock(block2, {block.vtx[2]}); // Current implementation doesn't check txn here, but don't require that + partialBlock.FillBlock(block2, {block.vtx[2]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that partialBlock = tmp; } bool mutated; BOOST_CHECK(block.hashMerkleRoot != BlockMerkleRoot(block2, &mutated)); CBlock block3; - 
BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); @@ -182,14 +182,14 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) CBlock block2; { PartiallyDownloadedBlock tmp = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions + BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions partialBlock = tmp; } // Wrong transaction { PartiallyDownloadedBlock tmp = partialBlock; - partialBlock.FillBlock(block2, {block.vtx[1]}); // Current implementation doesn't check txn here, but don't require that + partialBlock.FillBlock(block2, {block.vtx[1]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that partialBlock = tmp; } BOOST_CHECK_EQUAL(pool.get(block.vtx[2]->GetHash()).use_count(), SHARED_TX_OFFSET + 2); // +2 because of partialBlock and block2 @@ -198,7 +198,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) CBlock block3; PartiallyDownloadedBlock partialBlockCopy = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); @@ -252,7 +252,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) CBlock block2; PartiallyDownloadedBlock partialBlockCopy = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_OK); + 
BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); bool mutated; BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); @@ -300,7 +300,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) CBlock block2; std::vector vtx_missing; - BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); BOOST_CHECK(!mutated); diff --git a/src/test/data/tx_valid.json b/src/test/data/tx_valid.json index 70df0d0f69..547deefe2c 100644 --- a/src/test/data/tx_valid.json +++ b/src/test/data/tx_valid.json @@ -414,9 +414,9 @@ ["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3000]], "0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff03e8030000000000000151d0070000000000000151b80b00000000000001510002483045022100a3cec69b52cba2d2de623ffffffffff1606184ea55476c0f8189fda231bc9cbb022003181ad597f7c380a7d1c740286b1d022b8b04ded028b833282e055e03b8efef812103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000", "DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM"], -["Witness with a push of 520 bytes"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x20 0x33198a9bfef674ebddb9ffaa52928017b8472791e54c609cb95f278ac6b1e349", 1000]], 
-"0100000000010100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff010000000000000000015102fd08020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002755100000000", "NONE"], +["Witness with a push of 256 bytes (REDUCED_DATA limit)"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x20 0xa57e25ffadd285772f5627ec6fa613bc8fb49b4db475c371dfd4eb76f25c5073", 1000]], 
+"0100000000010100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff010000000000000000015101fd05014d000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000755100000000", "NONE"], ["Transaction mixing all SigHash, segwit and normal inputs"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 1000], diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp index 3a86036327..781f43b2b6 100644 --- a/src/test/dbwrapper_tests.cpp +++ b/src/test/dbwrapper_tests.cpp @@ -9,39 +9,62 @@ #include #include +#include #include using util::ToString; -// Test if a string consists entirely of null characters -static bool is_null_key(const std::vector& key) { - bool isnull = true; - - for (unsigned int i = 0; i < key.size(); i++) - isnull &= (key[i] == '\x00'); - - return isnull; -} - BOOST_FIXTURE_TEST_SUITE(dbwrapper_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(dbwrapper) { // Perform tests both obfuscated and non-obfuscated. for (const bool obfuscate : {false, true}) { - fs::path ph = m_args.GetDataDirBase() / (obfuscate ? 
"dbwrapper_obfuscate_true" : "dbwrapper_obfuscate_false"); - CDBWrapper dbw({.path = ph, .cache_bytes = 1 << 20, .memory_only = true, .wipe_data = false, .obfuscate = obfuscate}); - uint8_t key{'k'}; - uint256 in = m_rng.rand256(); - uint256 res; + constexpr size_t CACHE_SIZE{1_MiB}; + const fs::path path{m_args.GetDataDirBase() / "dbwrapper"}; + + Obfuscation obfuscation; + std::vector> key_values{}; + + // Write values + { + CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .wipe_data = true, .obfuscate = obfuscate}}; + BOOST_CHECK_EQUAL(obfuscate, !dbw.IsEmpty()); + + // Ensure that we're doing real obfuscation when obfuscate=true + obfuscation = dbwrapper_private::GetObfuscateKey(dbw); + BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscateKey(dbw)); + + for (uint8_t k{0}; k < 10; ++k) { + uint8_t key{k}; + uint256 value{m_rng.rand256()}; + BOOST_CHECK(dbw.Write(key, value)); + key_values.emplace_back(key, value); + } + } - // Ensure that we're doing real obfuscation when obfuscate=true - BOOST_CHECK(obfuscate != is_null_key(dbwrapper_private::GetObfuscateKey(dbw))); + // Verify that the obfuscation key is never obfuscated + { + CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .obfuscate = false}}; + BOOST_CHECK_EQUAL(obfuscation, dbwrapper_private::GetObfuscateKey(dbw)); + } - BOOST_CHECK(dbw.Write(key, in)); - BOOST_CHECK(dbw.Read(key, res)); - BOOST_CHECK_EQUAL(res.ToString(), in.ToString()); + // Read back the values + { + CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .obfuscate = obfuscate}}; + + // Ensure obfuscation is read back correctly + BOOST_CHECK_EQUAL(obfuscation, dbwrapper_private::GetObfuscateKey(dbw)); + BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscateKey(dbw)); + + // Verify all written values + for (const auto& [key, expected_value] : key_values) { + uint256 read_value{}; + BOOST_CHECK(dbw.Read(key, read_value)); + BOOST_CHECK_EQUAL(read_value, expected_value); + } + } } } @@ -57,7 +80,7 @@ 
BOOST_AUTO_TEST_CASE(dbwrapper_basic_data) bool res_bool; // Ensure that we're doing real obfuscation when obfuscate=true - BOOST_CHECK(obfuscate != is_null_key(dbwrapper_private::GetObfuscateKey(dbw))); + BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscateKey(dbw)); //Simulate block raw data - "b + block hash" std::string key_block = "b" + m_rng.rand256().ToString(); @@ -232,7 +255,7 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate) BOOST_CHECK_EQUAL(res2.ToString(), in.ToString()); BOOST_CHECK(!odbw.IsEmpty()); // There should be existing data - BOOST_CHECK(is_null_key(dbwrapper_private::GetObfuscateKey(odbw))); // The key should be an empty string + BOOST_CHECK(!dbwrapper_private::GetObfuscateKey(odbw)); // The key should be an empty string uint256 in2 = m_rng.rand256(); uint256 res3; @@ -269,7 +292,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex) // Check that the key/val we wrote with unobfuscated wrapper doesn't exist uint256 res2; BOOST_CHECK(!odbw.Read(key, res2)); - BOOST_CHECK(!is_null_key(dbwrapper_private::GetObfuscateKey(odbw))); + BOOST_CHECK(dbwrapper_private::GetObfuscateKey(odbw)); uint256 in2 = m_rng.rand256(); uint256 res3; diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index 9ee7e9c9fe..4ecc276081 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -62,13 +62,14 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) CAddress(), /*addrNameIn=*/"", ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/0}; connman.Handshake( /*node=*/dummyNode1, /*successfully_connected=*/true, - /*remote_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS), - /*local_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS), + /*remote_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA), + /*local_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA), /*version=*/PROTOCOL_VERSION, 
/*relay_txs=*/true); @@ -128,7 +129,8 @@ void AddRandomOutboundPeer(NodeId& id, std::vector& vNodes, PeerManager& CAddress(), /*addrNameIn=*/"", connType, - /*inbound_onion=*/false}); + /*inbound_onion=*/false, + /*network_key=*/0}); CNode &node = *vNodes.back(); node.SetCommonVersion(PROTOCOL_VERSION); @@ -327,7 +329,8 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) CAddress(), /*addrNameIn=*/"", ConnectionType::INBOUND, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/1}; nodes[0]->SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(*nodes[0], NODE_NETWORK); nodes[0]->fSuccessfullyConnected = true; @@ -347,7 +350,8 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) CAddress(), /*addrNameIn=*/"", ConnectionType::INBOUND, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/1}; nodes[1]->SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(*nodes[1], NODE_NETWORK); nodes[1]->fSuccessfullyConnected = true; @@ -377,7 +381,8 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) CAddress(), /*addrNameIn=*/"", ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/2}; nodes[2]->SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(*nodes[2], NODE_NETWORK); nodes[2]->fSuccessfullyConnected = true; @@ -419,7 +424,8 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) CAddress(), /*addrNameIn=*/"", ConnectionType::INBOUND, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/1}; dummyNode.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(dummyNode, NODE_NETWORK); dummyNode.fSuccessfullyConnected = true; diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 63c53a842c..223d2934ac 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -1006,7 +1006,8 @@ BOOST_AUTO_TEST_CASE(descriptor_test) 
CheckUnparsable("sh(and_v(vc:andor(pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),pk_k(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn),and_v(v:older(1),pk_k(L4o2kDvXXDRH2VS9uBnouScLduWt4dZnM25se7kvEjJeQ285en2A))),after(10)))", "sh(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10)))", "Miniscript expressions can only be used in wsh or tr."); CheckUnparsable("tr(and_v(vc:andor(pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),pk_k(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn),and_v(v:older(1),pk_k(L4o2kDvXXDRH2VS9uBnouScLduWt4dZnM25se7kvEjJeQ285en2A))),after(10)))", "tr(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10)))", "tr(): key 'and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10))' is not valid"); CheckUnparsable("raw(and_v(vc:andor(pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),pk_k(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn),and_v(v:older(1),pk_k(L4o2kDvXXDRH2VS9uBnouScLduWt4dZnM25se7kvEjJeQ285en2A))),after(10)))", "sh(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10)))", "Miniscript expressions can only be used in wsh or tr."); - CheckUnparsable("", 
"tr(034D2224bbbbbbbbbbcbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb40,{{{{{{{{{{{{{{{{{{{{{{multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/967808'/9,xprvA1RpRA33e1JQ7ifknakTFNpgXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/968/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/585/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/2/0/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/5/8/5/8/24/5/58/52/5/8/5/2/8/24/5/58/588/246/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/5/4/5/58/55/58/2/5/8/55/2/5/8/58/555/58/2/5/8/4//2/5/58/5w/2/5/8/5/2/4/5/58/5558'/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/8/58/2/5/58/58/2/5/8/9/588/2/58/2/5/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/82/5/8/5/5/58/52/6/8/5/2/8/{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}{{{{{{{{{DDD2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8588/246/8/5/2DLDDDDDDDbbD3DDDD/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8D)/5/2/5/58/58/2/5/58/58/58/588/2/58/2/5/8/5/25/58/58/2/5/58/58/2/5/8/9/588/2/58/2/6780,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFW/8/5/2/5/58678008')", 
"'multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/967808'/9,xprvA1RpRA33e1JQ7ifknakTFNpgXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/968/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/585/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/2/0/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/5/8/5/8/24/5/58/52/5/8/5/2/8/24/5/58/588/246/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/5/4/5/58/55/58/2/5/8/55/2/5/8/58/555/58/2/5/8/4//2/5/58/5w/2/5/8/5/2/4/5/58/5558'/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/8/58/2/5/58/58/2/5/8/9/588/2/58/2/5/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/82/5/8/5/5/58/52/6/8/5/2/8/{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}{{{{{{{{{DDD2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8588/246/8/5/2DLDDDDDDDbbD3DDDD/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8D)/5/2/5/58/58/2/5/58/58/58/588/2/58/2/5/8/5/25/58/58/2/5/58/58/2/5/8/9/588/2/58/2/6780,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFW/8/5/2/5/58678008'' is not a valid descriptor function"); + // REDUCED_DATA limits Taproot nesting to 7 levels, so this test now hits that limit before the multi() error + CheckUnparsable("", 
"tr(034D2224bbbbbbbbbbcbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb40,{{{{{{{{{{{{{{{{{{{{{{multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/967808'/9,xprvA1RpRA33e1JQ7ifknakTFNpgXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/968/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/585/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/2/0/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/5/8/5/8/24/5/58/52/5/8/5/2/8/24/5/58/588/246/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/5/4/5/58/55/58/2/5/8/55/2/5/8/58/555/58/2/5/8/4//2/5/58/5w/2/5/8/5/2/4/5/58/5558'/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/8/58/2/5/58/58/2/5/8/9/588/2/58/2/5/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/82/5/8/5/5/58/52/6/8/5/2/8/{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}{{{{{{{{{DDD2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8588/246/8/5/2DLDDDDDDDbbD3DDDD/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8D)/5/2/5/58/58/2/5/58/58/58/588/2/58/2/5/8/5/25/58/58/2/5/58/58/2/5/8/9/588/2/58/2/6780,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFW/8/5/2/5/58678008')", "tr() supports at most 7 nesting levels"); // No uncompressed keys allowed CheckUnparsable("", 
"wsh(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(049228de6902abb4f541791f6d7f925b10e2078ccb1298856e5ea5cc5fd667f930eac37a00cc07f9a91ef3c2d17bf7a17db04552ff90ac312a5b8b4caca6c97aa4))),after(10)))", "Uncompressed keys are not allowed"); // No hybrid keys allowed @@ -1047,7 +1048,8 @@ BOOST_AUTO_TEST_CASE(descriptor_test) Check("wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE_FAILS, {{"0020cf62bf97baf977aec69cbc290c372899f913337a9093e8f066ab59b8657a365c"}}, OutputType::BECH32, /*op_desc_id=*/uint256{"8412ba3ac20ba3a30f81442d10d32e0468fa52814960d04e959bf84a9b813b88"}, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {}); Check("wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", 
SIGNABLE, {{"0020cf62bf97baf977aec69cbc290c372899f913337a9093e8f066ab59b8657a365c"}}, OutputType::BECH32, /*op_desc_id=*/uint256{"8412ba3ac20ba3a30f81442d10d32e0468fa52814960d04e959bf84a9b813b88"}, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {{"ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588"_hex_v_u8, "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"_hex_v_u8}}); // Can have a Miniscript expression under tr() if it's alone. - Check("tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,thresh(2,pk(L1NKM8dVA1h52mwDrmk1YreTWkAZZTu2vmKLpmLEbFRqGQYjHeEV),s:pk(Kz3iCBy3HNGP5CZWDsAMmnCMFNwqdDohudVN9fvkrN7tAkzKNtM7),adv:older(42)))", "tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,thresh(2,pk(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529),s:pk(9918d400c1b8c3c478340a40117ced4054b6b58f48cdb3c89b836bdfee1f5766),adv:older(42)))", "tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,thresh(2,pk(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529),s:pk(9918d400c1b8c3c478340a40117ced4054b6b58f48cdb3c89b836bdfee1f5766),adv:older(42)))", MISSING_PRIVKEYS | XONLY_KEYS | SIGNABLE, {{"512033982eebe204dc66508e4b19cfc31b5ffc6e1bfcbf6e5597dfc2521a52270795"}}, OutputType::BECH32M); + // Note: thresh() uses OP_IF which is forbidden with REDUCED_DATA, so using and_v() instead + Check("tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,and_v(v:pk(L1NKM8dVA1h52mwDrmk1YreTWkAZZTu2vmKLpmLEbFRqGQYjHeEV),pk(Kz3iCBy3HNGP5CZWDsAMmnCMFNwqdDohudVN9fvkrN7tAkzKNtM7)))", "tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,and_v(v:pk(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529),pk(9918d400c1b8c3c478340a40117ced4054b6b58f48cdb3c89b836bdfee1f5766)))", 
"tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,and_v(v:pk(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529),pk(9918d400c1b8c3c478340a40117ced4054b6b58f48cdb3c89b836bdfee1f5766)))", MISSING_PRIVKEYS | XONLY_KEYS | SIGNABLE, {{"51202aca0fdcbfbc513549e2c9490e60ba54e3c345ff01d667c4f846c802c0e7b8f4"}}, OutputType::BECH32M); // Can have a pkh() expression alone as tr() script path (because pkh() is valid Miniscript). Check("tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,pkh(L1NKM8dVA1h52mwDrmk1YreTWkAZZTu2vmKLpmLEbFRqGQYjHeEV))", "tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,pkh(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529))", "tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,pkh(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529))", MISSING_PRIVKEYS | XONLY_KEYS | SIGNABLE, {{"51201e9875f690f5847404e4c5951e2f029887df0525691ee11a682afd37b608aad4"}}, OutputType::BECH32M); // Can have a Miniscript expression under tr() if it's part of a tree. 
diff --git a/src/test/fuzz/autofile.cpp b/src/test/fuzz/autofile.cpp index f081ca5545..d6ac59786d 100644 --- a/src/test/fuzz/autofile.cpp +++ b/src/test/fuzz/autofile.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -18,9 +19,10 @@ FUZZ_TARGET(autofile) { FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; FuzzedFileProvider fuzzed_file_provider{fuzzed_data_provider}; + const auto key_bytes{ConsumeFixedLengthByteVector(fuzzed_data_provider, Obfuscation::KEY_SIZE)}; AutoFile auto_file{ fuzzed_file_provider.open(), - ConsumeRandomLengthByteVector(fuzzed_data_provider), + Obfuscation{std::span{key_bytes}.first()}, }; LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 100) { diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp index 2923c39aaf..1bc3b4ba8f 100644 --- a/src/test/fuzz/buffered_file.cpp +++ b/src/test/fuzz/buffered_file.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -20,9 +21,10 @@ FUZZ_TARGET(buffered_file) FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; FuzzedFileProvider fuzzed_file_provider{fuzzed_data_provider}; std::optional opt_buffered_file; + const auto key_bytes{ConsumeFixedLengthByteVector(fuzzed_data_provider, Obfuscation::KEY_SIZE)}; AutoFile fuzzed_file{ fuzzed_file_provider.open(), - ConsumeRandomLengthByteVector(fuzzed_data_provider), + Obfuscation{std::span{key_bytes}.first()}, }; try { auto n_buf_size = fuzzed_data_provider.ConsumeIntegralInRange(0, 4096); diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp index 06145e0323..6cf7278420 100644 --- a/src/test/fuzz/coins_view.cpp +++ b/src/test/fuzz/coins_view.cpp @@ -256,7 +256,7 @@ FUZZ_TARGET(coins_view, .init = initialize_coins_view) // It is not allowed to call CheckTxInputs if CheckTransaction failed return; } - if (Consensus::CheckTxInputs(transaction, state, coins_view_cache, fuzzed_data_provider.ConsumeIntegralInRange(0, 
std::numeric_limits::max()), tx_fee_out)) { + if (Consensus::CheckTxInputs(transaction, state, coins_view_cache, fuzzed_data_provider.ConsumeIntegralInRange(0, std::numeric_limits::max()), tx_fee_out, CheckTxInputsRules::OutputSizeLimit)) { assert(MoneyRange(tx_fee_out)); } }, diff --git a/src/test/fuzz/miniscript.cpp b/src/test/fuzz/miniscript.cpp index 60d096bb5a..132f9f9d4d 100644 --- a/src/test/fuzz/miniscript.cpp +++ b/src/test/fuzz/miniscript.cpp @@ -1132,13 +1132,25 @@ void TestNode(const MsCtx script_ctx, const NodeRef& node, FuzzedDataProvider& p SatisfactionToWitness(script_ctx, witness_nonmal, script, builder); ScriptError serror; bool res = VerifyScript(DUMMY_SCRIPTSIG, script_pubkey, &witness_nonmal, STANDARD_SCRIPT_VERIFY_FLAGS, CHECKER_CTX, &serror); - // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions(). - if (node->ValidSatisfactions()) assert(res); + // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions(), unless REDUCED_DATA rules are violated. + if (node->ValidSatisfactions()) { + assert(res || + serror == ScriptError::SCRIPT_ERR_PUSH_SIZE || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS || + serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF); + } // More detailed: non-malleable satisfactions must be valid, or could fail with ops count error (if CheckOpsLimit failed), - // or with a stack size error (if CheckStackSize check failed). + // or with a stack size error (if CheckStackSize check failed), or with REDUCED_DATA-related errors. 
assert(res || (!node->CheckOpsLimit() && serror == ScriptError::SCRIPT_ERR_OP_COUNT) || - (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE)); + (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE) || + serror == ScriptError::SCRIPT_ERR_PUSH_SIZE || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS || + serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF); } if (mal_success && (!nonmal_success || witness_mal.stack != witness_nonmal.stack)) { @@ -1148,8 +1160,15 @@ void TestNode(const MsCtx script_ctx, const NodeRef& node, FuzzedDataProvider& p ScriptError serror; bool res = VerifyScript(DUMMY_SCRIPTSIG, script_pubkey, &witness_mal, STANDARD_SCRIPT_VERIFY_FLAGS, CHECKER_CTX, &serror); // Malleable satisfactions are not guaranteed to be valid under any conditions, but they can only - // fail due to stack or ops limits. - assert(res || serror == ScriptError::SCRIPT_ERR_OP_COUNT || serror == ScriptError::SCRIPT_ERR_STACK_SIZE); + // fail due to stack or ops limits, or REDUCED_DATA-related errors. 
+ assert(res || + serror == ScriptError::SCRIPT_ERR_OP_COUNT || + serror == ScriptError::SCRIPT_ERR_STACK_SIZE || + serror == ScriptError::SCRIPT_ERR_PUSH_SIZE || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS || + serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF); } if (node->IsSane()) { diff --git a/src/test/fuzz/p2p_headers_presync.cpp b/src/test/fuzz/p2p_headers_presync.cpp index ed7041ad1f..d4cdc384bb 100644 --- a/src/test/fuzz/p2p_headers_presync.cpp +++ b/src/test/fuzz/p2p_headers_presync.cpp @@ -60,7 +60,7 @@ void HeadersSyncSetup::ResetAndInitialize() for (auto conn_type : conn_types) { CAddress addr{}; - m_connections.push_back(new CNode(id++, nullptr, addr, 0, 0, addr, "", conn_type, false)); + m_connections.push_back(new CNode(id++, nullptr, addr, 0, 0, addr, "", conn_type, false, 0)); CNode& p2p_node = *m_connections.back(); connman.Handshake( diff --git a/src/test/fuzz/package_eval.cpp b/src/test/fuzz/package_eval.cpp index 8e3d84a9e6..37b18a5941 100644 --- a/src/test/fuzz/package_eval.cpp +++ b/src/test/fuzz/package_eval.cpp @@ -324,7 +324,7 @@ FUZZ_TARGET(ephemeral_package_eval, .init = initialize_tx_pool) return ProcessNewPackage(chainstate, tx_pool, txs, /*test_accept=*/single_submit, /*client_maxfeerate=*/{})); const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, txs.back(), GetTime(), - /*bypass_limits=*/fuzzed_data_provider.ConsumeBool(), /*test_accept=*/!single_submit)); + /*bypass_limits=*/false, /*test_accept=*/!single_submit)); if (!single_submit && result_package.m_state.GetResult() != PackageValidationResult::PCKG_POLICY) { // We don't know anything about the validity since transactions were randomly generated, so diff --git a/src/test/fuzz/partially_downloaded_block.cpp b/src/test/fuzz/partially_downloaded_block.cpp index 7d744728a9..8e22869a90 
100644 --- a/src/test/fuzz/partially_downloaded_block.cpp +++ b/src/test/fuzz/partially_downloaded_block.cpp @@ -32,14 +32,10 @@ void initialize_pdb() g_setup = testing_setup.get(); } -PartiallyDownloadedBlock::CheckBlockFn FuzzedCheckBlock(std::optional result) +PartiallyDownloadedBlock::IsBlockMutatedFn FuzzedIsBlockMutated(bool result) { - return [result](const CBlock&, BlockValidationState& state, const Consensus::Params&, bool, bool) { - if (result) { - return state.Invalid(*result); - } - - return true; + return [result](const CBlock& block, bool) { + return result; }; } @@ -111,36 +107,22 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb) skipped_missing |= (!pdb.IsTxAvailable(i) && skip); } - // Mock CheckBlock - bool fail_check_block{fuzzed_data_provider.ConsumeBool()}; - auto validation_result = - fuzzed_data_provider.PickValueInArray( - {BlockValidationResult::BLOCK_RESULT_UNSET, - BlockValidationResult::BLOCK_CONSENSUS, - BlockValidationResult::BLOCK_CACHED_INVALID, - BlockValidationResult::BLOCK_INVALID_HEADER, - BlockValidationResult::BLOCK_MUTATED, - BlockValidationResult::BLOCK_MISSING_PREV, - BlockValidationResult::BLOCK_INVALID_PREV, - BlockValidationResult::BLOCK_TIME_FUTURE, - BlockValidationResult::BLOCK_CHECKPOINT, - BlockValidationResult::BLOCK_HEADER_LOW_WORK}); - pdb.m_check_block_mock = FuzzedCheckBlock( - fail_check_block ? 
- std::optional{validation_result} : - std::nullopt); + bool segwit_active{fuzzed_data_provider.ConsumeBool()}; + + // Mock IsBlockMutated + bool fail_block_mutated{fuzzed_data_provider.ConsumeBool()}; + pdb.m_check_block_mutated_mock = FuzzedIsBlockMutated(fail_block_mutated); CBlock reconstructed_block; - auto fill_status{pdb.FillBlock(reconstructed_block, missing)}; + auto fill_status{pdb.FillBlock(reconstructed_block, missing, segwit_active)}; switch (fill_status) { case READ_STATUS_OK: assert(!skipped_missing); - assert(!fail_check_block); + assert(!fail_block_mutated); assert(block->GetHash() == reconstructed_block.GetHash()); break; - case READ_STATUS_CHECKBLOCK_FAILED: [[fallthrough]]; case READ_STATUS_FAILED: - assert(fail_check_block); + assert(fail_block_mutated); break; case READ_STATUS_INVALID: break; diff --git a/src/test/fuzz/pcp.cpp b/src/test/fuzz/pcp.cpp index 76fdded188..bf95dc916c 100644 --- a/src/test/fuzz/pcp.cpp +++ b/src/test/fuzz/pcp.cpp @@ -4,6 +4,7 @@ #include #include +#include #include #include diff --git a/src/test/fuzz/script_interpreter.cpp b/src/test/fuzz/script_interpreter.cpp index 9e3ad02b2e..2c2ce855d4 100644 --- a/src/test/fuzz/script_interpreter.cpp +++ b/src/test/fuzz/script_interpreter.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -45,3 +46,27 @@ FUZZ_TARGET(script_interpreter) (void)CastToBool(ConsumeRandomLengthByteVector(fuzzed_data_provider)); } } + +/** Differential fuzzing for SignatureHash with and without cache. */ +FUZZ_TARGET(sighash_cache) +{ + FuzzedDataProvider provider(buffer.data(), buffer.size()); + + // Get inputs to the sighash function that won't change across types. 
+ const auto scriptcode{ConsumeScript(provider)}; + const auto tx{ConsumeTransaction(provider, std::nullopt)}; + if (tx.vin.empty()) return; + const auto in_index{provider.ConsumeIntegralInRange(0, tx.vin.size() - 1)}; + const auto amount{ConsumeMoney(provider)}; + const auto sigversion{(SigVersion)provider.ConsumeIntegralInRange(0, 1)}; + + // Check the sighash function will give the same result for 100 fuzzer-generated hash types whether or not a cache is + // provided. The cache is conserved across types to exercise cache hits. + SigHashCache sighash_cache{}; + for (int i{0}; i < 100; ++i) { + const auto hash_type{((i & 2) == 0) ? provider.ConsumeIntegral() : provider.ConsumeIntegral()}; + const auto nocache_res{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion)}; + const auto cache_res{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &sighash_cache)}; + Assert(nocache_res == cache_res); + } +} diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp index 8c9a32068e..2deccf93dc 100644 --- a/src/test/fuzz/tx_pool.cpp +++ b/src/test/fuzz/tx_pool.cpp @@ -296,7 +296,6 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool) std::set added; auto txr = std::make_shared(removed, added); node.validation_signals->RegisterSharedValidationInterface(txr); - const bool bypass_limits = fuzzed_data_provider.ConsumeBool(); // Make sure ProcessNewPackage on one transaction works. // The result is not guaranteed to be the same as what is returned by ATMP. 
@@ -311,7 +310,7 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool) it->second.m_result_type == MempoolAcceptResult::ResultType::INVALID); } - const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), bypass_limits, /*test_accept=*/false)); + const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), /*bypass_limits=*/false, /*test_accept=*/false)); const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID; node.validation_signals->SyncWithValidationInterfaceQueue(); node.validation_signals->UnregisterSharedValidationInterface(txr); @@ -394,6 +393,9 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool) chainstate.SetMempool(&tx_pool); + // If we ever bypass limits, do not do TRUC invariants checks + bool ever_bypassed_limits{false}; + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300) { const auto mut_tx = ConsumeTransaction(fuzzed_data_provider, txids); @@ -412,13 +414,17 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool) tx_pool.PrioritiseTransaction(txid.ToUint256(), delta); } + const bool bypass_limits{fuzzed_data_provider.ConsumeBool()}; + ever_bypassed_limits |= bypass_limits; + const auto tx = MakeTransactionRef(mut_tx); - const bool bypass_limits = fuzzed_data_provider.ConsumeBool(); const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), bypass_limits, /*test_accept=*/false)); const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID; if (accepted) { txids.push_back(tx->GetHash()); - CheckMempoolTRUCInvariants(tx_pool); + if (!ever_bypassed_limits) { + CheckMempoolTRUCInvariants(tx_pool); + } } } Finish(fuzzed_data_provider, tx_pool, chainstate); diff --git a/src/test/fuzz/util/net.h b/src/test/fuzz/util/net.h index 698001a7f1..381103aa8b 100644 --- a/src/test/fuzz/util/net.h +++ b/src/test/fuzz/util/net.h @@ -239,6 +239,8 @@ auto ConsumeNode(FuzzedDataProvider& fuzzed_data_provider, const 
std::optional(); + NetPermissionFlags permission_flags = ConsumeWeakEnum(fuzzed_data_provider, ALL_NET_PERMISSION_FLAGS); if constexpr (ReturnUniquePtr) { return std::make_unique(node_id, @@ -250,6 +252,7 @@ auto ConsumeNode(FuzzedDataProvider& fuzzed_data_provider, const std::optional 0); assert(0 <= m_threshold && m_threshold <= m_period); assert(0 <= m_bit && m_bit < 32 && m_bit < VERSIONBITS_NUM_BITS); assert(0 <= m_min_activation_height); + assert(m_active_duration > 0); } bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override { return Condition(pindex->nVersion); } @@ -49,6 +51,7 @@ class TestConditionChecker : public AbstractThresholdConditionChecker int Period(const Consensus::Params& params) const override { return m_period; } int Threshold(const Consensus::Params& params) const override { return m_threshold; } int MinActivationHeight(const Consensus::Params& params) const override { return m_min_activation_height; } + int ActiveDuration(const Consensus::Params& params) const override { return m_active_duration; } ThresholdState GetStateFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateFor(pindexPrev, dummy_params, m_cache); } int GetStateSinceHeightFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateSinceHeightFor(pindexPrev, dummy_params, m_cache); } @@ -168,8 +171,9 @@ FUZZ_TARGET(versionbits, .init = initialize) timeout = fuzzed_data_provider.ConsumeBool() ? Consensus::BIP9Deployment::NO_TIMEOUT : fuzzed_data_provider.ConsumeIntegral(); } int min_activation = fuzzed_data_provider.ConsumeIntegralInRange(0, period * max_periods); + int active_duration = fuzzed_data_provider.ConsumeBool() ? 
std::numeric_limits::max() : (fuzzed_data_provider.ConsumeIntegralInRange(1, max_periods) * period); - TestConditionChecker checker(start_time, timeout, period, threshold, min_activation, bit); + TestConditionChecker checker(start_time, timeout, period, threshold, min_activation, active_duration, bit); // Early exit if the versions don't signal sensibly for the deployment if (!checker.Condition(ver_signal)) return; @@ -337,13 +341,22 @@ FUZZ_TARGET(versionbits, .init = initialize) assert(exp_state == ThresholdState::FAILED); } break; + case ThresholdState::EXPIRED: + assert(!always_active_test); + assert(active_duration < std::numeric_limits::max()); + assert(min_activation <= current_block->nHeight + 1); + assert(exp_state == ThresholdState::EXPIRED || exp_state == ThresholdState::ACTIVE); + if (exp_state == ThresholdState::ACTIVE) { + assert(since == exp_since + active_duration); // EXPIRED starts exactly active_duration blocks after ACTIVE started + } + break; default: assert(false); } if (blocks.size() >= period * max_periods) { // we chose the timeout (and block times) so that by the time we have this many blocks it's all over - assert(state == ThresholdState::ACTIVE || state == ThresholdState::FAILED); + assert(state == ThresholdState::ACTIVE || state == ThresholdState::FAILED || state == ThresholdState::EXPIRED); } if (always_active_test) { diff --git a/src/test/miniscript_tests.cpp b/src/test/miniscript_tests.cpp index 47fc45df4a..e7e32ea2b4 100644 --- a/src/test/miniscript_tests.cpp +++ b/src/test/miniscript_tests.cpp @@ -393,13 +393,25 @@ void TestSatisfy(const KeyConverter& converter, const std::string& testcase, con // Test non-malleable satisfaction. ScriptError serror; bool res = VerifyScript(CScript(), script_pubkey, &witness_nonmal, STANDARD_SCRIPT_VERIFY_FLAGS, checker, &serror); - // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions(). 
- if (node->ValidSatisfactions()) BOOST_CHECK(res); + // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions(), unless REDUCED_DATA rules are violated. + if (node->ValidSatisfactions()) { + BOOST_CHECK(res || + serror == ScriptError::SCRIPT_ERR_PUSH_SIZE || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS || + serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF); + } // More detailed: non-malleable satisfactions must be valid, or could fail with ops count error (if CheckOpsLimit failed), - // or with a stack size error (if CheckStackSize check fails). + // or with a stack size error (if CheckStackSize check fails), or with REDUCED_DATA-related errors. BOOST_CHECK(res || (!node->CheckOpsLimit() && serror == ScriptError::SCRIPT_ERR_OP_COUNT) || - (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE)); + (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE) || + (serror == ScriptError::SCRIPT_ERR_PUSH_SIZE) || + (serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM) || + (serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION) || + (serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS) || + (serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF)); } if (mal_success && (!nonmal_success || witness_mal.stack != witness_nonmal.stack)) { @@ -407,8 +419,15 @@ void TestSatisfy(const KeyConverter& converter, const std::string& testcase, con ScriptError serror; bool res = VerifyScript(CScript(), script_pubkey, &witness_mal, STANDARD_SCRIPT_VERIFY_FLAGS, checker, &serror); // Malleable satisfactions are not guaranteed to be valid under any conditions, but they can only - // fail due to stack or ops limits. 
- BOOST_CHECK(res || serror == ScriptError::SCRIPT_ERR_OP_COUNT || serror == ScriptError::SCRIPT_ERR_STACK_SIZE); + // fail due to stack or ops limits, or REDUCED_DATA-related errors. + BOOST_CHECK(res || + serror == ScriptError::SCRIPT_ERR_OP_COUNT || + serror == ScriptError::SCRIPT_ERR_STACK_SIZE || + serror == ScriptError::SCRIPT_ERR_PUSH_SIZE || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS || + serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF); } if (node->IsSane()) { diff --git a/src/test/net_peer_connection_tests.cpp b/src/test/net_peer_connection_tests.cpp index ad675b5f6f..d9ddeb8e1f 100644 --- a/src/test/net_peer_connection_tests.cpp +++ b/src/test/net_peer_connection_tests.cpp @@ -72,7 +72,8 @@ void AddPeer(NodeId& id, std::vector& nodes, PeerManager& peerman, Connm CAddress{}, /*addrNameIn=*/"", conn_type, - /*inbound_onion=*/inbound_onion}); + /*inbound_onion=*/inbound_onion, + /*network_key=*/0}); CNode& node = *nodes.back(); node.SetCommonVersion(PROTOCOL_VERSION); diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 62e541b5b3..78eeea0ade 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -67,7 +67,8 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) CAddress(), pszDest, ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false); + /*inbound_onion=*/false, + /*network_key=*/0); BOOST_CHECK(pnode1->IsFullOutboundConn() == true); BOOST_CHECK(pnode1->IsManualConn() == false); BOOST_CHECK(pnode1->IsBlockOnlyConn() == false); @@ -85,7 +86,8 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) CAddress(), pszDest, ConnectionType::INBOUND, - /*inbound_onion=*/false); + /*inbound_onion=*/false, + /*network_key=*/1); BOOST_CHECK(pnode2->IsFullOutboundConn() == false); BOOST_CHECK(pnode2->IsManualConn() == false); BOOST_CHECK(pnode2->IsBlockOnlyConn() == false); @@ -103,7 
+105,8 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) CAddress(), pszDest, ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false); + /*inbound_onion=*/false, + /*network_key=*/2); BOOST_CHECK(pnode3->IsFullOutboundConn() == true); BOOST_CHECK(pnode3->IsManualConn() == false); BOOST_CHECK(pnode3->IsBlockOnlyConn() == false); @@ -121,7 +124,8 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) CAddress(), pszDest, ConnectionType::INBOUND, - /*inbound_onion=*/true); + /*inbound_onion=*/true, + /*network_key=*/3); BOOST_CHECK(pnode4->IsFullOutboundConn() == false); BOOST_CHECK(pnode4->IsManualConn() == false); BOOST_CHECK(pnode4->IsBlockOnlyConn() == false); @@ -613,7 +617,8 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) CAddress{}, /*pszDest=*/std::string{}, ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false); + /*inbound_onion=*/false, + /*network_key=*/0); pnode->fSuccessfullyConnected.store(true); // the peer claims to be reaching us via IPv6 @@ -667,7 +672,8 @@ BOOST_AUTO_TEST_CASE(get_local_addr_for_peer_port) /*addrBindIn=*/CService{}, /*addrNameIn=*/std::string{}, /*conn_type_in=*/ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/0}; peer_out.fSuccessfullyConnected = true; peer_out.SetAddrLocal(peer_us); @@ -688,7 +694,8 @@ BOOST_AUTO_TEST_CASE(get_local_addr_for_peer_port) /*addrBindIn=*/CService{}, /*addrNameIn=*/std::string{}, /*conn_type_in=*/ConnectionType::INBOUND, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/1}; peer_in.fSuccessfullyConnected = true; peer_in.SetAddrLocal(peer_us); @@ -792,6 +799,36 @@ BOOST_AUTO_TEST_CASE(LocalAddress_BasicLifecycle) BOOST_CHECK(!IsLocal(addr)); } +BOOST_AUTO_TEST_CASE(LocalAddress_nScore_Overflow) +{ + g_reachable_nets.Add(NET_IPV4); + CService addr{UtilBuildAddress(0x002, 0x001, 0x001, 0x001), 1000}; // 2.1.1.1:1000 + + // SeenLocal increments when nScore is below max + const int initial_score = 1000; + 
BOOST_REQUIRE(AddLocal(addr, initial_score)); + BOOST_REQUIRE(IsLocal(addr)); + BOOST_CHECK_EQUAL(GetnScore(addr), initial_score); + + // SeenLocal increments the score + BOOST_CHECK(SeenLocal(addr)); + BOOST_CHECK_EQUAL(GetnScore(addr), initial_score + 1); + + // SeenLocal saturates at max + RemoveLocal(addr); + BOOST_REQUIRE(AddLocal(addr, std::numeric_limits::max())); + BOOST_CHECK_EQUAL(GetnScore(addr), std::numeric_limits::max()); + + // a couple increments should saturate + for (int i = 0; i < 2; ++i) { + BOOST_CHECK(SeenLocal(addr)); + BOOST_CHECK_EQUAL(GetnScore(addr), std::numeric_limits::max()); + } + + RemoveLocal(addr); + BOOST_CHECK(!IsLocal(addr)); +} + BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) { LOCK(NetEventsInterface::g_msgproc_mutex); @@ -807,7 +844,7 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) // Pretend that we bound to this port. const uint16_t bind_port = 20001; m_node.args->ForceSetArg("-bind", strprintf("3.4.5.6:%u", bind_port)); - m_node.args->ForceSetArg("-capturemessages", "1"); + m_node.connman->SetCaptureMessages(true); // Our address:port as seen from the peer - 2.3.4.5:20002 (different from the above). in_addr peer_us_addr; @@ -825,9 +862,10 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) /*addrBindIn=*/CService{}, /*addrNameIn=*/std::string{}, /*conn_type_in=*/ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/2}; - const uint64_t services{NODE_NETWORK | NODE_WITNESS}; + const uint64_t services{NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA}; const int64_t time{0}; // Force ChainstateManager::IsInitialBlockDownload() to return false. 
@@ -835,7 +873,7 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) auto& chainman = static_cast(*m_node.chainman); chainman.JumpOutOfIbd(); - m_node.peerman->InitializeNode(peer, NODE_NETWORK); + m_node.peerman->InitializeNode(peer, ServiceFlags(NODE_NETWORK | NODE_REDUCED_DATA)); std::atomic interrupt_dummy{false}; std::chrono::microseconds time_received_dummy{0}; @@ -884,7 +922,7 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) CaptureMessage = CaptureMessageOrig; chainman.ResetIbd(); - m_node.args->ForceSetArg("-capturemessages", "0"); + m_node.connman->SetCaptureMessages(false); m_node.args->ForceSetArg("-bind", ""); } @@ -900,7 +938,8 @@ BOOST_AUTO_TEST_CASE(advertise_local_address) CAddress{}, /*pszDest=*/std::string{}, ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false); + /*inbound_onion=*/false, + /*network_key=*/0); }; g_reachable_nets.Add(NET_CJDNS); diff --git a/src/test/peerman_tests.cpp b/src/test/peerman_tests.cpp index e4fa6d20c9..052e8bf354 100644 --- a/src/test/peerman_tests.cpp +++ b/src/test/peerman_tests.cpp @@ -35,6 +35,7 @@ BOOST_AUTO_TEST_CASE(connections_desirable_service_flags) auto consensus = m_node.chainman->GetParams().GetConsensus(); // Check we start connecting to full nodes + // Note: NODE_REDUCED_DATA requirement is enforced separately in VERSION processing ServiceFlags peer_flags{NODE_WITNESS | NODE_NETWORK_LIMITED}; BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS)); diff --git a/src/test/random_tests.cpp b/src/test/random_tests.cpp index 3d8b543e64..538d41125a 100644 --- a/src/test/random_tests.cpp +++ b/src/test/random_tests.cpp @@ -58,7 +58,7 @@ BOOST_AUTO_TEST_CASE(fastrandom_tests_deterministic) BOOST_CHECK_EQUAL(ctx1.rand32(), ctx2.rand32()); BOOST_CHECK_EQUAL(ctx1.rand64(), ctx2.rand64()); BOOST_CHECK_EQUAL(ctx1.randbits(3), ctx2.randbits(3)); - BOOST_CHECK(ctx1.randbytes(17) == ctx2.randbytes(17)); + 
BOOST_CHECK(std::ranges::equal(ctx1.randbytes(17), ctx2.randbytes<17>())); // check vector/array behavior symmetry BOOST_CHECK(ctx1.rand256() == ctx2.rand256()); BOOST_CHECK_EQUAL(ctx1.randbits(7), ctx2.randbits(7)); BOOST_CHECK(ctx1.randbytes(128) == ctx2.randbytes(128)); diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp index e9ce82ca8a..042a6c6275 100644 --- a/src/test/script_standard_tests.cpp +++ b/src/test/script_standard_tests.cpp @@ -385,9 +385,10 @@ BOOST_AUTO_TEST_CASE(script_standard_taproot_builder) BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({2,2,0}), false); BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({2,2,1}), true); BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({2,2,2}), false); - BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({2,2,2,3,4,5,6,7,8,9,10,11,12,14,14,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,31,31,31,31,31,31,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,128}), true); - BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({128,128,127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1}), true); - 
BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({129,129,128,127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1}), false); + // REDUCED_DATA limits Taproot tree depth to 7 instead of 128 + BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({2,2,2,3,4,5,6,7,7}), true); + BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({7,7,6,5,4,3,2,1}), true); + BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({8,8,7,6,5,4,3,2,1}), false); XOnlyPubKey key_inner{"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"_hex_u8}; XOnlyPubKey key_1{"c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5"_hex_u8}; diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index d3320878ec..6e2ec800e7 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -207,4 +207,94 @@ BOOST_AUTO_TEST_CASE(sighash_from_data) BOOST_CHECK_MESSAGE(sh.GetHex() == sigHashHex, strTest); } } + +BOOST_AUTO_TEST_CASE(sighash_caching) +{ + // Get a script, transaction and parameters as inputs to the sighash function. + CScript scriptcode; + RandomScript(scriptcode); + CScript diff_scriptcode{scriptcode}; + diff_scriptcode << OP_1; + CMutableTransaction tx; + RandomTransaction(tx, /*fSingle=*/false); + const auto in_index{static_cast(m_rng.randrange(tx.vin.size()))}; + const auto amount{m_rng.rand()}; + + // Exercise the sighash function under both legacy and segwit v0. + for (const auto sigversion: {SigVersion::BASE, SigVersion::WITNESS_V0}) { + // For each, run it against all the 6 standard hash types and a few additional random ones. 
+ std::vector hash_types{{SIGHASH_ALL, SIGHASH_SINGLE, SIGHASH_NONE, SIGHASH_ALL | SIGHASH_ANYONECANPAY, + SIGHASH_SINGLE | SIGHASH_ANYONECANPAY, SIGHASH_NONE | SIGHASH_ANYONECANPAY, + SIGHASH_ANYONECANPAY, 0, std::numeric_limits::max()}}; + for (int i{0}; i < 10; ++i) { + hash_types.push_back(i % 2 == 0 ? m_rng.rand() : m_rng.rand()); + } + + // Reuse the same cache across script types. This must not cause any issue as the cached value for one hash type must never + // be confused for another (instantiating the cache within the loop instead would prevent testing this). + SigHashCache cache; + for (const auto hash_type: hash_types) { + const bool expect_one{sigversion == SigVersion::BASE && ((hash_type & 0x1f) == SIGHASH_SINGLE) && in_index >= tx.vout.size()}; + + // The result of computing the sighash should be the same with or without cache. + const auto sighash_with_cache{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)}; + const auto sighash_no_cache{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, nullptr)}; + BOOST_CHECK_EQUAL(sighash_with_cache, sighash_no_cache); + + // Calling the cached version again should return the same value again. + BOOST_CHECK_EQUAL(sighash_with_cache, SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)); + + // While here we might as well also check that the result for legacy is the same as for the old SignatureHash() function. + if (sigversion == SigVersion::BASE) { + BOOST_CHECK_EQUAL(sighash_with_cache, SignatureHashOld(scriptcode, CTransaction(tx), in_index, hash_type)); + } + + // Calling with a different scriptcode (for instance in case a CODESEP is encountered) will not return the cache value but + // overwrite it. The sighash will always be different except in case of legacy SIGHASH_SINGLE bug. 
+ const auto sighash_with_cache2{SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)}; + const auto sighash_no_cache2{SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, nullptr)}; + BOOST_CHECK_EQUAL(sighash_with_cache2, sighash_no_cache2); + if (!expect_one) { + BOOST_CHECK_NE(sighash_with_cache, sighash_with_cache2); + } else { + BOOST_CHECK_EQUAL(sighash_with_cache, sighash_with_cache2); + BOOST_CHECK_EQUAL(sighash_with_cache, uint256::ONE); + } + + // Calling the cached version again should return the same value again. + BOOST_CHECK_EQUAL(sighash_with_cache2, SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)); + + // And if we store a different value for this scriptcode and hash type it will return that instead. + { + HashWriter h{}; + h << 42; + cache.Store(hash_type, scriptcode, h); + const auto stored_hash{h.GetHash()}; + BOOST_CHECK(cache.Load(hash_type, scriptcode, h)); + const auto loaded_hash{h.GetHash()}; + BOOST_CHECK_EQUAL(stored_hash, loaded_hash); + } + + // And using this mutated cache with the sighash function will return the new value (except in the legacy SIGHASH_SINGLE bug + // case in which it'll return 1). + if (!expect_one) { + BOOST_CHECK_NE(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), sighash_with_cache); + HashWriter h{}; + BOOST_CHECK(cache.Load(hash_type, scriptcode, h)); + h << hash_type; + const auto new_hash{h.GetHash()}; + BOOST_CHECK_EQUAL(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), new_hash); + } else { + BOOST_CHECK_EQUAL(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), uint256::ONE); + } + + // Wipe the cache and restore the correct cached value for this scriptcode and hash_type before starting the next iteration. 
+ HashWriter dummy{}; + cache.Store(hash_type, diff_scriptcode, dummy); + (void)SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache); + BOOST_CHECK(cache.Load(hash_type, scriptcode, dummy) || expect_one); + } + } +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index eaf8f52aae..2068e64316 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -8,30 +8,123 @@ #include #include #include +#include #include #include using namespace std::string_literals; +using namespace util::hex_literals; BOOST_FIXTURE_TEST_SUITE(streams_tests, BasicTestingSetup) +// Test that obfuscation can be properly reverted even with random chunk sizes. +BOOST_AUTO_TEST_CASE(xor_roundtrip_random_chunks) +{ + auto apply_random_xor_chunks{[&](std::span target, const Obfuscation& obfuscation) { + for (size_t offset{0}; offset < target.size();) { + const size_t chunk_size{1 + m_rng.randrange(target.size() - offset)}; + obfuscation(target.subspan(offset, chunk_size), offset); + offset += chunk_size; + } + }}; + + for (size_t test{0}; test < 100; ++test) { + const size_t write_size{1 + m_rng.randrange(100U)}; + const std::vector original{m_rng.randbytes(write_size)}; + std::vector roundtrip{original}; + + const auto key_bytes{m_rng.randbool() ? m_rng.randbytes() : std::array{}}; + const Obfuscation obfuscation{key_bytes}; + apply_random_xor_chunks(roundtrip, obfuscation); + + const bool key_all_zeros{std::ranges::all_of( + std::span{key_bytes}.first(std::min(write_size, Obfuscation::KEY_SIZE)), [](auto b) { return b == std::byte{0}; })}; + BOOST_CHECK(key_all_zeros ? original == roundtrip : original != roundtrip); + + apply_random_xor_chunks(roundtrip, obfuscation); + BOOST_CHECK(original == roundtrip); + } +} + +// Compares optimized obfuscation against a trivial, byte-by-byte reference implementation +// with random offsets to ensure proper handling of key wrapping. 
+BOOST_AUTO_TEST_CASE(xor_bytes_reference) +{ + auto expected_xor{[](std::span target, std::span obfuscation, size_t key_offset) { + for (auto& b : target) { + b ^= obfuscation[key_offset++ % obfuscation.size()]; + } + }}; + + for (size_t test{0}; test < 100; ++test) { + const size_t write_size{1 + m_rng.randrange(100U)}; + const size_t key_offset{m_rng.randrange(3 * Obfuscation::KEY_SIZE)}; // Make sure the key can wrap around + const size_t write_offset{std::min(write_size, m_rng.randrange(Obfuscation::KEY_SIZE * 2))}; // Write unaligned data + + const auto key_bytes{m_rng.randbool() ? m_rng.randbytes() : std::array{}}; + const Obfuscation obfuscation{key_bytes}; + std::vector expected{m_rng.randbytes(write_size)}; + std::vector actual{expected}; + + expected_xor(std::span{expected}.subspan(write_offset), key_bytes, key_offset); + obfuscation(std::span{actual}.subspan(write_offset), key_offset); + + BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), actual.begin(), actual.end()); + } +} + +BOOST_AUTO_TEST_CASE(obfuscation_hexkey) +{ + const auto key_bytes{m_rng.randbytes()}; + + const Obfuscation obfuscation{key_bytes}; + BOOST_CHECK_EQUAL(obfuscation.HexKey(), HexStr(key_bytes)); +} + +BOOST_AUTO_TEST_CASE(obfuscation_serialize) +{ + const Obfuscation original{m_rng.randbytes()}; + + // Serialization + DataStream ds; + ds << original; + + BOOST_CHECK_EQUAL(ds.size(), 1 + Obfuscation::KEY_SIZE); // serialized as a vector + + // Deserialization + Obfuscation recovered{}; + ds >> recovered; + + BOOST_CHECK_EQUAL(recovered.HexKey(), original.HexKey()); +} + +BOOST_AUTO_TEST_CASE(obfuscation_empty) +{ + const Obfuscation null_obf{}; + BOOST_CHECK(!null_obf); + + const Obfuscation non_null_obf{"ff00ff00ff00ff00"_hex}; + BOOST_CHECK(non_null_obf); +} + BOOST_AUTO_TEST_CASE(xor_file) { fs::path xor_path{m_args.GetDataDirBase() / "test_xor.bin"}; auto raw_file{[&](const auto& mode) { return fsbridge::fopen(xor_path, mode); }}; const std::vector test1{1, 2, 
3}; const std::vector test2{4, 5}; - const std::vector xor_pat{std::byte{0xff}, std::byte{0x00}}; + const Obfuscation xor_pat{"ff00ff00ff00ff00"_hex}; + { // Check errors for missing file AutoFile xor_file{raw_file("rb"), xor_pat}; - BOOST_CHECK_EXCEPTION(xor_file << std::byte{}, std::ios_base::failure, HasReason{"AutoFile::write: file handle is nullpt"}); - BOOST_CHECK_EXCEPTION(xor_file >> std::byte{}, std::ios_base::failure, HasReason{"AutoFile::read: file handle is nullpt"}); - BOOST_CHECK_EXCEPTION(xor_file.ignore(1), std::ios_base::failure, HasReason{"AutoFile::ignore: file handle is nullpt"}); + BOOST_CHECK_EXCEPTION(xor_file << std::byte{}, std::ios_base::failure, HasReason{"AutoFile::write: file handle is nullptr"}); + BOOST_CHECK_EXCEPTION(xor_file >> std::byte{}, std::ios_base::failure, HasReason{"AutoFile::read: file handle is nullptr"}); + BOOST_CHECK_EXCEPTION(xor_file.ignore(1), std::ios_base::failure, HasReason{"AutoFile::ignore: file handle is nullptr"}); } { -#ifdef __MINGW64__ +#if 0 // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210 const char* mode = "wb"; #else @@ -228,7 +321,7 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor) // Degenerate case { DataStream ds{in}; - ds.Xor({0x00, 0x00}); + Obfuscation{}(ds); BOOST_CHECK_EQUAL(""s, ds.str()); } @@ -237,8 +330,10 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor) // Single character key { + const Obfuscation obfuscation{"ffffffffffffffff"_hex}; + DataStream ds{in}; - ds.Xor({0xff}); + obfuscation(ds); BOOST_CHECK_EQUAL("\xf0\x0f"s, ds.str()); } @@ -249,8 +344,10 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor) in.push_back(std::byte{0x0f}); { + const Obfuscation obfuscation{"ff0fff0fff0fff0f"_hex}; + DataStream ds{in}; - ds.Xor({0xff, 0x0f}); + obfuscation(ds); BOOST_CHECK_EQUAL("\x0f\x00"s, ds.str()); } } @@ -563,7 +660,7 @@ BOOST_AUTO_TEST_CASE(buffered_reader_matches_autofile_random_content) const FlatFilePos pos{0, 0}; const FlatFileSeq 
test_file{m_args.GetDataDirBase(), "buffered_file_test_random", node::BLOCKFILE_CHUNK_SIZE}; - const std::vector obfuscation{m_rng.randbytes(8)}; + const Obfuscation obfuscation{m_rng.randbytes()}; // Write out the file with random content { @@ -618,7 +715,7 @@ BOOST_AUTO_TEST_CASE(buffered_writer_matches_autofile_random_content) const FlatFileSeq test_buffered{m_args.GetDataDirBase(), "buffered_write_test", node::BLOCKFILE_CHUNK_SIZE}; const FlatFileSeq test_direct{m_args.GetDataDirBase(), "direct_write_test", node::BLOCKFILE_CHUNK_SIZE}; - const std::vector obfuscation{m_rng.randbytes(8)}; + const Obfuscation obfuscation{m_rng.randbytes()}; { DataBuffer test_data{m_rng.randbytes(file_size)}; diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 5d3d999f55..58ed2a9c1b 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -71,6 +71,7 @@ static std::map mapFlagNames = { {std::string("DISCOURAGE_UPGRADABLE_PUBKEYTYPE"), (unsigned int)SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE}, {std::string("DISCOURAGE_OP_SUCCESS"), (unsigned int)SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS}, {std::string("DISCOURAGE_UPGRADABLE_TAPROOT_VERSION"), (unsigned int)SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION}, + {std::string("REDUCED_DATA"), (unsigned int)SCRIPT_VERIFY_REDUCED_DATA}, }; unsigned int ParseScriptFlags(std::string strFlags) @@ -1236,4 +1237,159 @@ BOOST_AUTO_TEST_CASE(max_standard_legacy_sigops) BOOST_CHECK(!::AreInputsStandard(CTransaction(tx_max_sigops), coins, mempool_opts)); } +/** Sanity check the return value of SpendsNonAnchorWitnessProg for various output types. 
*/ +BOOST_AUTO_TEST_CASE(spends_witness_prog) +{ + CCoinsView coins_dummy; + CCoinsViewCache coins(&coins_dummy); + CKey key; + key.MakeNewKey(true); + const CPubKey pubkey{key.GetPubKey()}; + CMutableTransaction tx_create{}, tx_spend{}; + tx_create.vout.emplace_back(0, CScript{}); + tx_spend.vin.emplace_back(Txid{}, 0); + std::vector> sol_dummy; + + // CNoDestination, PubKeyDestination, PKHash, ScriptHash, WitnessV0ScriptHash, WitnessV0KeyHash, + // WitnessV1Taproot, PayToAnchor, WitnessUnknown. + static_assert(std::variant_size_v == 9); + + // Go through all defined output types and sanity check SpendsNonAnchorWitnessProg. + + // P2PK + tx_create.vout[0].scriptPubKey = GetScriptForDestination(PubKeyDestination{pubkey}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::PUBKEY); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2PKH + tx_create.vout[0].scriptPubKey = GetScriptForDestination(PKHash{pubkey}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::PUBKEYHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH + auto redeem_script{CScript{} << OP_1 << OP_CHECKSIG}; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash{redeem_script}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << OP_0 << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + + // native P2WSH + const auto witness_script{CScript{} << OP_12 << 
OP_HASH160 << OP_DUP << OP_EQUAL}; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash{witness_script}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V0_SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2WSH + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // native P2WPKH + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV0KeyHash{pubkey}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V0_KEYHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2WPKH + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + 
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2TR + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV1Taproot{XOnlyPubKey{pubkey}}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V1_TAPROOT); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2TR (undefined, non-standard) + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2A + tx_create.vout[0].scriptPubKey = GetScriptForDestination(PayToAnchor{}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::ANCHOR); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2A (undefined, non-standard) + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + 
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + + // Undefined version 1 witness program + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessUnknown{1, {0x42, 0x42}}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_UNKNOWN); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped undefined version 1 witness program + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // Various undefined version >1 32-byte witness programs. + const auto program{ToByteVector(XOnlyPubKey{pubkey})}; + for (int i{2}; i <= 16; ++i) { + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessUnknown{i, program}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_UNKNOWN); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // It's also detected within P2SH. 
+ redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + } +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index d22b815fcd..cf93d4702c 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -2,9 +2,11 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include #include #include #include +#include