From 10cbf2255d9e74166f5f41e82fefa4a7f890e73f Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 12 Feb 2025 17:39:52 +0100 Subject: [PATCH 001/356] ci: Use APT_LLVM_V in msan task Also, use update-alternatives to avoid having to manually specify clang-${APT_LLVM_V} or llvm-symbolizer-${APT_LLVM_V} everywhere. Github-Pull: #32999 Rebased-From: fad040a5787a8ac0a13aef5c54e5a675de239e92 --- ci/test/00_setup_env_native_asan.sh | 4 +-- ci/test/00_setup_env_native_fuzz.sh | 5 ++-- ci/test/00_setup_env_native_msan.sh | 3 ++- ci/test/00_setup_env_native_tsan.sh | 2 +- ci/test/01_base_install.sh | 40 ++++++++++++++++++----------- 5 files changed, 32 insertions(+), 22 deletions(-) diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index ead550a43c..9f562df464 100755 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -26,8 +26,8 @@ export GOAL="install" export BITCOIN_CONFIG="\ -DWITH_USDT=ON -DWITH_ZMQ=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=ON \ -DSANITIZERS=address,float-divide-by-zero,integer,undefined \ - -DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \ - -DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \ + -DCMAKE_C_COMPILER=clang \ + -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \ -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern -Wno-error=deprecated-declarations' \ -DAPPEND_CXXFLAGS='-std=c++23' \ diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh index c5220211fc..f80c4d988d 100755 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -19,9 +19,8 @@ export CI_CONTAINER_CAP="--cap-add SYS_PTRACE" # If run with (ASan + LSan), the export BITCOIN_CONFIG="\ -DBUILD_FOR_FUZZING=ON \ -DSANITIZERS=fuzzer,address,undefined,float-divide-by-zero,integer \ - -DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \ - -DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \ + 
-DCMAKE_C_COMPILER=clang \ + -DCMAKE_CXX_COMPILER=clang++ \ -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \ -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern' \ " -export LLVM_SYMBOLIZER_PATH="/usr/bin/llvm-symbolizer-${APT_LLVM_V}" diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index 8784aaa5b7..effd8a9413 100755 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -7,13 +7,14 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" +export APT_LLVM_V="20" LIBCXX_DIR="/msan/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" export CONTAINER_NAME="ci_native_msan" -export PACKAGES="ninja-build" +export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev ninja-build" # BDB generates false-positives and will be removed in future export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" export GOAL="install" diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index b341adfec5..c8d9c8455f 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -10,7 +10,7 @@ export CONTAINER_NAME=ci_native_tsan export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" export APT_LLVM_V="20" export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libc++abi-${APT_LLVM_V}-dev libc++-${APT_LLVM_V}-dev python3-zmq" -export DEP_OPTS="CC=clang-${APT_LLVM_V} CXX='clang++-${APT_LLVM_V} -stdlib=libc++'" +export DEP_OPTS="CC=clang 
CXX='clang++ -stdlib=libc++'" export GOAL="install" export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DSANITIZERS=thread \ -DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 1344563268..36a7c43b3f 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -43,27 +43,37 @@ elif [ "$CI_OS_NAME" != "macos" ]; then ${CI_RETRY_EXE} bash -c "apt-get install --no-install-recommends --no-upgrade -y $PACKAGES $CI_BASE_PACKAGES" fi +if [ -n "${APT_LLVM_V}" ]; then + update-alternatives --install /usr/bin/clang++ clang++ "/usr/bin/clang++-${APT_LLVM_V}" 100 + update-alternatives --install /usr/bin/clang clang "/usr/bin/clang-${APT_LLVM_V}" 100 + update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer "/usr/bin/llvm-symbolizer-${APT_LLVM_V}" 100 +fi + if [ -n "$PIP_PACKAGES" ]; then # shellcheck disable=SC2086 ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES fi if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.0" /msan/llvm-project - - cmake -G Ninja -B /msan/clang_build/ \ - -DLLVM_ENABLE_PROJECTS="clang" \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_TARGETS_TO_BUILD=Native \ - -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ - -S /msan/llvm-project/llvm - - ninja -C /msan/clang_build/ "$MAKEJOBS" - ninja -C /msan/clang_build/ install-runtimes - - update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100 - update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100 - update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100 + if [ -n "${APT_LLVM_V}" ]; then + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-$( clang --version | sed --silent 's@.*clang version 
\([0-9.]*\).*@\1@p' )" /msan/llvm-project + else + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.8" /msan/llvm-project + + cmake -G Ninja -B /msan/clang_build/ \ + -DLLVM_ENABLE_PROJECTS="clang" \ + -DCMAKE_BUILD_TYPE=Release \ + -DLLVM_TARGETS_TO_BUILD=Native \ + -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ + -S /msan/llvm-project/llvm + + ninja -C /msan/clang_build/ "$MAKEJOBS" + ninja -C /msan/clang_build/ install-runtimes + + update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100 + update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100 + update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100 + fi cmake -G Ninja -B /msan/cxx_build/ \ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \ From 0fba5ae02101b358aa4938d35471356b75e0e615 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 30 Jul 2025 11:10:16 +0100 Subject: [PATCH 002/356] ci: allow libc++ instrumentation other than msan Github-Pull: #33099 Rebased-From: 6653cafd0b70b0e7a29c6cfe236d3bf9d1bce91e --- ci/test/00_setup_env_native_fuzz_with_msan.sh | 4 +-- ci/test/00_setup_env_native_msan.sh | 4 +-- ci/test/01_base_install.sh | 32 +++++++++---------- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh index a6e53dc8a2..27b704017c 100755 --- a/ci/test/00_setup_env_native_fuzz_with_msan.sh +++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -LIBCXX_DIR="/msan/cxx_build/" +LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib 
-Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" @@ -27,7 +27,7 @@ export BITCOIN_CONFIG="\ -DSANITIZERS=fuzzer,memory \ -DAPPEND_CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE -U_FORTIFY_SOURCE' \ " -export USE_MEMORY_SANITIZER="true" +export USE_INSTRUMENTED_LIBCPP="MemoryWithOrigins" export RUN_UNIT_TESTS="false" export RUN_FUNCTIONAL_TESTS="false" export RUN_FUZZ_TESTS=true diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index effd8a9413..b450a2ea1e 100755 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" export APT_LLVM_V="20" -LIBCXX_DIR="/msan/cxx_build/" +LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" @@ -27,4 +27,4 @@ export BITCOIN_CONFIG="\ -DSANITIZERS=memory \ -DAPPEND_CPPFLAGS='-U_FORTIFY_SOURCE' \ " -export USE_MEMORY_SANITIZER="true" +export USE_INSTRUMENTED_LIBCPP="MemoryWithOrigins" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 36a7c43b3f..4746a1f69d 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -54,31 +54,31 @@ if [ -n "$PIP_PACKAGES" ]; then ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES fi -if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then +if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then if [ -n "${APT_LLVM_V}" ]; then - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-$( clang --version | sed --silent 's@.*clang version \([0-9.]*\).*@\1@p' )" 
/msan/llvm-project + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-$( clang --version | sed --silent 's@.*clang version \([0-9.]*\).*@\1@p' )" /llvm-project else - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.8" /msan/llvm-project + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.8" /llvm-project - cmake -G Ninja -B /msan/clang_build/ \ + cmake -G Ninja -B /clang_build/ \ -DLLVM_ENABLE_PROJECTS="clang" \ -DCMAKE_BUILD_TYPE=Release \ -DLLVM_TARGETS_TO_BUILD=Native \ -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ - -S /msan/llvm-project/llvm + -S /llvm-project/llvm - ninja -C /msan/clang_build/ "$MAKEJOBS" - ninja -C /msan/clang_build/ install-runtimes + ninja -C /clang_build/ "$MAKEJOBS" + ninja -C /clang_build/ install-runtimes - update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100 - update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100 - update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100 + update-alternatives --install /usr/bin/clang++ clang++ /clang_build/bin/clang++ 100 + update-alternatives --install /usr/bin/clang clang /clang_build/bin/clang 100 + update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /clang_build/bin/llvm-symbolizer 100 fi - cmake -G Ninja -B /msan/cxx_build/ \ + cmake -G Ninja -B /cxx_build/ \ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \ -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_USE_SANITIZER=MemoryWithOrigins \ + -DLLVM_USE_SANITIZER="${USE_INSTRUMENTED_LIBCPP}" \ -DCMAKE_C_COMPILER=clang \ -DCMAKE_CXX_COMPILER=clang++ \ -DLLVM_TARGETS_TO_BUILD=Native \ @@ -86,13 +86,13 @@ if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then -DLIBCXXABI_USE_LLVM_UNWINDER=OFF \ 
-DLIBCXX_ABI_DEFINES="_LIBCPP_ABI_BOUNDED_ITERATORS;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STD_ARRAY;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STRING;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_VECTOR;_LIBCPP_ABI_BOUNDED_UNIQUE_PTR" \ -DLIBCXX_HARDENING_MODE=debug \ - -S /msan/llvm-project/runtimes + -S /llvm-project/runtimes - ninja -C /msan/cxx_build/ "$MAKEJOBS" + ninja -C /cxx_build/ "$MAKEJOBS" # Clear no longer needed source folder - du -sh /msan/llvm-project - rm -rf /msan/llvm-project + du -sh /llvm-project + rm -rf /llvm-project fi if [[ "${RUN_TIDY}" == "true" ]]; then From f9939cdbe01fa090bd2ece90f5cbfb17120c2f24 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 30 Jul 2025 11:15:33 +0100 Subject: [PATCH 003/356] ci: instrument libc++ in TSAN job Qt is disabled, as the build is now taking a very long time. Github-Pull: #33099 Rebased-From: b09af2ce508185086bb551bfeb1409355c897e7b --- .cirrus.yml | 2 +- ci/test/00_setup_env_native_tsan.sh | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.cirrus.yml b/.cirrus.yml index 393237af66..6e70dc15fe 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -158,7 +158,7 @@ task: FILE_ENV: "./ci/test/00_setup_env_native_previous_releases.sh" task: - name: 'TSan, depends, gui' + name: 'TSan, depends, no gui' << : *GLOBAL_TASK_TEMPLATE persistent_worker: labels: diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index c8d9c8455f..dce63d5151 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -9,8 +9,11 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_tsan export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" export APT_LLVM_V="20" -export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libc++abi-${APT_LLVM_V}-dev libc++-${APT_LLVM_V}-dev python3-zmq" -export DEP_OPTS="CC=clang CXX='clang++ -stdlib=libc++'" +LIBCXX_DIR="/cxx_build/" +LIBCXX_FLAGS="-fsanitize=thread -nostdinc++ -nostdlib++ -isystem 
${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" +export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev python3-zmq ninja-build" +export DEP_OPTS="CC=clang CXX=clang++ CXXFLAGS='${LIBCXX_FLAGS}' NO_QT=1" export GOAL="install" export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DSANITIZERS=thread \ -DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" +export USE_INSTRUMENTED_LIBCPP="Thread" From 5513516241463333548600f691a861dba4c1d5c5 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 30 Jul 2025 11:15:54 +0100 Subject: [PATCH 004/356] ci: remove DEBUG_LOCKORDER from TSAN job Github-Pull: #33099 Rebased-From: 7aa5b67132dfb71e915675a3dbcb806284e08197 --- ci/test/00_setup_env_native_tsan.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index dce63d5151..7d8d0cf203 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -15,5 +15,5 @@ export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev l export DEP_OPTS="CC=clang CXX=clang++ CXXFLAGS='${LIBCXX_FLAGS}' NO_QT=1" export GOAL="install" export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DSANITIZERS=thread \ --DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" +-DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'" export USE_INSTRUMENTED_LIBCPP="Thread" From ea40fa95d9af004d85187bee9d8efe278c888d8f Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 26 Aug 2025 16:49:38 +0100 Subject: [PATCH 005/356] ci: use LLVM 21 Github-Pull: #33258 Rebased-From: 4cf0ae474ba03830c86653f1abae4ab4d38c94e4 --- ci/test/00_setup_env_native_asan.sh | 2 +- ci/test/00_setup_env_native_fuzz.sh 
| 2 +- ci/test/00_setup_env_native_msan.sh | 2 +- ci/test/00_setup_env_native_tsan.sh | 2 +- ci/test/01_base_install.sh | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index 9f562df464..dbfcc259d6 100755 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -19,7 +19,7 @@ else fi export CONTAINER_NAME=ci_native_asan -export APT_LLVM_V="20" +export APT_LLVM_V="21" export PACKAGES="systemtap-sdt-dev clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev python3-zmq qtbase5-dev qttools5-dev qttools5-dev-tools libevent-dev libboost-dev libdb5.3++-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}" export NO_DEPENDS=1 export GOAL="install" diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh index f80c4d988d..d81cbcf228 100755 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" export CONTAINER_NAME=ci_native_fuzz -export APT_LLVM_V="20" +export APT_LLVM_V="21" export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libevent-dev libboost-dev libsqlite3-dev" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh index b450a2ea1e..879e82d55a 100755 --- a/ci/test/00_setup_env_native_msan.sh +++ b/ci/test/00_setup_env_native_msan.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -export APT_LLVM_V="20" +export APT_LLVM_V="21" LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi 
-lpthread -Wno-unused-command-line-argument" diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index 7d8d0cf203..6286e39d84 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_tsan export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" -export APT_LLVM_V="20" +export APT_LLVM_V="21" LIBCXX_DIR="/cxx_build/" LIBCXX_FLAGS="-fsanitize=thread -nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev python3-zmq ninja-build" diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 4746a1f69d..25a03d5f50 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -58,7 +58,7 @@ if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then if [ -n "${APT_LLVM_V}" ]; then ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-$( clang --version | sed --silent 's@.*clang version \([0-9.]*\).*@\1@p' )" /llvm-project else - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.8" /llvm-project + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.0" /llvm-project cmake -G Ninja -B /clang_build/ \ -DLLVM_ENABLE_PROJECTS="clang" \ From 7c6be9acae5a16956a7f8e53ae3f944a187a6713 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 5 Sep 2025 12:04:09 +0100 Subject: [PATCH 006/356] doc: update release notes for 29.x --- doc/release-notes.md | 183 ++----------------------------------------- 1 file changed, 5 insertions(+), 178 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 923d34a99b..b73e52dc57 100644 --- a/doc/release-notes.md +++ 
b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.1 is now available from: +Bitcoin Core version 29.x is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. @@ -37,192 +37,19 @@ unsupported systems. Notable changes =============== -### Mempool Policy - -- The maximum number of potentially executed legacy signature operations in a - single standard transaction is now limited to 2500. Signature operations in all - previous output scripts, in all input scripts, as well as all P2SH redeem - scripts (if there are any) are counted toward the limit. The new limit is - assumed to not affect any known typically formed standard transactions. The - change was done to prepare for a possible BIP54 deployment in the future. - -- #32521 policy: make pathological transactions packed with legacy sigops non-standard - -- The minimum block feerate (`-blockmintxfee`) has been changed to 1 satoshi per kvB. It can still be changed using the -configuration option. - -- The default minimum relay feerate (`-minrelaytxfee`) and incremental relay feerate (`-incrementalrelayfee`) have been -changed to 100 satoshis per kvB. They can still be changed using their respective configuration options, but it is -recommended to change both together if you decide to do so. - - Other minimum feerates (e.g. the dust feerate, the minimum returned by the fee estimator, and all feerates used by - the wallet) remain unchanged. The mempool minimum feerate still changes in response to high volume. - - Note that unless these lower defaults are widely adopted across the network, transactions created with lower fee - rates are not guaranteed to propagate or confirm. The wallet feerates remain unchanged; `-mintxfee` must be changed - before attempting to create transactions with lower feerates using the wallet. 
- -- #33106 policy: lower the default blockmintxfee, incrementalrelayfee, minrelaytxfee - -### Logging - -Unconditional logging to disk is now rate limited by giving each source location -a quota of 1MiB per hour. Unconditional logging is any logging with a log level -higher than debug, that is `info`, `warning`, and `error`. All logs will be -prefixed with `[*]` if there is at least one source location that is currently -being suppressed. (#32604) - -When `-logsourcelocations` is enabled, the log output now contains the entire -function signature instead of just the function name. (#32604) - -### RPC - -- The `dumptxoutset` RPC now requires a `type` parameter to be specified. To maintain pre - v29.0 behavior, use the `latest` parameter. Documenting this change was missed in the v29.0 - release notes. (#30808) - -### Updated Settings - -- The `-maxmempool` and `-dbcache` startup parameters are now capped on - 32-bit systems to 500MB and 1GiB respectively. - -- #32530 node: cap -maxmempool and -dbcache values for 32-bit - -### Wallet - -- #31757 wallet: fix crash on double block disconnection -- #32553 wallet: Fix logging of wallet version - -### P2P - -- #32826 p2p: add more bad ports - -### Test - -- #32069 test: fix intermittent failure in wallet_reorgsrestore.py -- #32286 test: Handle empty string returned by CLI as None in RPC tests -- #32312 test: Fix feature_pruning test after nTime typo fix -- #32336 test: Suppress upstream -Wduplicate-decl-specifier in bpfcc -- #32463 test: fix an incorrect feature_fee_estimation.py subtest -- #32483 test: fix two intermittent failures in wallet_basic.py -- #32630 test: fix sync function in rpc_psbt.py -- #32765 test: Fix list index out of range error in feature_bip68_sequence.py -- #32742 test: fix catchup loop in outbound eviction functional test -- #32823 test: Fix wait_for_getheaders() call in test_outbound_eviction_blocks_relay_only() -- #32833 test: Add msgtype to msg_generic slots -- #32841 feature_taproot: sample tx 
version border values more -- #32850 test: check P2SH sigop count for coinbase tx -- #32859 test: correctly detect nonstd TRUC tx vsize in feature_taproot -- #33001 test: Do not pass tests on unhandled exceptions - -### Indexes - -- #33212 index: Don't commit state in BaseIndex::Rewind - -### Util - -- #32248 Remove support for RNDR/RNDRRS for aarch64 - -### Build - -- #32356 cmake: Respect user-provided configuration-specific flags -- #32437 crypto: disable ASan for sha256_sse4 with Clang -- #32469 cmake: Allow WITH_DBUS on all Unix-like systems -- #32439 guix: accomodate migration to codeberg -- #32551 cmake: Add missed SSE41_CXXFLAGS -- #32568 depends: use "mkdir -p" when installing xproto -- #32678 guix: warn and abort when SOURCE_DATE_EPOCH is set -- #32690 depends: fix SHA256SUM command on OpenBSD (use GNU mode output) -- #32716 depends: Override host compilers for FreeBSD and OpenBSD -- #32760 depends: capnp 1.2.0 -- #32798 build: add root dir to CMAKE_PREFIX_PATH in toolchain -- #32805 cmake: Use HINTS instead of PATHS in find_* commands -- #32814 cmake: Explicitly specify Boost_ROOT for Homebrew's package -- #32837 depends: fix libevent _WIN32_WINNT usage -- #32943 depends: Force CMAKE_EXPORT_NO_PACKAGE_REGISTRY=TRUE -- #32954 cmake: Drop no longer necessary "cmakeMinimumRequired" object -- #33073 guix: warn SOURCE_DATE_EPOCH set in guix-codesign - -### Gui - -- #864 Crash fix, disconnect numBlocksChanged() signal during shutdown -- #868 Replace stray tfm::format to cerr with qWarning - -### Doc - -- #32333 doc: Add missing top-level description to pruneblockchain RPC -- #32353 doc: Fix fuzz test_runner.py path -- #32389 doc: Fix test_bitcoin path -- #32607 rpc: Note in fundrawtransaction doc, fee rate is for package -- #32679 doc: update tor docs to use bitcoind binary from path -- #32693 depends: fix cmake compatibility error for freetype -- #32696 doc: make -DWITH_ZMQ=ON explicit on build-unix.md -- #32708 rpc, doc: update listdescriptors RCP help -- 
#32711 doc: add missing packages for BSDs (cmake, gmake, curl) to depends/README.md -- #32719 doc, windows: CompanyName "Bitcoin" => "Bitcoin Core project" -- #32776 doc: taproot became always active in v24.0 -- #32777 doc: fix Transifex 404s -- #32846 doc: clarify that the "-j N" goes after the "--build build" part -- #32858 doc: Add workaround for vcpkg issue with paths with embedded spaces -- #33070 doc/zmq: fix unix socket path example -- #33088 doc: move cmake -B build -LH up in Unix build docs -- #33133 rpc: fix getpeerinfo ping duration unit docs -- #33119 rpc: Fix 'getdescriptoractivity' RPCHelpMan, add test to verify fix -- #33236 doc: Remove wrong and redundant doxygen tag - ### CI -- #32184 ci: Add workaround for vcpkg's libevent package -- #33261 ci: return to using dash in CentOS job - -### Misc - -- #32187 refactor: Remove spurious virtual from final ~CZMQNotificationInterface -- #32454 tracing: fix invalid argument in mempool_monitor -- #32771 contrib: tracing: Fix read of pmsg_type in p2p_monitor.py -- #33086 contrib: [tracing] fix pointer argument handling in mempool_monitor.py +- #32999 ci: Use APT_LLVM_V in msan task +- #33099 ci: allow for any libc++ intrumentation & use it for TSAN +- #33258 ci: use LLVM 21 Credits ======= Thanks to everyone who directly contributed to this release: -- 0xB10C -- achow101 -- Antoine Poinsot -- benthecarman -- bigspider -- Brandon Odiwuor -- brunoerg -- Bufo -- Christewart -- Crypt-iQ -- davidgumberg -- deadmanoz -- dergoegge -- enirox001 - fanquake -- furszy -- glozow -- instagibbs -- Hennadii Stepanov -- hodlinator -- ismaelsadeeq -- jb55 -- jlopp -- josibake -- laanwj -- luisschwab - MarcoFalke -- Martin Zumsande -- monlovesmango -- nervana21 -- pablomartin4btc -- rkrux -- romanz -- ryanofsky -- Sjors -- theStack -- willcl-ark -- zaidmstrr As well as to everyone that helped with translations on [Transifex](https://explore.transifex.com/bitcoin/bitcoin/). 
From 2717331981ec94fd616a08f31e643391a2118639 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Mon, 8 Sep 2025 12:11:47 +0100 Subject: [PATCH 007/356] Fix benchmark CSV output The `SHA256AutoDetect` return output is used, among other use cases, to name benchmarks. Using a comma breaks the CSV output. This change replaces the comma with a semicolon, which fixes the issue. Github-Pull: #33340 Rebased-From: 790b440197bde322432a5bab161f1869b667e681 --- src/crypto/sha256.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/crypto/sha256.cpp b/src/crypto/sha256.cpp index 09c5d3123e..c5f495708d 100644 --- a/src/crypto/sha256.cpp +++ b/src/crypto/sha256.cpp @@ -627,7 +627,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem Transform = sha256_x86_shani::Transform; TransformD64 = TransformD64Wrapper; TransformD64_2way = sha256d64_x86_shani::Transform_2way; - ret = "x86_shani(1way,2way)"; + ret = "x86_shani(1way;2way)"; have_sse4 = false; // Disable SSE4/AVX2; have_avx2 = false; } @@ -641,14 +641,14 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem #endif #if defined(ENABLE_SSE41) TransformD64_4way = sha256d64_sse41::Transform_4way; - ret += ",sse41(4way)"; + ret += ";sse41(4way)"; #endif } #if defined(ENABLE_AVX2) if (have_avx2 && have_avx && enabled_avx) { TransformD64_8way = sha256d64_avx2::Transform_8way; - ret += ",avx2(8way)"; + ret += ";avx2(8way)"; } #endif #endif // defined(HAVE_GETCPUID) @@ -682,7 +682,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem Transform = sha256_arm_shani::Transform; TransformD64 = TransformD64Wrapper; TransformD64_2way = sha256d64_arm_shani::Transform_2way; - ret = "arm_shani(1way,2way)"; + ret = "arm_shani(1way;2way)"; } #endif #endif // DISABLE_OPTIMIZED_SHA256 From 324caa84977cc74ac19df605503483e59739773e Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 10 
Sep 2025 09:12:40 +0100 Subject: [PATCH 008/356] ci: always use tag for LLVM checkout Rather than trying to match the apt installed clang version, which is prone to intermittent issues. i.e #33345. Github-Pull: #33364 Rebased-From: b736052e39f1f466f63f261ace3dd2deba171e8a --- ci/test/01_base_install.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 25a03d5f50..1b624f3894 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -55,10 +55,9 @@ if [ -n "$PIP_PACKAGES" ]; then fi if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then + ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.1" /llvm-project + if [ -n "${APT_LLVM_V}" ]; then - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-$( clang --version | sed --silent 's@.*clang version \([0-9.]*\).*@\1@p' )" /llvm-project - else - ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.0" /llvm-project cmake -G Ninja -B /clang_build/ \ -DLLVM_ENABLE_PROJECTS="clang" \ From e97588fc3d1e1a02382312ade7d529c5b4b60016 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 4 Sep 2025 19:25:33 +0000 Subject: [PATCH 009/356] trace: Workaround GCC bug compiling with old systemtap Github-Pull: #33310 Rebased-From: 93a29ff2830162c8129d35c7b9beb43fab984503 --- cmake/module/FindUSDT.cmake | 4 ++++ src/util/trace.h | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/cmake/module/FindUSDT.cmake b/cmake/module/FindUSDT.cmake index 0be7c28ff5..234a099f3f 100644 --- a/cmake/module/FindUSDT.cmake +++ b/cmake/module/FindUSDT.cmake @@ -36,6 +36,10 @@ if(USDT_INCLUDE_DIR) include(CheckCXXSourceCompiles) set(CMAKE_REQUIRED_INCLUDES ${USDT_INCLUDE_DIR}) check_cxx_source_compiles(" + #if defined(__arm__) + # define STAP_SDT_ARG_CONSTRAINT g + #endif + // Setting SDT_USE_VARIADIC lets systemtap (sys/sdt.h) know that we want to use // 
the optional variadic macros to define tracepoints. #define SDT_USE_VARIADIC 1 diff --git a/src/util/trace.h b/src/util/trace.h index 3deefeade3..ab005dd8bc 100644 --- a/src/util/trace.h +++ b/src/util/trace.h @@ -9,6 +9,13 @@ #ifdef ENABLE_TRACING +// Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103395 +// systemtap 4.6 on 32-bit ARM triggers internal compiler error +// (this workaround is included in systemtap 4.7+) +#if defined(__arm__) +# define STAP_SDT_ARG_CONSTRAINT g +#endif + // Setting SDT_USE_VARIADIC lets systemtap (sys/sdt.h) know that we want to use // the optional variadic macros to define tracepoints. #define SDT_USE_VARIADIC 1 From 9b95ab5e9db1691be5f26fc5bc1c186777d2dc5b Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Wed, 21 May 2025 13:36:43 -0400 Subject: [PATCH 010/356] p2p: Add witness mutation check inside FillBlock Since #29412, we have not allowed mutated blocks to continue being processed immediately the block is received, but this is only done for the legacy BLOCK message. Extend these checks as belt-and-suspenders to not allow similar mutation strategies to affect relay by honest peers by applying the check inside PartiallyDownloadedBlock::FillBlock, immediately before returning READ_STATUS_OK. This also removes the extraneous CheckBlock call. 
Github-Pull: #32646 Rebased-From: bac9ee4830664c86c1cb3d38a5b19c722aae2f54 --- src/blockencodings.cpp | 17 +++------ src/blockencodings.h | 7 ++-- src/net_processing.cpp | 10 ++++- src/test/blockencodings_tests.cpp | 16 ++++---- src/test/fuzz/partially_downloaded_block.cpp | 39 ++++++-------------- 5 files changed, 37 insertions(+), 52 deletions(-) diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp index 5f4061a71d..5975a99faa 100644 --- a/src/blockencodings.cpp +++ b/src/blockencodings.cpp @@ -180,7 +180,7 @@ bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const return txn_available[index] != nullptr; } -ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector& vtx_missing) +ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector& vtx_missing, bool segwit_active) { if (header.IsNull()) return READ_STATUS_INVALID; @@ -205,16 +205,11 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector< if (vtx_missing.size() != tx_missing_offset) return READ_STATUS_INVALID; - BlockValidationState state; - CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock; - if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) { - // TODO: We really want to just check merkle tree manually here, - // but that is expensive, and CheckBlock caches a block's - // "checked-status" (in the CBlock?). CBlock should be able to - // check its own merkle root and cache that check. - if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED) - return READ_STATUS_FAILED; // Possible Short ID collision - return READ_STATUS_CHECKBLOCK_FAILED; + // Check for possible mutations early now that we have a seemingly good block + IsBlockMutatedFn check_mutated{m_check_block_mutated_mock ? 
m_check_block_mutated_mock : IsBlockMutated}; + if (check_mutated(/*block=*/block, + /*check_witness_root=*/segwit_active)) { + return READ_STATUS_FAILED; // Possible Short ID collision } LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size()); diff --git a/src/blockencodings.h b/src/blockencodings.h index c92aa05e80..b1f82d18c5 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -141,15 +141,16 @@ class PartiallyDownloadedBlock { CBlockHeader header; // Can be overridden for testing - using CheckBlockFn = std::function; - CheckBlockFn m_check_block_mock{nullptr}; + using IsBlockMutatedFn = std::function; + IsBlockMutatedFn m_check_block_mutated_mock{nullptr}; explicit PartiallyDownloadedBlock(CTxMemPool* poolIn) : pool(poolIn) {} // extra_txn is a list of extra orphan/conflicted/etc transactions to look at ReadStatus InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector& extra_txn); bool IsTxAvailable(size_t index) const; - ReadStatus FillBlock(CBlock& block, const std::vector& vtx_missing); + // segwit_active enforces witness mutation checks just before reporting a healthy status + ReadStatus FillBlock(CBlock& block, const std::vector& vtx_missing, bool segwit_active); }; #endif // BITCOIN_BLOCKENCODINGS_H diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 1da3ec9d21..0f1d6d98aa 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3314,7 +3314,11 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl } PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; - ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn); + + // We should not have gotten this far in compact block processing unless it's attached to a known header + 
const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))}; + ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn, + /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); if (status == READ_STATUS_INVALID) { RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect Misbehaving(peer, "invalid compact block/non-matching block transactions"); @@ -4462,7 +4466,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } std::vector dummy; - status = tempBlock.FillBlock(*pblock, dummy); + const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock))}; + status = tempBlock.FillBlock(*pblock, dummy, + /*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT)); if (status == READ_STATUS_OK) { fBlockReconstructed = true; } diff --git a/src/test/blockencodings_tests.cpp b/src/test/blockencodings_tests.cpp index ed95a8831e..d40a0a94ae 100644 --- a/src/test/blockencodings_tests.cpp +++ b/src/test/blockencodings_tests.cpp @@ -95,21 +95,21 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest) CBlock block2; { PartiallyDownloadedBlock tmp = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions + BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions partialBlock = tmp; } // Wrong transaction { PartiallyDownloadedBlock tmp = partialBlock; - partialBlock.FillBlock(block2, {block.vtx[2]}); // Current implementation doesn't check txn here, but don't require that + partialBlock.FillBlock(block2, {block.vtx[2]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that partialBlock = tmp; } bool mutated; 
BOOST_CHECK(block.hashMerkleRoot != BlockMerkleRoot(block2, &mutated)); CBlock block3; - BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); @@ -182,14 +182,14 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) CBlock block2; { PartiallyDownloadedBlock tmp = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions + BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions partialBlock = tmp; } // Wrong transaction { PartiallyDownloadedBlock tmp = partialBlock; - partialBlock.FillBlock(block2, {block.vtx[1]}); // Current implementation doesn't check txn here, but don't require that + partialBlock.FillBlock(block2, {block.vtx[1]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that partialBlock = tmp; } BOOST_CHECK_EQUAL(pool.get(block.vtx[2]->GetHash()).use_count(), SHARED_TX_OFFSET + 2); // +2 because of partialBlock and block2 @@ -198,7 +198,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest) CBlock block3; PartiallyDownloadedBlock partialBlockCopy = partialBlock; - BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString()); BOOST_CHECK(!mutated); @@ -252,7 +252,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest) CBlock block2; PartiallyDownloadedBlock partialBlockCopy = partialBlock; - 
BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); bool mutated; BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); @@ -300,7 +300,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest) CBlock block2; std::vector vtx_missing; - BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK); + BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing, /*segwit_active=*/true) == READ_STATUS_OK); BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString()); BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString()); BOOST_CHECK(!mutated); diff --git a/src/test/fuzz/partially_downloaded_block.cpp b/src/test/fuzz/partially_downloaded_block.cpp index 82d781cd53..1a06ef8b0a 100644 --- a/src/test/fuzz/partially_downloaded_block.cpp +++ b/src/test/fuzz/partially_downloaded_block.cpp @@ -32,14 +32,10 @@ void initialize_pdb() g_setup = testing_setup.get(); } -PartiallyDownloadedBlock::CheckBlockFn FuzzedCheckBlock(std::optional result) +PartiallyDownloadedBlock::IsBlockMutatedFn FuzzedIsBlockMutated(bool result) { - return [result](const CBlock&, BlockValidationState& state, const Consensus::Params&, bool, bool) { - if (result) { - return state.Invalid(*result); - } - - return true; + return [result](const CBlock& block, bool) { + return result; }; } @@ -111,36 +107,23 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb) skipped_missing |= (!pdb.IsTxAvailable(i) && skip); } - // Mock CheckBlock - bool fail_check_block{fuzzed_data_provider.ConsumeBool()}; - auto validation_result = - fuzzed_data_provider.PickValueInArray( - {BlockValidationResult::BLOCK_RESULT_UNSET, - BlockValidationResult::BLOCK_CONSENSUS, - BlockValidationResult::BLOCK_CACHED_INVALID, - 
BlockValidationResult::BLOCK_INVALID_HEADER, - BlockValidationResult::BLOCK_MUTATED, - BlockValidationResult::BLOCK_MISSING_PREV, - BlockValidationResult::BLOCK_INVALID_PREV, - BlockValidationResult::BLOCK_TIME_FUTURE, - BlockValidationResult::BLOCK_CHECKPOINT, - BlockValidationResult::BLOCK_HEADER_LOW_WORK}); - pdb.m_check_block_mock = FuzzedCheckBlock( - fail_check_block ? - std::optional{validation_result} : - std::nullopt); + bool segwit_active{fuzzed_data_provider.ConsumeBool()}; + + // Mock IsBlockMutated + bool fail_block_mutated{fuzzed_data_provider.ConsumeBool()}; + pdb.m_check_block_mutated_mock = FuzzedIsBlockMutated(fail_block_mutated); CBlock reconstructed_block; - auto fill_status{pdb.FillBlock(reconstructed_block, missing)}; + auto fill_status{pdb.FillBlock(reconstructed_block, missing, segwit_active)}; switch (fill_status) { case READ_STATUS_OK: assert(!skipped_missing); - assert(!fail_check_block); + assert(!fail_block_mutated); assert(block->GetHash() == reconstructed_block.GetHash()); break; case READ_STATUS_CHECKBLOCK_FAILED: [[fallthrough]]; case READ_STATUS_FAILED: - assert(fail_check_block); + assert(fail_block_mutated); break; case READ_STATUS_INVALID: break; From 4c940d47897bc380d3387dd6663c37c46b4020ec Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Tue, 3 Jun 2025 10:29:00 -0400 Subject: [PATCH 011/356] p2p: remove vestigial READ_STATUS_CHECKBLOCK_FAILED Github-Pull: #32646 Rebased-From: 28299ce77636d7563ec545d043cf1b61bd2f01c1 --- src/blockencodings.h | 2 -- src/net_processing.cpp | 18 +----------------- src/test/fuzz/partially_downloaded_block.cpp | 1 - 3 files changed, 1 insertion(+), 20 deletions(-) diff --git a/src/blockencodings.h b/src/blockencodings.h index b1f82d18c5..fce59bc561 100644 --- a/src/blockencodings.h +++ b/src/blockencodings.h @@ -84,8 +84,6 @@ typedef enum ReadStatus_t READ_STATUS_OK, READ_STATUS_INVALID, // Invalid object, peer is sending bogus crap READ_STATUS_FAILED, // Failed to process object - 
READ_STATUS_CHECKBLOCK_FAILED, // Used only by FillBlock to indicate a - // failure in CheckBlock. } ReadStatus; class CBlockHeaderAndShortTxIDs { diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 0f1d6d98aa..fa27ceb38a 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3335,23 +3335,7 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl return; } } else { - // Block is either okay, or possibly we received - // READ_STATUS_CHECKBLOCK_FAILED. - // Note that CheckBlock can only fail for one of a few reasons: - // 1. bad-proof-of-work (impossible here, because we've already - // accepted the header) - // 2. merkleroot doesn't match the transactions given (already - // caught in FillBlock with READ_STATUS_FAILED, so - // impossible here) - // 3. the block is otherwise invalid (eg invalid coinbase, - // block is too big, too many legacy sigops, etc). - // So if CheckBlock failed, #3 is the only possibility. - // Under BIP 152, we don't discourage the peer unless proof of work is - // invalid (we don't require all the stateless checks to have - // been run). This is handled below, so just treat this as - // though the block was successfully read, and rely on the - // handling in ProcessNewBlock to ensure the block index is - // updated, etc. 
+ // Block is okay for further processing RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer fBlockRead = true; // mapBlockSource is used for potentially punishing peers and diff --git a/src/test/fuzz/partially_downloaded_block.cpp b/src/test/fuzz/partially_downloaded_block.cpp index 1a06ef8b0a..c9635cae8c 100644 --- a/src/test/fuzz/partially_downloaded_block.cpp +++ b/src/test/fuzz/partially_downloaded_block.cpp @@ -121,7 +121,6 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb) assert(!fail_block_mutated); assert(block->GetHash() == reconstructed_block.GetHash()); break; - case READ_STATUS_CHECKBLOCK_FAILED: [[fallthrough]]; case READ_STATUS_FAILED: assert(fail_block_mutated); break; From 569ceb0df46fc619eed33f56b5b36f617c37bae7 Mon Sep 17 00:00:00 2001 From: Eugene Siegel Date: Wed, 3 Sep 2025 12:44:23 -0400 Subject: [PATCH 012/356] net: check for empty header before calling FillBlock Previously in debug builds, this would cause an Assume crash if FillBlock had been called previously. This could happen when multiple blocktxn messages were received. Co-Authored-By: Greg Sanders Github-Pull: #33296 Rebased-From: 5e585a0fc4fd68dd7b4982054b34deae2e7aeb89 --- src/net_processing.cpp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index fa27ceb38a..d9c2163c1d 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3315,6 +3315,16 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock; + if (partialBlock.header.IsNull()) { + // It is possible for the header to be empty if a previous call to FillBlock wiped the header, but left + // the PartiallyDownloadedBlock pointer around (i.e. did not call RemoveBlockRequest). In this case, we + // should not call LookupBlockIndex below. 
+ RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); + Misbehaving(peer, "previous compact block reconstruction attempt failed"); + LogDebug(BCLog::NET, "Peer %d sent compact block transactions multiple times", pfrom.GetId()); + return; + } + // We should not have gotten this far in compact block processing unless it's attached to a known header const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))}; ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn, @@ -3326,6 +3336,9 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl } else if (status == READ_STATUS_FAILED) { if (first_in_flight) { // Might have collided, fall back to getdata now :( + // We keep the failed partialBlock to disallow processing another compact block announcement from the same + // peer for the same block. We let the full block download below continue under the same m_downloading_since + // timer. std::vector invs; invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash); MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs); From 1288d44804cd6ecd8601d0aef55e6fbf500d2f31 Mon Sep 17 00:00:00 2001 From: Eugene Siegel Date: Wed, 3 Sep 2025 12:44:52 -0400 Subject: [PATCH 013/356] test: send duplicate blocktxn message in p2p_compactblocks.py Add test_multiple_blocktxn_response that checks that the peer is disconnected. 
Github-Pull: #33296 Rebased-From: 8b6264768030db1840041abeeaeefd6c227a2644 --- test/functional/p2p_compactblocks.py | 42 ++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index ca36b2fbc0..da8a0aed9a 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -566,6 +566,42 @@ def test_incorrect_blocktxn_response(self, test_node): test_node.send_and_ping(msg_block(block)) assert_equal(int(node.getbestblockhash(), 16), block.sha256) + # Multiple blocktxn responses will cause a node to get disconnected. + def test_multiple_blocktxn_response(self, test_node): + node = self.nodes[0] + utxo = self.utxos[0] + + block = self.build_block_with_transactions(node, utxo, 2) + + # Send compact block + comp_block = HeaderAndShortIDs() + comp_block.initialize_from_block(block, prefill_list=[0], use_witness=True) + test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p())) + absolute_indexes = [] + with p2p_lock: + assert "getblocktxn" in test_node.last_message + absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute() + assert_equal(absolute_indexes, [1, 2]) + + # Send a blocktxn that does not succeed in reconstruction, triggering + # getdata fallback. + msg = msg_blocktxn() + msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[2]] + [block.vtx[1]]) + test_node.send_and_ping(msg) + + # Tip should not have updated + assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock) + + # We should receive a getdata request + test_node.wait_for_getdata([block.sha256], timeout=10) + assert test_node.last_message["getdata"].inv[0].type == MSG_BLOCK or \ + test_node.last_message["getdata"].inv[0].type == MSG_BLOCK | MSG_WITNESS_FLAG + + # Send the same blocktxn and assert the sender gets disconnected. 
+ with node.assert_debug_log(['previous compact block reconstruction attempt failed']): + test_node.send_message(msg) + test_node.wait_for_disconnect() + def test_getblocktxn_handler(self, test_node): node = self.nodes[0] # bitcoind will not send blocktxn responses for blocks whose height is @@ -957,6 +993,12 @@ def run_test(self): self.log.info("Testing handling of invalid compact blocks...") self.test_invalid_tx_in_compactblock(self.segwit_node) + self.log.info("Testing handling of multiple blocktxn responses...") + self.test_multiple_blocktxn_response(self.segwit_node) + + # The previous test will lead to a disconnection. Reconnect before continuing. + self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn()) + self.log.info("Testing invalid index in cmpctblock message...") self.test_invalid_cmpctblock_message() From 61cdc04a832cc5dfe98c48f8592c4de513258304 Mon Sep 17 00:00:00 2001 From: Martin Zumsande Date: Fri, 12 Sep 2025 17:29:04 -0400 Subject: [PATCH 014/356] net: Do not apply whitelist permission to onion inbounds Tor inbound connections do not reveal the peer's actual network address. Therefore do not apply whitelist permissions to them. 
Co-authored-by: Vasil Dimov Github-Pull: #33395 Rebased-From: f563ce90818d486d2a199439d2f6ba39cd106352 --- src/net.cpp | 11 +++++++---- src/net.h | 2 +- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 735985a841..7684877ec3 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -575,9 +575,9 @@ void CNode::CloseSocketDisconnect() m_i2p_sam_session.reset(); } -void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector& ranges) const { +void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, std::optional addr, const std::vector& ranges) const { for (const auto& subnet : ranges) { - if (subnet.m_subnet.Match(addr)) { + if (addr.has_value() && subnet.m_subnet.Match(addr.value())) { NetPermissions::AddFlag(flags, subnet.m_flags); } } @@ -1767,7 +1767,11 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr&& sock, { int nInbound = 0; - AddWhitelistPermissionFlags(permission_flags, addr, vWhitelistedRangeIncoming); + const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); + + // Tor inbound connections do not reveal the peer's actual network address. + // Therefore do not apply address-based whitelist permissions to them. + AddWhitelistPermissionFlags(permission_flags, inbound_onion ? std::optional{} : addr, vWhitelistedRangeIncoming); { LOCK(m_nodes_mutex); @@ -1822,7 +1826,6 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr&& sock, NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); - const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); // The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is // detected, so use it whenever we signal NODE_P2P_V2. 
ServiceFlags local_services = GetLocalServices(); diff --git a/src/net.h b/src/net.h index e64d9a67f4..e025b20bcd 100644 --- a/src/net.h +++ b/src/net.h @@ -1364,7 +1364,7 @@ class CConnman bool AttemptToEvictConnection(); CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex); - void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector& ranges) const; + void AddWhitelistPermissionFlags(NetPermissionFlags& flags, std::optional addr, const std::vector& ranges) const; void DeleteNode(CNode* pnode); From 9bc4afb62cf04a41b62fe279f0db3d87e700cb3d Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 9 Sep 2025 10:15:08 +0100 Subject: [PATCH 015/356] doc: update release notes for 29.x --- doc/release-notes.md | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index b73e52dc57..0325d3a3e2 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.x is now available from: +Bitcoin Core version 29.2rc1 is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. @@ -37,19 +37,37 @@ unsupported systems. 
Notable changes =============== +### P2P + +- #32646 p2p: Add witness mutation check inside FillBlock +- #33296 net: check for empty header before calling FillBlock +- #33395 net: do not apply whitelist permissions to onion inbounds + ### CI - #32999 ci: Use APT_LLVM_V in msan task - #33099 ci: allow for any libc++ intrumentation & use it for TSAN - #33258 ci: use LLVM 21 +- #33364 ci: always use tag for LLVM checkout + +### Misc + +- #33310 trace: Workaround GCC bug compiling with old systemtap +- #33340 Fix benchmark CSV output Credits ======= Thanks to everyone who directly contributed to this release: +- Eugene Siegel - fanquake +- Greg Sanders +- Hennadii Stepanov +- Luke Dashjr - MarcoFalke +- Martin Zumsande +- Vasil Dimov As well as to everyone that helped with translations on [Transifex](https://explore.transifex.com/bitcoin/bitcoin/). From 461dd13fafa6f8175e2be4d96e8728e667ba4d69 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 17 Sep 2025 15:47:34 +0100 Subject: [PATCH 016/356] build: bump version to v29.2rc1 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index dc613a7655..05a86a1d97 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,9 +28,9 @@ get_directory_property(precious_variables CACHE_VARIABLES) #============================= set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) -set(CLIENT_VERSION_MINOR 1) +set(CLIENT_VERSION_MINOR 2) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 0) +set(CLIENT_VERSION_RC 1) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From f2bd79f80c74a2b77f14954ac65679417697a332 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 17 Sep 2025 15:54:29 +0100 Subject: [PATCH 017/356] doc: update manual pages for v29.2rc1 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 +++--- 6 
files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index 428ddd3e2a..a8dc092a6c 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-CLI "1" "September 2025" "bitcoin-cli v29.1.0" "User Commands" +.TH BITCOIN-CLI "1" "September 2025" "bitcoin-cli v29.2.0rc1" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.1.0 +bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc1 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.1.0 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.1.0 +Bitcoin Core RPC client version v29.2.0rc1 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. .PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index 3665a6a48a..7821b8fb44 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-QT "1" "September 2025" "bitcoin-qt v29.1.0" "User Commands" +.TH BITCOIN-QT "1" "September 2025" "bitcoin-qt v29.2.0rc1" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.1.0 +bitcoin-qt \- manual page for bitcoin-qt v29.2.0rc1 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.1.0 +Bitcoin Core version v29.2.0rc1 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index 16058f1bf9..a14a6be602 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-TX "1" "September 2025" "bitcoin-tx v29.1.0" "User Commands" +.TH BITCOIN-TX "1" "September 2025" "bitcoin-tx v29.2.0rc1" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.1.0 +bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc1 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.1.0 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.1.0 +Bitcoin Core bitcoin\-tx utility version v29.2.0rc1 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index a103bf40a5..e0cc27e2d7 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-UTIL "1" "September 2025" "bitcoin-util v29.1.0" "User Commands" +.TH BITCOIN-UTIL "1" "September 2025" "bitcoin-util v29.2.0rc1" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.1.0 +bitcoin-util \- manual page for bitcoin-util v29.2.0rc1 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.1.0 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.1.0 +Bitcoin Core bitcoin\-util utility version v29.2.0rc1 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index b63494dc47..58bbf2715b 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-WALLET "1" "September 2025" "bitcoin-wallet v29.1.0" "User Commands" +.TH BITCOIN-WALLET "1" "September 2025" "bitcoin-wallet v29.2.0rc1" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.1.0 +bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0rc1 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.1.0 +Bitcoin Core bitcoin\-wallet utility version v29.2.0rc1 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index b8ee6bab52..0846f3e061 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIND "1" "September 2025" "bitcoind v29.1.0" "User Commands" +.TH BITCOIND "1" "September 2025" "bitcoind v29.2.0rc1" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.1.0 +bitcoind \- manual page for bitcoind v29.2.0rc1 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.1.0 +Bitcoin Core daemon version v29.2.0rc1 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. .PP From 80f9933c7a7d8293f7c6601c5138103ff37ae5fa Mon Sep 17 00:00:00 2001 From: laanwj <126646+laanwj@users.noreply.github.com> Date: Thu, 4 Sep 2025 21:22:49 +0200 Subject: [PATCH 018/356] net: Quiet down logging when router doesn't support natpmp/pcp When the router doesn't support natpmp and PCP, one'd normally expect the UDP packet to be ignored, and hit a time out. This logs a warning that is already in the debug category. However, there's also the case in which sending an UDP packet causes a ICMP response. This is returned to user space as "connection refused" (despite UDP having no concept of connections). 
Move the warnings from `Send` and `Recv` to debug level too, to reduce log spam in that case. Closes #33301. Github-Pull: #33311 Rebased-From: 4f1a4cbccd784e25f7932f1d0293602ef7f3e814 --- src/common/pcp.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/common/pcp.cpp b/src/common/pcp.cpp index d0d4955470..162abd1a82 100644 --- a/src/common/pcp.cpp +++ b/src/common/pcp.cpp @@ -230,7 +230,7 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p } // Dispatch packet to gateway. if (sock.Send(request.data(), request.size(), 0) != static_cast(request.size())) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "%s: Could not send request: %s\n", protocol, NetworkErrorString(WSAGetLastError())); + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Could not send request: %s\n", protocol, NetworkErrorString(WSAGetLastError())); return std::nullopt; // Network-level error, probably no use retrying. } @@ -251,7 +251,7 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p // Receive response. recvsz = sock.Recv(response, sizeof(response), MSG_DONTWAIT); if (recvsz < 0) { - LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "%s: Could not receive response: %s\n", protocol, NetworkErrorString(WSAGetLastError())); + LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Could not receive response: %s\n", protocol, NetworkErrorString(WSAGetLastError())); return std::nullopt; // Network-level error, probably no use retrying. 
} LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "%s: Received response of %d bytes: %s\n", protocol, recvsz, HexStr(Span(response, recvsz))); From 4b778a21835707f61a71f69f8cbb57bf322156b9 Mon Sep 17 00:00:00 2001 From: Claudio Raimondi Date: Thu, 28 Aug 2025 23:14:54 +0200 Subject: [PATCH 019/356] Add Dockerfile --- contrib/docker/Dockerfile | 71 +++++++++++++++++++++++++++++++++++++++ contrib/docker/README.md | 67 ++++++++++++++++++++++++++++++++++++ 2 files changed, 138 insertions(+) create mode 100644 contrib/docker/Dockerfile create mode 100644 contrib/docker/README.md diff --git a/contrib/docker/Dockerfile b/contrib/docker/Dockerfile new file mode 100644 index 0000000000..a3088a5b72 --- /dev/null +++ b/contrib/docker/Dockerfile @@ -0,0 +1,71 @@ +FROM alpine:3.22 AS builder + +RUN apk add --no-cache \ + build-base \ + cmake \ + boost-dev \ + libevent-dev \ + sqlite-dev \ + zeromq-dev \ + coreutils \ + binutils + +WORKDIR /opt/bitcoin + +COPY . . + +WORKDIR /opt/bitcoin/build + +RUN cmake .. \ + -DCMAKE_INSTALL_PREFIX="/usr/local/" \ + -DBUILD_DAEMON="ON" \ + -DBUILD_CLI="ON" \ + -DENABLE_WALLET="ON" \ + -DWITH_ZMQ="ON" \ + -DBUILD_TESTS="ON" \ + -DBUILD_GUI="OFF" \ + -DBUILD_TX="OFF" \ + -DBUILD_UTIL="OFF" \ + -DBUILD_WALLET_TOOL="OFF" \ + -DBUILD_BENCH="OFF" \ + -DBUILD_FUZZ_BINARY="OFF" \ + -DBUILD_UTIL_CHAINSTATE="OFF" \ + -DWITH_BDB="OFF" \ + -DWITH_USDT="OFF" \ + -DINSTALL_MAN="OFF" \ + -DWITH_CCACHE="OFF" + +RUN cmake --build . --parallel $(nproc) +RUN ctest --output-on-failure +RUN cmake --install . 
+ +RUN strip --strip-unneeded /usr/local/bin/* + +FROM alpine:3.22 AS final + +ARG USER_ID=1000 +ARG GROUP_ID=1000 + +RUN apk add --no-cache \ + libevent \ + sqlite-libs \ + zeromq \ + boost-system \ + boost-filesystem \ + boost-program_options + +COPY --from=builder /usr/local/bin/bitcoind /usr/local/bin/bitcoind +COPY --from=builder /usr/local/bin/bitcoin-cli /usr/local/bin/bitcoin-cli + +RUN addgroup -S -g ${GROUP_ID} bitcoin && \ + adduser -S -u ${USER_ID} -G bitcoin -H -s /bin/false bitcoin + +WORKDIR /var/lib/bitcoind +EXPOSE 8333 8332 +USER bitcoin +VOLUME ["/var/lib/bitcoind", "/etc/bitcoin/bitcoin.conf"] + +ENTRYPOINT ["bitcoind", "-conf=/etc/bitcoin/bitcoin.conf", "-datadir=/var/lib/bitcoind"] + +HEALTHCHECK --interval=5m --timeout=15s --start-period=2m --start-interval=10s \ + CMD ["bitcoin-cli", "-conf=/etc/bitcoin/bitcoin.conf", "-datadir=/var/lib/bitcoind", "getblockchaininfo"] diff --git a/contrib/docker/README.md b/contrib/docker/README.md new file mode 100644 index 0000000000..45dee1efa8 --- /dev/null +++ b/contrib/docker/README.md @@ -0,0 +1,67 @@ + +# 🚀 Bitcoin Knots Docker Image (Headless Node) + +This Dockerfile builds and runs a **Bitcoin Knots** full node from source. + +## 🧱 Features + +* Stripped of all non-essential components (tests, debug data, documentation, etc.) +* Data directory persisted via volume +* Accessible via RPC + +--- + +## 📦 Build the Docker Image + +**make sure you're at the root of the repo first!** + +```bash +docker build \ + -f contrib/docker/Dockerfile \ + -t bitcoinknots \ + --build-arg USER_ID=$(id -u) \ + --build-arg GROUP_ID=$(id -g) \ + --load . 
+``` + +--- + +## ▶️ Run the Node + +```bash +docker run -d \ + --init \ + --user $(id -u):$(id -g) \ + --name bitcoinknots \ + -p 8333:8333 -p 127.0.0.1:8332:8332 \ + -v path/to/conf:/etc/bitcoin/bitcoin.conf:ro \ + -v path/to/data:/var/lib/bitcoind:rw \ + bitcoinknots +``` + +In case you want to use ZeroMQ sockets, make sure to expose those ports as well by adding `-p host_port:container_port` directives to the command above. +In case `path/to/data` is not writable by your user, consider overriding the `--user` flag. + +This will: + +* Start the node in the background +* Save the blockchain and config in `/path/to/data` +* Expose peer and RPC ports + +--- + +## 📊 Check Node Status + +```bash +docker logs bitcoinknots +``` + +--- + +## 🛑 Stop the Node + +```bash +docker stop bitcoinknots +``` + +--- From 5d10d86de1be5bb53c7e30db99fbaac2428a44c0 Mon Sep 17 00:00:00 2001 From: TheCharlatan Date: Mon, 8 Sep 2025 11:13:52 +0200 Subject: [PATCH 020/356] net: Add interrupt to pcp retry loop Without this interrupt bitcoind takes a long time to exit if requested to do so after a failed pcp lookup on startup. Github-Pull: #33338 Rebased-From: 188de70c86414b8b2ad5143f5c607b67686526ea --- src/common/pcp.cpp | 18 ++++++++++++------ src/common/pcp.h | 5 +++-- src/mapport.cpp | 6 +++--- src/test/fuzz/pcp.cpp | 7 +++++-- src/test/pcp_tests.cpp | 24 +++++++++++++----------- 5 files changed, 36 insertions(+), 24 deletions(-) diff --git a/src/common/pcp.cpp b/src/common/pcp.cpp index d0d4955470..70863fafdb 100644 --- a/src/common/pcp.cpp +++ b/src/common/pcp.cpp @@ -15,6 +15,7 @@ #include #include #include +#include namespace { @@ -217,7 +218,8 @@ CNetAddr PCPUnwrapAddress(Span wrapped_addr) //! PCP or NAT-PMP send-receive loop. 
std::optional> PCPSendRecv(Sock &sock, const std::string &protocol, Span request, int num_tries, std::chrono::milliseconds timeout_per_try, - std::function)> check_packet) + std::function)> check_packet, + CThreadInterrupt& interrupt) { using namespace std::chrono; // UDP is a potentially lossy protocol, so we try to send again a few times. @@ -238,6 +240,7 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p auto cur_time = time_point_cast(MockableSteadyClock::now()); auto deadline = cur_time + timeout_per_try; while ((cur_time = time_point_cast(MockableSteadyClock::now())) < deadline) { + if (interrupt) return std::nullopt; Sock::Event occurred = 0; if (!sock.Wait(deadline - cur_time, Sock::RECV, &occurred)) { LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "%s: Could not wait on socket: %s\n", protocol, NetworkErrorString(WSAGetLastError())); @@ -271,7 +274,7 @@ std::optional> PCPSendRecv(Sock &sock, const std::string &p } -std::variant NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, int num_tries, std::chrono::milliseconds timeout_per_try) +std::variant NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, CThreadInterrupt& interrupt, int num_tries, std::chrono::milliseconds timeout_per_try) { struct sockaddr_storage dest_addr; socklen_t dest_addrlen = sizeof(struct sockaddr_storage); @@ -319,7 +322,8 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g return false; // Wasn't response to what we expected, try receiving next packet. } return true; - }); + }, + interrupt); struct in_addr external_addr; if (recv_res) { @@ -361,7 +365,8 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g return false; // Wasn't response to what we expected, try receiving next packet. 
} return true; - }); + }, + interrupt); if (recv_res) { const std::span response = *recv_res; @@ -384,7 +389,7 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g } } -std::variant PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, int num_tries, std::chrono::milliseconds timeout_per_try) +std::variant PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, CThreadInterrupt& interrupt, int num_tries, std::chrono::milliseconds timeout_per_try) { struct sockaddr_storage dest_addr, bind_addr; socklen_t dest_addrlen = sizeof(struct sockaddr_storage), bind_addrlen = sizeof(struct sockaddr_storage); @@ -484,7 +489,8 @@ std::variant PCPRequestPortMap(const PCPMappingNonc return false; // Wasn't response to what we expected, try receiving next packet. } return true; - }); + }, + interrupt); if (!recv_res) { return MappingError::NETWORK_ERROR; diff --git a/src/common/pcp.h b/src/common/pcp.h index 44f9285c27..b3e36d13c5 100644 --- a/src/common/pcp.h +++ b/src/common/pcp.h @@ -6,6 +6,7 @@ #define BITCOIN_COMMON_PCP_H #include +#include #include @@ -51,7 +52,7 @@ struct MappingResult { //! * num_tries: Number of tries in case of no response. //! //! Returns the external_ip:external_port of the mapping if successful, otherwise a MappingError. -std::variant NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000)); +std::variant NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, CThreadInterrupt& interrupt, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000)); //! Try to open a port using RFC 6887 Port Control Protocol (PCP). Handles IPv4 and IPv6. //! @@ -63,6 +64,6 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g //! 
* num_tries: Number of tries in case of no response. //! //! Returns the external_ip:external_port of the mapping if successful, otherwise a MappingError. -std::variant PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000)); +std::variant PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, CThreadInterrupt& interrupt, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000)); #endif // BITCOIN_COMMON_PCP_H diff --git a/src/mapport.cpp b/src/mapport.cpp index 83105f51fd..976572c585 100644 --- a/src/mapport.cpp +++ b/src/mapport.cpp @@ -74,11 +74,11 @@ static void ProcessPCP() // Open a port mapping on whatever local address we have toward the gateway. struct in_addr inaddr_any; inaddr_any.s_addr = htonl(INADDR_ANY); - auto res = PCPRequestPortMap(pcp_nonce, *gateway4, CNetAddr(inaddr_any), private_port, requested_lifetime); + auto res = PCPRequestPortMap(pcp_nonce, *gateway4, CNetAddr(inaddr_any), private_port, requested_lifetime, g_mapport_interrupt); MappingError* pcp_err = std::get_if(&res); if (pcp_err && *pcp_err == MappingError::UNSUPP_VERSION) { LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "portmap: Got unsupported PCP version response, falling back to NAT-PMP\n"); - res = NATPMPRequestPortMap(*gateway4, private_port, requested_lifetime); + res = NATPMPRequestPortMap(*gateway4, private_port, requested_lifetime, g_mapport_interrupt); } handle_mapping(res); } @@ -93,7 +93,7 @@ static void ProcessPCP() // Try to open pinholes for all routable local IPv6 addresses. 
for (const auto &addr: GetLocalAddresses()) { if (!addr.IsRoutable() || !addr.IsIPv6()) continue; - auto res = PCPRequestPortMap(pcp_nonce, *gateway6, addr, private_port, requested_lifetime); + auto res = PCPRequestPortMap(pcp_nonce, *gateway6, addr, private_port, requested_lifetime, g_mapport_interrupt); handle_mapping(res); } } diff --git a/src/test/fuzz/pcp.cpp b/src/test/fuzz/pcp.cpp index 76fdded188..0beebe10b1 100644 --- a/src/test/fuzz/pcp.cpp +++ b/src/test/fuzz/pcp.cpp @@ -9,6 +9,7 @@ #include #include +#include using namespace std::literals; @@ -43,7 +44,8 @@ FUZZ_TARGET(pcp_request_port_map, .init = port_map_target_init) const auto local_addr{ConsumeNetAddr(fuzzed_data_provider)}; const auto port{fuzzed_data_provider.ConsumeIntegral()}; const auto lifetime{fuzzed_data_provider.ConsumeIntegral()}; - const auto res{PCPRequestPortMap(PCP_NONCE, gateway_addr, local_addr, port, lifetime, NUM_TRIES, TIMEOUT)}; + CThreadInterrupt interrupt; + const auto res{PCPRequestPortMap(PCP_NONCE, gateway_addr, local_addr, port, lifetime, interrupt, NUM_TRIES, TIMEOUT)}; // In case of success the mapping must be consistent with the request. if (const MappingResult* mapping = std::get_if(&res)) { @@ -69,7 +71,8 @@ FUZZ_TARGET(natpmp_request_port_map, .init = port_map_target_init) const auto gateway_addr{ConsumeNetAddr(fuzzed_data_provider)}; const auto port{fuzzed_data_provider.ConsumeIntegral()}; const auto lifetime{fuzzed_data_provider.ConsumeIntegral()}; - const auto res{NATPMPRequestPortMap(gateway_addr, port, lifetime, NUM_TRIES, TIMEOUT)}; + CThreadInterrupt interrupt; + const auto res{NATPMPRequestPortMap(gateway_addr, port, lifetime, interrupt, NUM_TRIES, TIMEOUT)}; // In case of success the mapping must be consistent with the request. 
if (const MappingResult* mapping = std::get_if(&res)) { diff --git a/src/test/pcp_tests.cpp b/src/test/pcp_tests.cpp index 967bef1946..cfb2962c9e 100644 --- a/src/test/pcp_tests.cpp +++ b/src/test/pcp_tests.cpp @@ -15,6 +15,8 @@ using namespace std::literals; +static CThreadInterrupt g_interrupt; + /// UDP test server operation. struct TestOp { std::chrono::milliseconds delay; @@ -295,7 +297,7 @@ BOOST_AUTO_TEST_CASE(natpmp_ipv4) return std::unique_ptr(); }; - auto res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, 1, 200ms); + auto res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, g_interrupt, 1, 200ms); MappingResult* mapping = std::get_if(&res); BOOST_REQUIRE(mapping); @@ -339,7 +341,7 @@ BOOST_AUTO_TEST_CASE(pcp_ipv4) return std::unique_ptr(); }; - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 1, 1000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 1, 1000ms); MappingResult* mapping = std::get_if(&res); BOOST_REQUIRE(mapping); @@ -383,7 +385,7 @@ BOOST_AUTO_TEST_CASE(pcp_ipv6) return std::unique_ptr(); }; - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv6, default_local_ipv6, 1234, 1000, 1, 1000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv6, default_local_ipv6, 1234, 1000, g_interrupt, 1, 1000ms); MappingResult* mapping = std::get_if(&res); BOOST_REQUIRE(mapping); @@ -406,7 +408,7 @@ BOOST_AUTO_TEST_CASE(pcp_timeout) ASSERT_DEBUG_LOG("pcp: Retrying (2)"); ASSERT_DEBUG_LOG("pcp: Timeout"); - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 3, 2000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 3, 2000ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); @@ -435,7 +437,7 @@ BOOST_AUTO_TEST_CASE(pcp_connrefused) ASSERT_DEBUG_LOG("pcp: Could not receive response"); - auto res = 
PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 3, 2000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 3, 2000ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); @@ -495,7 +497,7 @@ BOOST_AUTO_TEST_CASE(pcp_ipv6_timeout_success) ASSERT_DEBUG_LOG("pcp: Retrying (1)"); ASSERT_DEBUG_LOG("pcp: Timeout"); - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv6, default_local_ipv6, 1234, 1000, 2, 2000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv6, default_local_ipv6, 1234, 1000, g_interrupt, 2, 2000ms); BOOST_CHECK(std::get_if(&res)); } @@ -534,7 +536,7 @@ BOOST_AUTO_TEST_CASE(pcp_ipv4_fail_no_resources) return std::unique_ptr(); }; - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 3, 1000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 3, 1000ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); @@ -570,7 +572,7 @@ BOOST_AUTO_TEST_CASE(pcp_ipv4_fail_unsupported_version) return std::unique_ptr(); }; - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 3, 1000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 3, 1000ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); @@ -602,7 +604,7 @@ BOOST_AUTO_TEST_CASE(natpmp_protocol_error) return std::unique_ptr(); }; - auto res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, 1, 200ms); + auto res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, g_interrupt, 1, 200ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); @@ -647,7 +649,7 @@ BOOST_AUTO_TEST_CASE(natpmp_protocol_error) return std::unique_ptr(); }; - res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, 1, 200ms); + res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, 
g_interrupt, 1, 200ms); err = std::get_if(&res); BOOST_REQUIRE(err); @@ -688,7 +690,7 @@ BOOST_AUTO_TEST_CASE(pcp_protocol_error) return std::unique_ptr(); }; - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 1, 1000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 1, 1000ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); From 7feab16eba27d3425b2f244e33d83939f7f0e719 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 19 Sep 2025 00:23:01 +0000 Subject: [PATCH 021/356] Diff-minimise --- src/test/fuzz/pcp.cpp | 8 +++----- src/test/pcp_tests.cpp | 24 +++++++++++------------- src/test/util/setup_common.h | 11 +++++++++++ 3 files changed, 25 insertions(+), 18 deletions(-) diff --git a/src/test/fuzz/pcp.cpp b/src/test/fuzz/pcp.cpp index 0beebe10b1..bf95dc916c 100644 --- a/src/test/fuzz/pcp.cpp +++ b/src/test/fuzz/pcp.cpp @@ -4,12 +4,12 @@ #include #include +#include #include #include #include #include -#include using namespace std::literals; @@ -44,8 +44,7 @@ FUZZ_TARGET(pcp_request_port_map, .init = port_map_target_init) const auto local_addr{ConsumeNetAddr(fuzzed_data_provider)}; const auto port{fuzzed_data_provider.ConsumeIntegral()}; const auto lifetime{fuzzed_data_provider.ConsumeIntegral()}; - CThreadInterrupt interrupt; - const auto res{PCPRequestPortMap(PCP_NONCE, gateway_addr, local_addr, port, lifetime, interrupt, NUM_TRIES, TIMEOUT)}; + const auto res{PCPRequestPortMap(PCP_NONCE, gateway_addr, local_addr, port, lifetime, NUM_TRIES, TIMEOUT)}; // In case of success the mapping must be consistent with the request. 
if (const MappingResult* mapping = std::get_if(&res)) { @@ -71,8 +70,7 @@ FUZZ_TARGET(natpmp_request_port_map, .init = port_map_target_init) const auto gateway_addr{ConsumeNetAddr(fuzzed_data_provider)}; const auto port{fuzzed_data_provider.ConsumeIntegral()}; const auto lifetime{fuzzed_data_provider.ConsumeIntegral()}; - CThreadInterrupt interrupt; - const auto res{NATPMPRequestPortMap(gateway_addr, port, lifetime, interrupt, NUM_TRIES, TIMEOUT)}; + const auto res{NATPMPRequestPortMap(gateway_addr, port, lifetime, NUM_TRIES, TIMEOUT)}; // In case of success the mapping must be consistent with the request. if (const MappingResult* mapping = std::get_if(&res)) { diff --git a/src/test/pcp_tests.cpp b/src/test/pcp_tests.cpp index cfb2962c9e..967bef1946 100644 --- a/src/test/pcp_tests.cpp +++ b/src/test/pcp_tests.cpp @@ -15,8 +15,6 @@ using namespace std::literals; -static CThreadInterrupt g_interrupt; - /// UDP test server operation. struct TestOp { std::chrono::milliseconds delay; @@ -297,7 +295,7 @@ BOOST_AUTO_TEST_CASE(natpmp_ipv4) return std::unique_ptr(); }; - auto res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, g_interrupt, 1, 200ms); + auto res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, 1, 200ms); MappingResult* mapping = std::get_if(&res); BOOST_REQUIRE(mapping); @@ -341,7 +339,7 @@ BOOST_AUTO_TEST_CASE(pcp_ipv4) return std::unique_ptr(); }; - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 1, 1000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 1, 1000ms); MappingResult* mapping = std::get_if(&res); BOOST_REQUIRE(mapping); @@ -385,7 +383,7 @@ BOOST_AUTO_TEST_CASE(pcp_ipv6) return std::unique_ptr(); }; - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv6, default_local_ipv6, 1234, 1000, g_interrupt, 1, 1000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv6, default_local_ipv6, 1234, 1000, 1, 1000ms); 
MappingResult* mapping = std::get_if(&res); BOOST_REQUIRE(mapping); @@ -408,7 +406,7 @@ BOOST_AUTO_TEST_CASE(pcp_timeout) ASSERT_DEBUG_LOG("pcp: Retrying (2)"); ASSERT_DEBUG_LOG("pcp: Timeout"); - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 3, 2000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 3, 2000ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); @@ -437,7 +435,7 @@ BOOST_AUTO_TEST_CASE(pcp_connrefused) ASSERT_DEBUG_LOG("pcp: Could not receive response"); - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 3, 2000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 3, 2000ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); @@ -497,7 +495,7 @@ BOOST_AUTO_TEST_CASE(pcp_ipv6_timeout_success) ASSERT_DEBUG_LOG("pcp: Retrying (1)"); ASSERT_DEBUG_LOG("pcp: Timeout"); - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv6, default_local_ipv6, 1234, 1000, g_interrupt, 2, 2000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv6, default_local_ipv6, 1234, 1000, 2, 2000ms); BOOST_CHECK(std::get_if(&res)); } @@ -536,7 +534,7 @@ BOOST_AUTO_TEST_CASE(pcp_ipv4_fail_no_resources) return std::unique_ptr(); }; - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 3, 1000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 3, 1000ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); @@ -572,7 +570,7 @@ BOOST_AUTO_TEST_CASE(pcp_ipv4_fail_unsupported_version) return std::unique_ptr(); }; - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 3, 1000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 3, 1000ms); MappingError* err = 
std::get_if(&res); BOOST_REQUIRE(err); @@ -604,7 +602,7 @@ BOOST_AUTO_TEST_CASE(natpmp_protocol_error) return std::unique_ptr(); }; - auto res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, g_interrupt, 1, 200ms); + auto res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, 1, 200ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); @@ -649,7 +647,7 @@ BOOST_AUTO_TEST_CASE(natpmp_protocol_error) return std::unique_ptr(); }; - res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, g_interrupt, 1, 200ms); + res = NATPMPRequestPortMap(default_gateway_ipv4, 1234, 1000, 1, 200ms); err = std::get_if(&res); BOOST_REQUIRE(err); @@ -690,7 +688,7 @@ BOOST_AUTO_TEST_CASE(pcp_protocol_error) return std::unique_ptr(); }; - auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, g_interrupt, 1, 1000ms); + auto res = PCPRequestPortMap(TEST_NONCE, default_gateway_ipv4, bind_any_ipv4, 1234, 1000, 1, 1000ms); MappingError* err = std::get_if(&res); BOOST_REQUIRE(err); diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h index 33ad258457..2b43d2dba2 100644 --- a/src/test/util/setup_common.h +++ b/src/test/util/setup_common.h @@ -6,6 +6,7 @@ #define BITCOIN_TEST_UTIL_SETUP_COMMON_H #include // IWYU pragma: export +#include #include #include #include @@ -303,4 +304,14 @@ class HasReason const std::string m_reason; }; +static inline std::variant NATPMPRequestPortMap(const CNetAddr &gateway, uint16_t port, uint32_t lifetime, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000)) { + static CThreadInterrupt interrupt; + return NATPMPRequestPortMap(gateway, port, lifetime, interrupt, num_tries, timeout_per_try); +} + +static inline std::variant PCPRequestPortMap(const PCPMappingNonce &nonce, const CNetAddr &gateway, const CNetAddr &bind, uint16_t port, uint32_t lifetime, int num_tries = 3, std::chrono::milliseconds timeout_per_try = std::chrono::milliseconds(1000)) 
{ + static CThreadInterrupt interrupt; + return PCPRequestPortMap(nonce, gateway, bind, port, lifetime, interrupt, num_tries, timeout_per_try); +} + #endif // BITCOIN_TEST_UTIL_SETUP_COMMON_H From 7910eaee5cdf5184cbb780de39d64f8ee474233b Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Wed, 3 Sep 2025 16:00:31 -0700 Subject: [PATCH 022/356] gui: Avoid pathological QT text/markdown behavior... during text selection by only setting plaintext mime data. Github-Pull: gui#886 Rebased-From: 6a371b70c87ad6b763c89384562fce8549f37434 --- src/qt/forms/debugwindow.ui | 6 +++++- src/qt/rpcconsole.h | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui index 8be4a955b3..136d820557 100644 --- a/src/qt/forms/debugwindow.ui +++ b/src/qt/forms/debugwindow.ui @@ -524,7 +524,7 @@ - + 0 @@ -1430,6 +1430,10 @@ clear() + + PlainCopyTextEdit + QTextEdit + diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h index ec531c99c8..8e92b20678 100644 --- a/src/qt/rpcconsole.h +++ b/src/qt/rpcconsole.h @@ -12,6 +12,9 @@ #include #include +#include +#include +#include #include class ClientModel; @@ -157,4 +160,20 @@ public Q_SLOTS: void updateNetworkState(); }; +/** + * A version of QTextEdit that only populates plaintext mime data from a + * selection, this avoids some bad behavior in QT's HTML->Markdown conversion. 
+ */ +class PlainCopyTextEdit : public QTextEdit { + Q_OBJECT +public: + using QTextEdit::QTextEdit; +protected: + QMimeData* createMimeDataFromSelection() const override { + auto md = new QMimeData(); + md->setText(textCursor().selection().toPlainText()); + return md; + } +}; + #endif // BITCOIN_QT_RPCCONSOLE_H From dc4d75e4a3355118b4dc522e3d049e5e15804291 Mon Sep 17 00:00:00 2001 From: Fabian Jahr Date: Sun, 7 Sep 2025 18:26:00 +0200 Subject: [PATCH 023/356] common: Make arith_uint256 trivially copyable Replacing the custom code with default behavior should not result in a change of behavior since base_uint contains a simple array of uint32_t and compiler generated versions of the code could be better optimized. Co-authored-by: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Github-Pull: #33332 Rebased-From: 653a9849d5f98ba80e334ddc0ae9a5e367459f59 --- src/arith_uint256.h | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/src/arith_uint256.h b/src/arith_uint256.h index 38b7453034..0acc94b611 100644 --- a/src/arith_uint256.h +++ b/src/arith_uint256.h @@ -35,20 +35,8 @@ class base_uint pn[i] = 0; } - base_uint(const base_uint& b) - { - for (int i = 0; i < WIDTH; i++) - pn[i] = b.pn[i]; - } - - base_uint& operator=(const base_uint& b) - { - if (this != &b) { - for (int i = 0; i < WIDTH; i++) - pn[i] = b.pn[i]; - } - return *this; - } + base_uint(const base_uint& b) = default; + base_uint& operator=(const base_uint& b) = default; base_uint(uint64_t b) { @@ -275,6 +263,9 @@ class arith_uint256 : public base_uint<256> { friend arith_uint256 UintToArith256(const uint256 &); }; +// Keeping the trivially copyable property is beneficial for performance +static_assert(std::is_trivially_copyable_v); + uint256 ArithToUint256(const arith_uint256 &); arith_uint256 UintToArith256(const uint256 &); From 39631839b7d592b0a1b9c86297d104a9ac05edb8 Mon Sep 17 00:00:00 2001 From: Raimo33 Date: Sun, 7 Sep 2025 02:00:00 +0200 Subject: [PATCH 024/356] node: 
optimize CBlockIndexWorkComparator Refactor the comparator logic in CBlockIndexWorkComparator::operator() to reduce the amounts of branches and improve readability without changing semantics. The previous implementation used multiple separate comparisons with explicit branches for greater-than and less-than cases, resulting in unnecessary code paths. The new implementation consolidates comparisons into single inequality checks and reduces complexity while preserving its original behavior. This change is particularly beneficial for loading blocks from files and reindexing. taskset -c 1 ./bin/bench_bitcoin --filter="(CheckBlockIndex|LoadExternalBlockFile|BlockToJsonVerboseWrite)" -output-csv=bench_old.csv --min-time=30000 | ns/op | op/s | err% | total | benchmark |--------------------:|--------------------:|--------:|----------:|:---------- | 26,557,419.20 | 37.65 | 0.1% | 32.85 | `BlockToJsonVerboseWrite` | 129,988.82 | 7,692.97 | 0.0% | 33.02 | `CheckBlockIndex` | 21,661,396.96 | 46.17 | 0.5% | 32.37 | `LoadExternalBlockFile` taskset -c 1 ./bin/bench_bitcoin --filter="(CheckBlockIndex|LoadExternalBlockFile|BlockToJsonVerboseWrite|WalletIsMineDescriptors)" -output-csv=bench_new.csv --min-time=30000 | ns/op | op/s | err% | total | benchmark |--------------------:|--------------------:|--------:|----------:|:---------- | 27,930,130.95 | 35.80 | 0.1% | 32.96 | `BlockToJsonVerboseWrite` | 115,346.65 | 8,669.52 | 0.0% | 33.00 | `CheckBlockIndex` | 20,389,679.85 | 49.04 | 0.4% | 31.76 | `LoadExternalBlockFile` Github-Pull: #33334 Rebased-From: 80ac0467ef4c3fb47f803d948e8df0a4b07e0e9c --- src/node/blockstorage.cpp | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index f02840d889..133a0fb024 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -144,21 +144,19 @@ std::atomic_bool fReindex(false); bool CBlockIndexWorkComparator::operator()(const CBlockIndex* pa, const 
CBlockIndex* pb) const { // First sort by most total work, ... - if (pa->nChainWork > pb->nChainWork) return false; - if (pa->nChainWork < pb->nChainWork) return true; + if (pa->nChainWork != pb->nChainWork) { + return pa->nChainWork < pb->nChainWork; + } // ... then by earliest activatable time, ... - if (pa->nSequenceId < pb->nSequenceId) return false; - if (pa->nSequenceId > pb->nSequenceId) return true; + if (pa->nSequenceId != pb->nSequenceId) { + return pa->nSequenceId > pb->nSequenceId; + } // Use pointer address as tie breaker (should only happen with blocks // loaded from disk, as those share the same id: 0 for blocks on the // best chain, 1 for all others). - if (pa < pb) return false; - if (pa > pb) return true; - - // Identical blocks. - return false; + return pa > pb; } bool CBlockIndexHeightOnlyComparator::operator()(const CBlockIndex* pa, const CBlockIndex* pb) const From 6712f43fdb430ae75bedbecea6d5eef7040af3ab Mon Sep 17 00:00:00 2001 From: sashass1315 Date: Wed, 17 Sep 2025 11:46:51 +0300 Subject: [PATCH 025/356] coinstats: avoid unnecessary Coin copy in ApplyHash Github-Pull: #33410 Rebased-From: 5a56203f4e471f3345d038c486b580d738bd147b --- src/kernel/coinstats.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kernel/coinstats.cpp b/src/kernel/coinstats.cpp index 9bd755ed27..958830e115 100644 --- a/src/kernel/coinstats.cpp +++ b/src/kernel/coinstats.cpp @@ -94,7 +94,7 @@ static void ApplyHash(T& hash_obj, const uint256& hash, const std::mapfirst); - Coin coin = it->second; + const Coin& coin = it->second; ApplyCoinHash(hash_obj, outpoint, coin); } } From ba0bd1d3a999c8d8a057d137a55a3a357192c12c Mon Sep 17 00:00:00 2001 From: Hodlinator <172445034+hodlinator@users.noreply.github.com> Date: Thu, 18 Sep 2025 09:29:44 +0200 Subject: [PATCH 026/356] build(windows): Remove lingering registry entries and shortcuts upon install Prior to fb2b05b1259d3e69e6e675adfa30b429424c7625 / #32132 we installed using these paths. 
The lingering registry entries for the uninstaller would show up as "Bitcoin Core (64-bit)" in the list of installed programs and fail to work. Github-Pull: #33422 Rebased-From: 1a4ad0ae501d15f77b1601ace3e1a1c8bf440dd6 --- share/setup.nsi.in | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/share/setup.nsi.in b/share/setup.nsi.in index 9649a2bd07..7a0bbd811c 100644 --- a/share/setup.nsi.in +++ b/share/setup.nsi.in @@ -112,6 +112,15 @@ Section -post SEC0001 WriteRegStr HKCR "@CLIENT_TARNAME@" "" "URL:Bitcoin" WriteRegStr HKCR "@CLIENT_TARNAME@\DefaultIcon" "" $INSTDIR\@BITCOIN_GUI_NAME@@EXEEXT@ WriteRegStr HKCR "@CLIENT_TARNAME@\shell\open\command" "" '"$INSTDIR\@BITCOIN_GUI_NAME@@EXEEXT@" "%1"' + + DeleteRegValue HKCU "${REGKEY} (64-bit)\Components" Main + DeleteRegKey HKCU "SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall\$(^Name) (64-bit)" + Delete /REBOOTOK "$SMPROGRAMS\$StartMenuGroup\Uninstall $(^Name) (64-bit).lnk" + Delete /REBOOTOK "$SMPROGRAMS\$StartMenuGroup\$(^Name) (64-bit).lnk" + DeleteRegValue HKCU "${REGKEY} (64-bit)" StartMenuGroup + DeleteRegValue HKCU "${REGKEY} (64-bit)" Path + DeleteRegKey /IfEmpty HKCU "${REGKEY} (64-bit)\Components" + DeleteRegKey /IfEmpty HKCU "${REGKEY} (64-bit)" SectionEnd # Macro for selecting uninstaller sections From 79b4c276e7b9b526fa8f563b1e09b2b970baece6 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Sat, 13 Sep 2025 23:04:56 +0000 Subject: [PATCH 027/356] Bugfix: QA: rpc_bind: Skip nonloopback test if no such address is found --- test/functional/rpc_bind.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/functional/rpc_bind.py b/test/functional/rpc_bind.py index 53916d5290..c9696dd110 100755 --- a/test/functional/rpc_bind.py +++ b/test/functional/rpc_bind.py @@ -84,7 +84,10 @@ def run_test(self): if not self.options.run_nonloopback: self._run_loopback_tests() if not self.options.run_ipv4 and not self.options.run_ipv6: - self._run_nonloopback_tests() + if self.non_loopback_ip: 
+ self._run_nonloopback_tests() + else: + self.log.info('Non-loopback IP address not found, skipping non-loopback tests') def _run_loopback_tests(self): if self.options.run_ipv4: From a684c7e963b77b618d7f9cc17a214a1049a5182f Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Sat, 20 Sep 2025 02:55:08 +0000 Subject: [PATCH 028/356] guix: Rename win64*-unsigned to win64*-pgpverifiable Users were confused about it being "unsigned" --- contrib/guix/libexec/build.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 82c13d5e71..86257124cd 100755 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -258,7 +258,7 @@ mkdir -p "$DISTSRC" case "$HOST" in *mingw*) cmake --build build -j "$JOBS" -t deploy ${V:+--verbose} - mv build/bitcoin-win64-setup.exe "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" + mv build/bitcoin-win64-setup.exe "${OUTDIR}/${DISTNAME}-win64-setup-pgpverifiable.exe" ;; esac @@ -317,8 +317,8 @@ mkdir -p "$DISTSRC" | xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}" find "${DISTNAME}" -not -name "*.dbg" \ | sort \ - | zip -X@ "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-unsigned.zip" \ - || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-unsigned.zip" && exit 1 ) + | zip -X@ "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-pgpverifiable.zip" \ + || ( rm -f "${OUTDIR}/${DISTNAME}-${HOST//x86_64-w64-mingw32/win64}-pgpverifiable.zip" && exit 1 ) find "${DISTNAME}" -name "*.dbg" -print0 \ | xargs -0r touch --no-dereference --date="@${SOURCE_DATE_EPOCH}" find "${DISTNAME}" -name "*.dbg" \ @@ -355,7 +355,7 @@ mkdir -p "$DISTSRC" ( cd ./windeploy mkdir -p unsigned - cp --target-directory=unsigned/ "${OUTDIR}/${DISTNAME}-win64-setup-unsigned.exe" + cp --target-directory=unsigned/ "${OUTDIR}/${DISTNAME}-win64-setup-pgpverifiable.exe" cp -r --target-directory=unsigned/ "${INSTALLPATH}" find unsigned/ 
-name "*.dbg" -print0 \ | xargs -0r rm From 63ec908ea624f300f7ec0f8e050652a3dd487118 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 23 Sep 2025 02:15:15 +0000 Subject: [PATCH 029/356] Bugfix: Correctly handle pruneduringinit=0 by treating it as manual-prune until sync completes --- src/node/blockmanager_args.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/node/blockmanager_args.cpp b/src/node/blockmanager_args.cpp index 0d96198c25..127e940175 100644 --- a/src/node/blockmanager_args.cpp +++ b/src/node/blockmanager_args.cpp @@ -49,7 +49,11 @@ util::Result ApplyArgsManOptions(const ArgsManager& args, BlockManager::Op if (const auto prune_during_init{args.GetIntArg("-pruneduringinit")}) { if (*prune_during_init == -1) { opts.prune_target_during_init = -1; - } else if (const auto prune_parsed = ParsePruneOption(*prune_during_init, "-pruneduringinit")) { + } else if (auto prune_parsed = ParsePruneOption(*prune_during_init, "-pruneduringinit")) { + if (!*prune_parsed) { + // We don't actually disable pruning, just treat it as manual until sync completes + *prune_parsed = BlockManager::PRUNE_TARGET_MANUAL; + } // NOTE: PRUNE_TARGET_MANUAL is >int64 max opts.prune_target_during_init = std::min(std::numeric_limits::max(), (int64_t)*prune_parsed); } else { From f99cc0f024a9a2d307ed47514c41a8bac1c3ff49 Mon Sep 17 00:00:00 2001 From: Trevor Arjeski Date: Sun, 21 Sep 2025 09:24:29 +0300 Subject: [PATCH 030/356] depends: fetch miniupnpc sources from github releases miniupnp.tuxfamily.org seems unreliable or still unavailable. 
relates to 21b8a14d37 Github-Pull: knots#192 Rebased-From: 07f0df46fba49366b28080834cdbc9066ea7a39e --- depends/packages/miniupnpc.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depends/packages/miniupnpc.mk b/depends/packages/miniupnpc.mk index 59e7a31310..7afb2ed16f 100644 --- a/depends/packages/miniupnpc.mk +++ b/depends/packages/miniupnpc.mk @@ -1,6 +1,6 @@ package=miniupnpc $(package)_version=2.3.3 -$(package)_download_path=https://miniupnp.tuxfamily.org/files/ +$(package)_download_path=https://github.com/miniupnp/miniupnp/releases/download/$(package)_$(subst .,_,$($(package)_version))/ $(package)_file_name=$(package)-$($(package)_version).tar.gz $(package)_sha256_hash=d52a0afa614ad6c088cc9ddff1ae7d29c8c595ac5fdd321170a05f41e634bd1a $(package)_patches=dont_leak_info.patch From 68abc8a3262b80936c2e42bb174065e9864170a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Le=CC=81o=20Haf?= Date: Sat, 13 Sep 2025 12:07:43 +0200 Subject: [PATCH 031/356] =?UTF-8?q?add=20L=C3=A9o=20Haf=20DNS=20seed?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- src/kernel/chainparams.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index c995225064..01886b0823 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -145,6 +145,7 @@ class CMainParams : public CChainParams { vSeeds.emplace_back("seed.bitcoin.sipa.be."); // Pieter Wuille, only supports x1, x5, x9, and xd vSeeds.emplace_back("dnsseed.bluematt.me."); // Matt Corallo, only supports x9 vSeeds.emplace_back("dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us."); // Luke Dashjr + vSeeds.emplace_back("seed.bitcoin.haf.ovh."); // Léo Haf vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch."); // Jonas Schnelli, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.btc.petertodd.net."); // Peter Todd, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors 
Provoost From 1758a74d7af483141f9f18e7eeefd29987670434 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 23 Sep 2025 02:05:01 +0000 Subject: [PATCH 032/356] icon: Render macOS icns as a macOS-style icon --- src/qt/CMakeLists.txt | 9 +++++++-- src/qt/res/src/bitcoin-mac.svg | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 src/qt/res/src/bitcoin-mac.svg diff --git a/src/qt/CMakeLists.txt b/src/qt/CMakeLists.txt index 4ae17286b9..876f08697b 100644 --- a/src/qt/CMakeLists.txt +++ b/src/qt/CMakeLists.txt @@ -61,15 +61,20 @@ set(ICO_SPECS 256 64 48 32 24 20 16 ) set(ICNS_SPECS - 1024 512 256 128 32 16 + 1024 ) set(EXTRA_ICON_SPECS 290 256 ) +if(CMAKE_SYSTEM_NAME STREQUAL "Darwin") + set(bitcoin_svg "res/src/bitcoin-mac.svg") +else() + set(bitcoin_svg "res/src/bitcoin.svg") +endif() + set(ICON_SPECS ${ICO_SPECS} ${ICNS_SPECS} ${EXTRA_ICON_SPECS}) list(REMOVE_DUPLICATES ICON_SPECS) -set(bitcoin_svg "res/src/bitcoin.svg") file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${ICON_PATH}") foreach(size IN LISTS ICON_SPECS) set(png "${ICON_PATH}/bitcoin${size}.png") diff --git a/src/qt/res/src/bitcoin-mac.svg b/src/qt/res/src/bitcoin-mac.svg new file mode 100644 index 0000000000..f1562da724 --- /dev/null +++ b/src/qt/res/src/bitcoin-mac.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + From 4341ac48b099e4647eb4a2b74e6c262546371483 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 23 Sep 2025 10:27:13 +0000 Subject: [PATCH 033/356] Delete release notes fragments --- doc/release-30635.md | 5 ----- 1 file changed, 5 deletions(-) delete mode 100644 doc/release-30635.md diff --git a/doc/release-30635.md b/doc/release-30635.md deleted file mode 100644 index 0ec68e93cc..0000000000 --- a/doc/release-30635.md +++ /dev/null @@ -1,5 +0,0 @@ -Updated RPCs ------------- - -- The waitfornewblock now takes an optional `current_tip` argument. It is also no longer hidden. 
(#30635) -- The waitforblock and waitforblockheight RPCs are no longer hidden. (#30635) From f63b8e960d5d06cdbbc360aaf781c13fd5aca172 Mon Sep 17 00:00:00 2001 From: will Date: Sat, 26 Jul 2025 09:41:10 +0100 Subject: [PATCH 034/356] ci: add configure environment action Github-Pull: #32989 Rebased-From: b8fcc9fcbcd --- .../actions/configure-environment/action.yml | 27 +++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .github/actions/configure-environment/action.yml diff --git a/.github/actions/configure-environment/action.yml b/.github/actions/configure-environment/action.yml new file mode 100644 index 0000000000..aae5016bdc --- /dev/null +++ b/.github/actions/configure-environment/action.yml @@ -0,0 +1,27 @@ +name: 'Configure environment' +description: 'Configure CI, cache and container name environment variables' +runs: + using: 'composite' + steps: + - name: Set CI and cache directories + shell: bash + run: | + echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV" + echo "BASE_BUILD_DIR=${{ runner.temp }}/build" >> "$GITHUB_ENV" + echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> $GITHUB_ENV + echo "DEPENDS_DIR=${{ runner.temp }}/depends" >> "$GITHUB_ENV" + echo "BASE_CACHE=${{ runner.temp }}/depends/built" >> $GITHUB_ENV + echo "SOURCES_PATH=${{ runner.temp }}/depends/sources" >> $GITHUB_ENV + echo "PREVIOUS_RELEASES_DIR=${{ runner.temp }}/previous_releases" >> $GITHUB_ENV + + - name: Set cache hashes + shell: bash + run: | + echo "DEPENDS_HASH=$(git ls-tree HEAD depends "ci/test/$FILE_ENV" | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV + echo "PREVIOUS_RELEASES_HASH=$(git ls-tree HEAD test/get_previous_releases.py | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV + + - name: Get container name + shell: bash + run: | + source $FILE_ENV + echo "CONTAINER_NAME=$CONTAINER_NAME" >> "$GITHUB_ENV" From 301aa5d814b620287b65d93399a20a794659cc79 Mon Sep 17 00:00:00 2001 From: will Date: Mon, 16 Jun 2025 20:02:20 +0100 Subject: [PATCH 035/356] ci: add 
caching actions Github-Pull: #32989 Rebased-From: b232b0fa5e9 Add "Restore" and "Save" caching actions. These actions reduce boilerplate in the main ci.yml configuration file. These actions are implemented so that caches will be saved on `push` only. When a pull request is opened it will cache hit on the caches from the latest push, or in the case of depends will hit on any matching depends hash, falling back to partial matches. Depends caches are hashed using `$(git ls-tree HEAD depends "ci/test/$FILE_ENV" | sha256sum | cut -d' ' -f1)` and this hash is passed in as an input to the actions. This means we get a direct cache hit in cases where depends would not be re-built, otherwise falling back to a partial match. Previous releases cache is hashed similarly to depends, but using the test/get_previous_releases.py file. The cirruslabs cache action will fall back transparently to GitHub's cache in the case that the job is not being run on a Cirrus Runner, making these compatible with running on forks (on free GH hardware). 
--- .github/actions/restore-caches/action.yml | 47 +++++++++++++++++++++++ .github/actions/save-caches/action.yml | 39 +++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 .github/actions/restore-caches/action.yml create mode 100644 .github/actions/save-caches/action.yml diff --git a/.github/actions/restore-caches/action.yml b/.github/actions/restore-caches/action.yml new file mode 100644 index 0000000000..8dc35d4902 --- /dev/null +++ b/.github/actions/restore-caches/action.yml @@ -0,0 +1,47 @@ +name: 'Restore Caches' +description: 'Restore ccache, depends sources, and built depends caches' +runs: + using: 'composite' + steps: + - name: Restore Ccache cache + id: ccache-cache + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.CCACHE_DIR }} + key: ccache-${{ env.CONTAINER_NAME }}-${{ github.run_id }} + restore-keys: | + ccache-${{ env.CONTAINER_NAME }}- + + - name: Restore depends sources cache + id: depends-sources + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.SOURCES_PATH }} + key: depends-sources-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + restore-keys: | + depends-sources-${{ env.CONTAINER_NAME }}- + + - name: Restore built depends cache + id: depends-built + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.BASE_CACHE }} + key: depends-built-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + restore-keys: | + depends-built-${{ env.CONTAINER_NAME }}- + + - name: Restore previous releases cache + id: previous-releases + uses: cirruslabs/cache/restore@v4 + with: + path: ${{ env.PREVIOUS_RELEASES_DIR }} + key: previous-releases-${{ env.CONTAINER_NAME }}-${{ env.PREVIOUS_RELEASES_HASH }} + restore-keys: | + previous-releases-${{ env.CONTAINER_NAME }}- + + - name: export cache hits + shell: bash + run: | + echo "depends-sources-cache-hit=${{ steps.depends-sources.outputs.cache-hit }}" >> $GITHUB_ENV + echo "depends-built-cache-hit=${{ steps.depends-built.outputs.cache-hit }}" >> $GITHUB_ENV + echo 
"previous-releases-cache-hit=${{ steps.previous-releases.outputs.cache-hit }}" >> $GITHUB_ENV diff --git a/.github/actions/save-caches/action.yml b/.github/actions/save-caches/action.yml new file mode 100644 index 0000000000..0e3b31246c --- /dev/null +++ b/.github/actions/save-caches/action.yml @@ -0,0 +1,39 @@ +name: 'Save Caches' +description: 'Save ccache, depends sources, and built depends caches' +runs: + using: 'composite' + steps: + - name: debug cache hit inputs + shell: bash + run: | + echo "depends sources direct cache hit to primary key: ${{ env.depends-sources-cache-hit }}" + echo "depends built direct cache hit to primary key: ${{ env.depends-built-cache-hit }}" + echo "previous releases direct cache hit to primary key: ${{ env.previous-releases-cache-hit }}" + + - name: Save Ccache cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) }} + with: + path: ${{ env.CCACHE_DIR }} + key: ccache-${{ env.CONTAINER_NAME }}-${{ github.run_id }} + + - name: Save depends sources cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.depends-sources-cache-hit != 'true') }} + with: + path: ${{ env.SOURCES_PATH }} + key: depends-sources-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + + - name: Save built depends cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.depends-built-cache-hit != 'true' )}} + with: + path: ${{ env.BASE_CACHE }} + key: depends-built-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }} + + - name: Save previous releases cache + uses: cirruslabs/cache/save@v4 + if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.previous-releases-cache-hit != 'true' )}} + with: + path: ${{ env.PREVIOUS_RELEASES_DIR }} + key: 
previous-releases-${{ env.CONTAINER_NAME }}-${{ env.PREVIOUS_RELEASES_HASH }} From 954c1a55e4a6322267071f5bffeb3188a6ac7d59 Mon Sep 17 00:00:00 2001 From: will Date: Mon, 28 Jul 2025 16:05:32 +0100 Subject: [PATCH 036/356] ci: add REPO_USE_CIRRUS_RUNNERS Github-Pull: #32989 Rebased-From: 33ba073df7a If set, Cirrus runners will be used on pushes to, and pull requests against, this repository. Forks can set this if they have their own cirrus runners. --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2cac4eab0b..3ce17dff3e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,6 +20,7 @@ concurrency: env: CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error MAKEJOBS: '-j10' + REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners jobs: test-each-commit: From 1faf918a169b76e69a486eb7fc8d88429b77b4b6 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:37:26 +0100 Subject: [PATCH 037/356] ci: add configure-docker action Github-Pull: #32989 Rebased-From: fdf64e55324 Another action to reduce boilerplate in the main ci.yml file. This action will set up a docker builder compatible with caching build layers to a container registry using the `gha` build driver. It will then configure the docker build cache args. 
--- .github/actions/configure-docker/action.yml | 52 +++++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 .github/actions/configure-docker/action.yml diff --git a/.github/actions/configure-docker/action.yml b/.github/actions/configure-docker/action.yml new file mode 100644 index 0000000000..c78df86b6c --- /dev/null +++ b/.github/actions/configure-docker/action.yml @@ -0,0 +1,52 @@ +name: 'Configure Docker' +description: 'Set up Docker build driver and configure build cache args' +inputs: + use-cirrus: + description: 'Use cirrus cache' + required: true +runs: + using: 'composite' + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + # Use host network to allow access to cirrus gha cache running on the host + driver-opts: | + network=host + + # This is required to allow buildkit to access the actions cache + - name: Expose actions cache variables + uses: actions/github-script@v6 + with: + script: | + core.exportVariable('ACTIONS_CACHE_URL', process.env['ACTIONS_CACHE_URL']) + core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env['ACTIONS_RUNTIME_TOKEN']) + + - name: Construct docker build cache args + shell: bash + run: | + # Configure docker build cache backend + # + # On forks the gha cache will work but will use Github's cache backend. 
+ # Docker will check for variables $ACTIONS_CACHE_URL, $ACTIONS_RESULTS_URL and $ACTIONS_RUNTIME_TOKEN + # which are set automatically when running on GitHub infra: https://docs.docker.com/build/cache/backends/gha/#synopsis + + # Use cirrus cache host + if [[ ${{ inputs.use-cirrus }} == 'true' ]]; then + url_args="url=${CIRRUS_CACHE_HOST},url_v2=${CIRRUS_CACHE_HOST}" + else + url_args="" + fi + + # Always optimistically --cache‑from in case a cache blob exists + args=(--cache-from "type=gha${url_args:+,${url_args}},scope=${CONTAINER_NAME}") + + # If this is a push to the default branch, also add --cache‑to to save the cache + if [[ ${{ github.event_name }} == "push" && ${{ github.ref_name }} == ${{ github.event.repository.default_branch }} ]]; then + args+=(--cache-to "type=gha${url_args:+,${url_args}},mode=max,ignore-error=true,scope=${CONTAINER_NAME}") + fi + + # Always `--load` into docker images (needed when using the `docker-container` build driver). + args+=(--load) + + echo "DOCKER_BUILD_CACHE_ARG=${args[*]}" >> $GITHUB_ENV From f3089fb2cfdba533fba1298e909628e5fe7dabb9 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 038/356] ci: use buildx in ci Github-Pull: #32989 Rebased-From: 94a09325475 Using buildx is required to properly load the correct driver, for use with registry caching. Neither build, nor BUILDKIT=1 currently do this properly. Use of `docker buildx build` is compatible with podman. 
--- ci/test/02_run_container.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh index 8351fd4e02..087b4c6780 100755 --- a/ci/test/02_run_container.sh +++ b/ci/test/02_run_container.sh @@ -46,8 +46,10 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then DOCKER_BUILD_CACHE_ARG="--cache-from type=local,src=${DOCKER_BUILD_CACHE_OLD_DIR} --cache-to type=local,dest=${DOCKER_BUILD_CACHE_NEW_DIR},mode=max" fi + # Use buildx unconditionally + # Using buildx is required to properly load the correct driver, for use with registry caching. Neither build, nor BUILDKIT=1 currently do this properly # shellcheck disable=SC2086 - DOCKER_BUILDKIT=1 docker build \ + docker buildx build \ --file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \ --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \ --build-arg "FILE_ENV=${FILE_ENV}" \ From 0a649d07c994b1a6957131c8bb3a1d2e8d53e559 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 039/356] ci: use docker build cache arg directly Github-Pull: #32989 Rebased-From: 18f6be09d02 Reverts: e87429a2d0f23eb59526d335844fa5ff5b50b21f This was added in PR #31545 with the intention that self-hosted runners might use it to save build cache. As we are not using hosted runners with a registry build cache, the bulk of this commit can be reverted, simply using the value of $DOCKER_BUILD_CACHE_ARG in the script. 
link: https://github.com/bitcoin/bitcoin/pull/31545 --- ci/test/02_run_container.sh | 32 -------------------------------- 1 file changed, 32 deletions(-) diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh index 087b4c6780..2031dbd85a 100755 --- a/ci/test/02_run_container.sh +++ b/ci/test/02_run_container.sh @@ -23,29 +23,6 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then fi echo "Creating $CI_IMAGE_NAME_TAG container to run in" - DOCKER_BUILD_CACHE_ARG="" - DOCKER_BUILD_CACHE_TEMPDIR="" - DOCKER_BUILD_CACHE_OLD_DIR="" - DOCKER_BUILD_CACHE_NEW_DIR="" - # If set, use an `docker build` cache directory on the CI host - # to cache docker image layers for the CI container image. - # This cache can be multiple GB in size. Prefixed with DANGER - # as setting it removes (old cache) files from the host. - if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then - # Directory where the current cache for this run could be. If not existing - # or empty, "docker build" will warn, but treat it as cache-miss and continue. - DOCKER_BUILD_CACHE_OLD_DIR="${DANGER_DOCKER_BUILD_CACHE_HOST_DIR}/${CONTAINER_NAME}" - # Temporary directory for a newly created cache. We can't write the new - # cache into OLD_DIR directly, as old cache layers would not be removed. - # The NEW_DIR contents are moved to OLD_DIR after OLD_DIR has been cleared. - # This happens after `docker build`. If a task fails or is aborted, the - # DOCKER_BUILD_CACHE_TEMPDIR might be retained on the host. If the host isn't - # ephemeral, it has to take care of cleaning old TEMPDIR's up. 
- DOCKER_BUILD_CACHE_TEMPDIR="$(mktemp --directory ci-docker-build-cache-XXXXXXXXXX)" - DOCKER_BUILD_CACHE_NEW_DIR="${DOCKER_BUILD_CACHE_TEMPDIR}/${CONTAINER_NAME}" - DOCKER_BUILD_CACHE_ARG="--cache-from type=local,src=${DOCKER_BUILD_CACHE_OLD_DIR} --cache-to type=local,dest=${DOCKER_BUILD_CACHE_NEW_DIR},mode=max" - fi - # Use buildx unconditionally # Using buildx is required to properly load the correct driver, for use with registry caching. Neither build, nor BUILDKIT=1 currently do this properly # shellcheck disable=SC2086 @@ -60,15 +37,6 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then $DOCKER_BUILD_CACHE_ARG \ "${BASE_READ_ONLY_DIR}" - if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then - if [ -e "${DOCKER_BUILD_CACHE_NEW_DIR}/index.json" ]; then - echo "Removing the existing docker build cache in ${DOCKER_BUILD_CACHE_OLD_DIR}" - rm -rf "${DOCKER_BUILD_CACHE_OLD_DIR}" - echo "Moving the contents of ${DOCKER_BUILD_CACHE_NEW_DIR} to ${DOCKER_BUILD_CACHE_OLD_DIR}" - mv "${DOCKER_BUILD_CACHE_NEW_DIR}" "${DOCKER_BUILD_CACHE_OLD_DIR}" - fi - fi - docker volume create "${CONTAINER_NAME}_ccache" || true docker volume create "${CONTAINER_NAME}_depends" || true docker volume create "${CONTAINER_NAME}_depends_sources" || true From af086431e86c82a5e40b05270f39c70cfe413c7b Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 040/356] ci: have base install run in right dir Github-Pull: #32989 Rebased-From: 9c2b96e0d03 This sets the build dir at build time so that Apple SDK gets installed in the correct/expected location for the runtime to find it. 
Co-authored-by: Max Edwards --- ci/test/02_run_container.sh | 1 + ci/test_imagefile | 3 +++ 2 files changed, 4 insertions(+) diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh index 2031dbd85a..131b3c6148 100755 --- a/ci/test/02_run_container.sh +++ b/ci/test/02_run_container.sh @@ -30,6 +30,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then --file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \ --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \ --build-arg "FILE_ENV=${FILE_ENV}" \ + --build-arg "BASE_ROOT_DIR=${BASE_ROOT_DIR}" \ $MAYBE_CPUSET \ --platform="${CI_IMAGE_PLATFORM}" \ --label="${CI_IMAGE_LABEL}" \ diff --git a/ci/test_imagefile b/ci/test_imagefile index f8b5eea1c8..224141b138 100644 --- a/ci/test_imagefile +++ b/ci/test_imagefile @@ -10,6 +10,9 @@ FROM ${CI_IMAGE_NAME_TAG} ARG FILE_ENV ENV FILE_ENV=${FILE_ENV} +ARG BASE_ROOT_DIR +ENV BASE_ROOT_DIR=${BASE_ROOT_DIR} + COPY ./ci/retry/retry /usr/bin/retry COPY ./ci/test/00_setup_env.sh ./${FILE_ENV} ./ci/test/01_base_install.sh /ci_container_base/ci/test/ From f9f3e8b68616dfb9e18082d191b87a457c4100da Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 041/356] ci: add Cirrus cache host Github-Pull: #32989 Rebased-From: 020069e6b71 Whilst the action cirruslabs/actions/cache will automatically set this host, the docker `gha` build cache backend will not be aware of it. Set the value here, which will later be used in the docker build args to enable docker build cache on the cirrus cache. 
--- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3ce17dff3e..cf4c02e1d7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,6 +19,7 @@ concurrency: env: CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error + CIRRUS_CACHE_HOST: http://127.0.0.1:12321/ # When using Cirrus Runners this host can be used by the docker `gha` build cache type. MAKEJOBS: '-j10' REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners From 849993377d76c64cc5ea14336e6523434608deb3 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 042/356] ci: add job to determine runner type Github-Pull: #32989 Rebased-From: cc1735d7771 To remove multiple occurrences of the repository name, against which we compare `${{ github.repository }}` to check if we should use Cirrus Runners, introduce a helper job which can check a single environment variable and output this as an input to subsequent jobs. Forks can maintain a trivial patch of their repo name against the `REPO_USE_CIRRUS_RUNNERS` variable in ci.yml if they have Cirrus Runners of their own, which will then enable cache actions and docker build cache to use Cirrus Cache. It's not possible to use `${{ env.USE_CIRRUS_RUNNERS }}` in the `runs-on:` directive as the context is not supported by GitHub. If it was, this job would no longer be necessary. 
--- .github/workflows/ci.yml | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cf4c02e1d7..4bd496d7a8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,6 +24,22 @@ env: REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners jobs: + runners: + name: 'determine runners' + runs-on: ubuntu-latest + outputs: + use-cirrus-runners: ${{ steps.runners.outputs.use-cirrus-runners }} + steps: + - id: runners + run: | + if [[ "${REPO_USE_CIRRUS_RUNNERS}" == "${{ github.repository }}" ]]; then + echo "use-cirrus-runners=true" >> "$GITHUB_OUTPUT" + echo "::notice title=Runner Selection::Using Cirrus Runners" + else + echo "use-cirrus-runners=false" >> "$GITHUB_OUTPUT" + echo "::notice title=Runner Selection::Using GitHub-hosted runners" + fi + test-each-commit: name: 'test each commit' runs-on: ubuntu-24.04 From 82c60a31515a2004976faaa26f8caad9e2bb022d Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 043/356] ci: port arm 32-bit job Github-Pull: #32989 Rebased-From: f253031cb8e Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 43 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4bd496d7a8..297f8c0eec 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -324,3 +324,46 @@ jobs: path: ${{ env.CCACHE_DIR }} # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache key: ${{ github.job }}-ccache-${{ github.run_id }} + + ci-matrix: + name: ${{ matrix.name }} + needs: runners + runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && matrix.cirrus-runner || matrix.fallback-runner }} + if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} + timeout-minutes: ${{ matrix.timeout-minutes 
}} + + env: + DANGER_CI_ON_HOST_FOLDERS: 1 + FILE_ENV: ${{ matrix.file-env }} + + strategy: + fail-fast: false + matrix: + include: + - name: '32 bit ARM, unit tests, no functional tests' + cirrus-runner: 'ubuntu-24.04-arm' # Cirrus' Arm runners are Apple (with virtual Linux aarch64), which doesn't support 32-bit mode + fallback-runner: 'ubuntu-24.04-arm' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_arm.sh' + + steps: + - name: Checkout + uses: actions/checkout@v5 + + - name: Configure environment + uses: ./.github/actions/configure-environment + + - name: Restore caches + id: restore-cache + uses: ./.github/actions/restore-caches + + - name: Configure Docker + uses: ./.github/actions/configure-docker + with: + use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }} + + - name: CI script + run: ./ci/test_run_all.sh + + - name: Save caches + uses: ./.github/actions/save-caches From 894a3cbe42bf900788b858faf59b3d97412e7d47 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 044/356] ci: update windows-cross job Github-Pull: #32989 Rebased-From: 04e7bfbceb0 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 297f8c0eec..d88bb462e3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -346,6 +346,11 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_arm.sh' + - name: 'win64 Cross' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_win64.sh' steps: - name: Checkout uses: actions/checkout@v5 From 819ee09af31687dedd38de68aef98b0ecc19608f Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 045/356] ci: update asan-lsan-ubsan Github-Pull: #32989 Rebased-From: 884251441bb Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 
55 ++++++++++------------------------------ 1 file changed, 13 insertions(+), 42 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d88bb462e3..0dab0e8a5f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -283,48 +283,6 @@ jobs: run: | py -3 test\fuzz\test_runner.py --par %NUMBER_OF_PROCESSORS% --loglevel DEBUG %RUNNER_TEMP%\qa-assets\fuzz_corpora - asan-lsan-ubsan-integer-no-depends-usdt: - name: 'ASan + LSan + UBSan + integer, no depends, USDT' - runs-on: ubuntu-24.04 # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools - if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} - timeout-minutes: 120 - env: - FILE_ENV: "./ci/test/00_setup_env_native_asan.sh" - DANGER_CI_ON_HOST_FOLDERS: 1 - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Set CI directories - run: | - echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> "$GITHUB_ENV" - echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV" - echo "BASE_BUILD_DIR=${{ runner.temp }}/build-asan" >> "$GITHUB_ENV" - - - name: Restore Ccache cache - id: ccache-cache - uses: actions/cache/restore@v4 - with: - path: ${{ env.CCACHE_DIR }} - key: ${{ github.job }}-ccache-${{ github.run_id }} - restore-keys: ${{ github.job }}-ccache- - - - name: Enable bpfcc script - # In the image build step, no external environment variables are available, - # so any settings will need to be written to the settings env file: - run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh - - - name: CI script - run: ./ci/test_run_all.sh - - - name: Save Ccache cache - uses: actions/cache/save@v4 - if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true' - with: - path: ${{ env.CCACHE_DIR }} - # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache - key: ${{ github.job }}-ccache-${{ github.run_id }} - ci-matrix: name: ${{ 
matrix.name }} needs: runners @@ -351,6 +309,13 @@ jobs: fallback-runner: 'ubuntu-24.04' timeout-minutes: 120 file-env: './ci/test/00_setup_env_win64.sh' + + - name: 'ASan + LSan + UBSan + integer, no depends, USDT' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_asan.sh' + steps: - name: Checkout uses: actions/checkout@v5 @@ -367,6 +332,12 @@ jobs: with: use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }} + - name: Enable bpfcc script + if: ${{ env.CONTAINER_NAME == 'ci_native_asan' }} + # In the image build step, no external environment variables are available, + # so any settings will need to be written to the settings env file: + run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh + - name: CI script run: ./ci/test_run_all.sh From a91567a980adb93a05f12ec63b628ee3faaa4681 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 046/356] ci: force reinstall of kernel headers in asan Github-Pull: #32989 Rebased-From: 2c990d84a3d When using hosted runners in combination with cached docker images, there is the possibility that the host runner image is updated, rendering the linux-headers package (stored in the cached docker image) incompatible. Fix this by doing a re-install of the headers package in 03_test_script.sh. If the underlying runner kernel has not changed thie has no effect, but prevents the job from failing if it has. 
--- ci/test/03_test_script.sh | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh index b218e7b9d1..c2ef2291bd 100755 --- a/ci/test/03_test_script.sh +++ b/ci/test/03_test_script.sh @@ -24,6 +24,14 @@ fi echo "Free disk space:" df -h +# We force an install of linux-headers again here via $PACKAGES to fix any +# kernel mismatch between a cached docker image and the underlying host. +# This can happen occasionally on hosted runners if the runner image is updated. +if [[ "$CONTAINER_NAME" == "ci_native_asan" ]]; then + $CI_RETRY_EXE apt-get update + ${CI_RETRY_EXE} bash -c "apt-get install --no-install-recommends --no-upgrade -y $PACKAGES" +fi + # What host to compile for. See also ./depends/README.md # Tests that need cross-compilation export the appropriate HOST. # Tests that run natively guess the host From 835b5b8bb18a318026ada74d3c63b89d6aab742b Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 047/356] ci: port mac-cross-gui-notests Github-Pull: #32989 Rebased-From: 9c2514de534 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0dab0e8a5f..941ab09509 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -316,6 +316,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_asan.sh' + - name: 'macOS-cross, gui, no tests' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_mac_cross.sh' + steps: - name: Checkout uses: actions/checkout@v5 From e826c3daa55d3b4cbd0e2c13765f9158eb225bfd Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 048/356] ci: port nowallet-libbitcoinkernel Github-Pull: #32989 Rebased-From: 2a00b12d73b Co-authored-by: Max Edwards --- 
.github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 941ab09509..37556d42c9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -322,6 +322,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_mac_cross.sh' + - name: 'No wallet, libbitcoinkernel' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh' + steps: - name: Checkout uses: actions/checkout@v5 From 544f902b2a9cf14cd0445f27802cd11c5c945b00 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 049/356] ci: port i686-multiprocess-DEBUG Github-Pull: #32989 Rebased-From: f2068f26c12 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 37556d42c9..5b15fdb265 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -328,6 +328,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh' + - name: 'i686, multiprocess, DEBUG' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_i686_multiprocess.sh' + steps: - name: Checkout uses: actions/checkout@v5 From 85ec6c6882b40adb35c9cb88d37d22e3e58eaa68 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 050/356] ci: port fuzzer-address-undefined-integer-nodepends Github-Pull: #32989 Rebased-From: 341196d75c3 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5b15fdb265..1eabc0ec8a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml 
@@ -334,6 +334,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_i686_multiprocess.sh' + - name: 'fuzzer,address,undefined,integer, no depends' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 240 + file-env: './ci/test/00_setup_env_native_fuzz.sh' + steps: - name: Checkout uses: actions/checkout@v5 From 5057b9a6ffd360dbd96ad8585e10852961392361 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 051/356] ci: port previous-releases-depends-debug Github-Pull: #32989 Rebased-From: 58e38c3a042 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1eabc0ec8a..ca8748fd80 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -340,6 +340,12 @@ jobs: timeout-minutes: 240 file-env: './ci/test/00_setup_env_native_fuzz.sh' + - name: 'previous releases, depends DEBUG' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_previous_releases.sh' + steps: - name: Checkout uses: actions/checkout@v5 From b4286cf354a8111ed54fb63547dc1a7be7257b92 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 052/356] ci: port centos-depends-gui Github-Pull: #32989 Rebased-From: 549074bc643 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ca8748fd80..69fe9f0f97 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -346,6 +346,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_previous_releases.sh' + - name: 'CentOS, depends, gui' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + fallback-runner: 'ubuntu-24.04' + 
timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_centos.sh' + steps: - name: Checkout uses: actions/checkout@v5 From 3b2dcc8b9aea8706a25690a0cd08ba60896d3542 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 053/356] ci: port tidy Github-Pull: #32989 Rebased-From: bf7d5364527 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 69fe9f0f97..65903b40fd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -352,6 +352,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_centos.sh' + - name: 'tidy' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_tidy.sh' + steps: - name: Checkout uses: actions/checkout@v5 From 643385b22d9908f7665bf2addc734ba0323967b0 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 054/356] ci: port tsan-depends Github-Pull: #32989 Rebased-From: 9bbae61e3b4 Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 65903b40fd..95d5686f2f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -358,6 +358,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_tidy.sh' + - name: 'TSan, depends, no gui' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_tsan.sh' + steps: - name: Checkout uses: actions/checkout@v5 @@ -380,6 +386,11 @@ jobs: # so any settings will need to be written to the settings env file: run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh + - name: Set mmap_rnd_bits + 
if: ${{ env.CONTAINER_NAME == 'ci_native_tsan' }} + # Prevents crashes due to high ASLR entropy + run: sudo sysctl -w vm.mmap_rnd_bits=28 + - name: CI script run: ./ci/test_run_all.sh From 0f0378fe3c590e835aa30be092f37109ddd63b86 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 055/356] ci: port msan-depends Github-Pull: #32989 Rebased-From: d290a8e6eab Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 95d5686f2f..607d96f966 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -364,6 +364,12 @@ jobs: timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_tsan.sh' + - name: 'MSan, depends' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + fallback-runner: 'ubuntu-24.04' + timeout-minutes: 120 + file-env: './ci/test/00_setup_env_native_msan.sh' + steps: - name: Checkout uses: actions/checkout@v5 @@ -387,7 +393,7 @@ jobs: run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh - name: Set mmap_rnd_bits - if: ${{ env.CONTAINER_NAME == 'ci_native_tsan' }} + if: ${{ env.CONTAINER_NAME == 'ci_native_tsan' || env.CONTAINER_NAME == 'ci_native_msan' }} # Prevents crashes due to high ASLR entropy run: sudo sysctl -w vm.mmap_rnd_bits=28 From 06424fb004f916b06e4f0ab90fd6f7623049a360 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 056/356] ci: port lint Github-Pull: #32989 Rebased-From: bc41848d00f Co-authored-by: Max Edwards --- .github/workflows/ci.yml | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 607d96f966..6d9f920e68 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -402,3 +402,32 @@ jobs: - name: Save caches uses: ./.github/actions/save-caches + + lint: + name: 
'lint' + needs: runners + runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' || 'ubuntu-24.04' }} + if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} + timeout-minutes: 20 + env: + CONTAINER_NAME: "bitcoin-linter" + steps: + - name: Checkout + uses: actions/checkout@v5 + with: + fetch-depth: 0 + + - name: Configure Docker + uses: ./.github/actions/configure-docker + with: + use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }} + + - name: CI script + run: | + set -o xtrace + docker buildx build -t "$CONTAINER_NAME" $DOCKER_BUILD_CACHE_ARG --file "./ci/lint_imagefile" . + CIRRUS_PR_FLAG="" + if [ "${{ github.event_name }}" = "pull_request" ]; then + CIRRUS_PR_FLAG="-e CIRRUS_PR=1" + fi + docker run --rm $CIRRUS_PR_FLAG -v "$(pwd)":/bitcoin "$CONTAINER_NAME" From a08c3cc51c6875ba67f25c85143fdb61a8ba3e03 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 057/356] ci: remove .cirrus.yml Github-Pull: #32989 Rebased-From: 4393ffdd837 Removed as unused. --- .cirrus.yml | 214 ---------------------------------------------------- 1 file changed, 214 deletions(-) delete mode 100644 .cirrus.yml diff --git a/.cirrus.yml b/.cirrus.yml deleted file mode 100644 index 6e70dc15fe..0000000000 --- a/.cirrus.yml +++ /dev/null @@ -1,214 +0,0 @@ -env: # Global defaults - CIRRUS_CLONE_DEPTH: 1 - CIRRUS_LOG_TIMESTAMP: true - MAKEJOBS: "-j10" - TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache - CI_FAILFAST_TEST_LEAVE_DANGLING: "1" # Cirrus CI does not care about dangling processes and setting this variable avoids killing the CI script itself on error - -# A self-hosted machine(s) can be used via Cirrus CI. It can be configured with -# multiple users to run tasks in parallel. No sudo permission is required. 
-# -# https://cirrus-ci.org/guide/persistent-workers/ -# -# Generally, a persistent worker must run Ubuntu 23.04+ or Debian 12+. -# -# The following specific types should exist, with the following requirements: -# - small: For an x86_64 machine, with at least 2 vCPUs and 8 GB of memory. -# - medium: For an x86_64 machine, with at least 4 vCPUs and 16 GB of memory. -# - arm64: For an aarch64 machine, with at least 2 vCPUs and 8 GB of memory. -# -# CI jobs for the latter configuration can be run on x86_64 hardware -# by installing qemu-user-static, which works out of the box with -# podman or docker. Background: https://stackoverflow.com/a/72890225/313633 -# -# The above machine types are matched to each task by their label. Refer to the -# Cirrus CI docs for more details. -# -# When a contributor maintains a fork of the repo, any pull request they make -# to their own fork, or to the main repository, will trigger two CI runs: -# one for the branch push and one for the pull request. -# This can be avoided by setting SKIP_BRANCH_PUSH=true as a custom env variable -# in Cirrus repository settings, accessible from -# https://cirrus-ci.com/github/my-organization/my-repository -# -# On machines that are persisted between CI jobs, RESTART_CI_DOCKER_BEFORE_RUN=1 -# ensures that previous containers and artifacts are cleared before each run. -# This requires installing Podman instead of Docker. -# -# Futhermore: -# - podman-docker-4.1+ is required due to the bugfix in 4.1 -# (https://github.com/bitcoin/bitcoin/pull/21652#issuecomment-1657098200) -# - The ./ci/ dependencies (with cirrus-cli) should be installed. 
One-liner example -# for a single user setup with sudo permission: -# -# ``` -# apt update && apt install git screen python3 bash podman-docker uidmap slirp4netns curl -y && curl -L -o cirrus "https://github.com/cirruslabs/cirrus-cli/releases/latest/download/cirrus-linux-$(dpkg --print-architecture)" && mv cirrus /usr/local/bin/cirrus && chmod +x /usr/local/bin/cirrus -# ``` -# -# - There are no strict requirements on the hardware. Having fewer CPU threads -# than recommended merely causes the CI script to run slower. -# To avoid rare and intermittent OOM due to short memory usage spikes, -# it is recommended to add (and persist) swap: -# -# ``` -# fallocate -l 16G /swapfile_ci && chmod 600 /swapfile_ci && mkswap /swapfile_ci && swapon /swapfile_ci && ( echo '/swapfile_ci none swap sw 0 0' | tee -a /etc/fstab ) -# ``` -# -# - To register the persistent worker, open a `screen` session and run: -# -# ``` -# RESTART_CI_DOCKER_BEFORE_RUN=1 screen cirrus worker run --labels type=todo_fill_in_type --token todo_fill_in_token -# ``` - -# https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks -filter_template: &FILTER_TEMPLATE - # Allow forks to specify SKIP_BRANCH_PUSH=true and skip CI runs when a branch is pushed, - # but still run CI when a PR is created. - # https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution - skip: $SKIP_BRANCH_PUSH == "true" && $CIRRUS_PR == "" - stateful: false # https://cirrus-ci.org/guide/writing-tasks/#stateful-tasks - -base_template: &BASE_TEMPLATE - << : *FILTER_TEMPLATE - merge_base_script: - # Require git (used in fingerprint_script). 
- - git --version || ( apt-get update && apt-get install -y git ) - - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi - - git fetch --depth=1 $CIRRUS_REPO_CLONE_URL "pull/${CIRRUS_PR}/merge" - - git checkout FETCH_HEAD # Use merged changes to detect silent merge conflicts - # Also, the merge commit is used to lint COMMIT_RANGE="HEAD~..HEAD" - -main_template: &MAIN_TEMPLATE - timeout_in: 120m # https://cirrus-ci.org/faq/#instance-timed-out - ci_script: - - ./ci/test_run_all.sh - -global_task_template: &GLOBAL_TASK_TEMPLATE - << : *BASE_TEMPLATE - << : *MAIN_TEMPLATE - -compute_credits_template: &CREDITS_TEMPLATE - # https://cirrus-ci.org/pricing/#compute-credits - # Only use credits for pull requests to the main repo - use_compute_credits: $CIRRUS_REPO_FULL_NAME == 'bitcoin/bitcoin' && $CIRRUS_PR != "" - -task: - name: 'lint' - << : *BASE_TEMPLATE - container: - image: debian:bookworm - cpu: 1 - memory: 1G - # For faster CI feedback, immediately schedule the linters - << : *CREDITS_TEMPLATE - test_runner_cache: - folder: "/lint_test_runner" - fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-parse HEAD:test/lint/test_runner) - python_cache: - folder: "/python_build" - fingerprint_script: cat .python-version /etc/os-release - unshallow_script: - - git fetch --unshallow --no-tags - lint_script: - - ./ci/lint_run_all.sh - -task: - name: 'tidy' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - env: - FILE_ENV: "./ci/test/00_setup_env_native_tidy.sh" - -task: - name: 'ARM, unit tests, no functional tests' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: arm64 # Use arm64 worker to sidestep qemu and avoid a slow CI: https://github.com/bitcoin/bitcoin/pull/28087#issuecomment-1649399453 - env: - FILE_ENV: "./ci/test/00_setup_env_arm.sh" - -task: - name: 'Win64-cross' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_win64.sh" - -task: - name: 'CentOS, depends, gui' 
- << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_native_centos.sh" - -task: - name: 'previous releases, depends DEBUG' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_native_previous_releases.sh" - -task: - name: 'TSan, depends, no gui' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - env: - FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh" - -task: - name: 'MSan, depends' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - timeout_in: 300m # Use longer timeout for the *rare* case where a full build (llvm + msan + depends + ...) needs to be done. - env: - FILE_ENV: "./ci/test/00_setup_env_native_msan.sh" - -task: - name: 'fuzzer,address,undefined,integer, no depends' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - timeout_in: 240m # larger timeout, due to the high CPU demand - env: - FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh" - -task: - name: 'multiprocess, i686, DEBUG' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: medium - env: - FILE_ENV: "./ci/test/00_setup_env_i686_multiprocess.sh" - -task: - name: 'no wallet, libbitcoinkernel' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh" - -task: - name: 'macOS-cross, gui, no tests' - << : *GLOBAL_TASK_TEMPLATE - persistent_worker: - labels: - type: small - env: - FILE_ENV: "./ci/test/00_setup_env_mac_cross.sh" From c7f290b826fc4928c6e1e0a9649da85d4752717b Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 058/356] ci: dynamically match makejobs with cores Github-Pull: #32989 Rebased-From: 3f339e99e00 Previously jobs were running on a large multi-core server where 10 jobs as default made sense (or may even have been on the low side). 
Using hosted runners with fixed (and lower) numbers of vCPUs we should adapt compilation to match the number of cpus we have dynamically. This is cross-platform compatible with macos and linux only. --- .github/workflows/ci.yml | 1 - ci/test/00_setup_env.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6d9f920e68..60cb41b8f9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -20,7 +20,6 @@ concurrency: env: CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error CIRRUS_CACHE_HOST: http://127.0.0.1:12321/ # When using Cirrus Runners this host can be used by the docker `gha` build cache type. - MAKEJOBS: '-j10' REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners jobs: diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh index 9f794c2523..8a5cd4b243 100755 --- a/ci/test/00_setup_env.sh +++ b/ci/test/00_setup_env.sh @@ -35,7 +35,7 @@ fi echo "Fallback to default values in env (if not yet set)" # The number of parallel jobs to pass down to make and test_runner.py -export MAKEJOBS=${MAKEJOBS:--j4} +export MAKEJOBS=${MAKEJOBS:--j$(if command -v nproc > /dev/null 2>&1; then nproc; else sysctl -n hw.logicalcpu; fi)} # Whether to prefer BusyBox over GNU utilities export USE_BUSY_BOX=${USE_BUSY_BOX:-false} From 4339787379d2d246846e60f10ab9582805a6845e Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 059/356] doc: Detail configuration of hosted CI runners Github-Pull: #32989 Rebased-From: f4272844833dd660c2b9db587856baa408889302 --- ci/README.md | 32 ++++++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/ci/README.md b/ci/README.md index 377aae7fa0..81e048ce68 100644 --- a/ci/README.md +++ b/ci/README.md @@ -1,8 
+1,8 @@ -## CI Scripts +# CI Scripts This directory contains scripts for each build step in each build stage. -### Running a Stage Locally +## Running a Stage Locally Be aware that the tests will be built and run in-place, so please run at your own risk. If the repository is not a fresh git clone, you might have to clean files from previous builds or test runs first. @@ -27,7 +27,7 @@ with a specific configuration, env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh' ``` -### Configurations +## Configurations The test files (`FILE_ENV`) are constructed to test a wide range of configurations, rather than a single pass/fail. This helps to catch build @@ -49,8 +49,32 @@ env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'MAKEJOBS="-j1" FILE_ENV=" The files starting with `0n` (`n` greater than 0) are the scripts that are run in order. -### Cache +## Cache In order to avoid rebuilding all dependencies for each build, the binaries are cached and reused when possible. Changes in the dependency-generator will trigger cache-invalidation and rebuilds as necessary. + +## Configuring a repository for CI + +### Primary repository + +To configure the primary repository, follow these steps: + +1. Register with [Cirrus Runners](https://cirrus-runners.app/) and purchase runners. +2. Install the Cirrus Runners GitHub app against the GitHub organization. +3. Enable organisation-level runners to be used in public repositories: + 1. `Org settings -> Actions -> Runner Groups -> Default -> Allow public repos` +4. Permit the following actions to run: + 1. cirruslabs/cache/restore@\* + 1. cirruslabs/cache/save@\* + 1. docker/setup-buildx-action@\* + 1. actions/github-script@\* + +### Forked repositories + +When used in a fork the CI will run on GitHub's free hosted runners by default. 
+In this case, due to GitHub's 10GB-per-repo cache size limitations caches will be frequently evicted and missed, but the workflows will run (slowly). + +It is also possible to use your own Cirrus Runners in your own fork with an appropriate patch to the `REPO_USE_CIRRUS_RUNNERS` variable in ../.github/workflows/ci.yml +NB that Cirrus Runners only work at an organisation level, therefore in order to use your own Cirrus Runners, *the fork must be within your own organisation*. From 773e4cda9446a03c0b23468ef8a9e38496b4566b Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 060/356] ci: add ccache hit-rate warning when < 75% Github-Pull: #32989 Rebased-From: dd1c5903e8d Print the ccache hit-rate for the job using a GitHub annotation if it was below 75%. --- ci/test/00_setup_env_mac_native.sh | 1 + ci/test/00_setup_env_mac_native_fuzz.sh | 1 + ci/test/03_test_script.sh | 6 ++++++ 3 files changed, 8 insertions(+) diff --git a/ci/test/00_setup_env_mac_native.sh b/ci/test/00_setup_env_mac_native.sh index e01a56895b..9de51f9329 100755 --- a/ci/test/00_setup_env_mac_native.sh +++ b/ci/test/00_setup_env_mac_native.sh @@ -8,6 +8,7 @@ export LC_ALL=C.UTF-8 # Homebrew's python@3.12 is marked as externally managed (PEP 668). # Therefore, `--break-system-packages` is needed. 
+export CONTAINER_NAME="ci_mac_native" # macos does not use a container, but the env var is needed for logging export PIP_PACKAGES="--break-system-packages zmq" export GOAL="install" export CMAKE_GENERATOR="Ninja" diff --git a/ci/test/00_setup_env_mac_native_fuzz.sh b/ci/test/00_setup_env_mac_native_fuzz.sh index cacf2423ac..22b6bc97ab 100755 --- a/ci/test/00_setup_env_mac_native_fuzz.sh +++ b/ci/test/00_setup_env_mac_native_fuzz.sh @@ -6,6 +6,7 @@ export LC_ALL=C.UTF-8 +export CONTAINER_NAME="ci_mac_native_fuzz" # macos does not use a container, but the env var is needed for logging export CMAKE_GENERATOR="Ninja" export BITCOIN_CONFIG="-DBUILD_FOR_FUZZING=ON" export CI_OS_NAME="macos" diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh index c2ef2291bd..36f8b9dfc2 100755 --- a/ci/test/03_test_script.sh +++ b/ci/test/03_test_script.sh @@ -137,6 +137,12 @@ bash -c "cmake -S $BASE_ROOT_DIR $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( (cat $ bash -c "cmake --build . $MAKEJOBS --target all $GOAL" || ( echo "Build failure. Verbose build follows." && cmake --build . --target all "$GOAL" --verbose ; false ) bash -c "${PRINT_CCACHE_STATISTICS}" +if [ "$CI" = "true" ]; then + hit_rate=$(ccache -s | grep "Hits:" | head -1 | sed 's/.*(\(.*\)%).*/\1/') + if [ "${hit_rate%.*}" -lt 75 ]; then + echo "::notice title=low ccache hitrate::Ccache hit-rate in $CONTAINER_NAME was $hit_rate%" + fi +fi du -sh "${DEPENDS_DIR}"/*/ du -sh "${PREVIOUS_RELEASES_DIR}" From 4e8b64b181e1bb7d82789699eaac24dc1242afa3 Mon Sep 17 00:00:00 2001 From: will Date: Tue, 5 Aug 2025 14:41:09 +0100 Subject: [PATCH 061/356] ci: fix annoying docker warning Github-Pull: #32989 Rebased-From: 2aa288efdda Docker currently warns that we are missing a default value. Set this to scratch which will error if an appropriate image tag is not passed in to silence the warning. 
--- ci/test_imagefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci/test_imagefile b/ci/test_imagefile index 224141b138..f9cf3187a2 100644 --- a/ci/test_imagefile +++ b/ci/test_imagefile @@ -4,7 +4,8 @@ # See ci/README.md for usage. -ARG CI_IMAGE_NAME_TAG +# We never want scratch, but default arg silences a Warning +ARG CI_IMAGE_NAME_TAG=scratch FROM ${CI_IMAGE_NAME_TAG} ARG FILE_ENV From 6ded1fe11752372c52169d2aec7f0658bf9b0455 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Fri, 8 Aug 2025 10:31:56 +0200 Subject: [PATCH 062/356] ci: remove un-needed lint_run*.sh files Github-Pull: #32989 Rebased-From: 3c5da69a232 ci/lint_run_all.sh: Only used in .cirrus.yml. Refer to test/lint/README.md on how to run locally. --- ci/lint_run_all.sh | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100755 ci/lint_run_all.sh diff --git a/ci/lint_run_all.sh b/ci/lint_run_all.sh deleted file mode 100755 index c57261d21a..0000000000 --- a/ci/lint_run_all.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019-present The Bitcoin Core developers -# Distributed under the MIT software license, see the accompanying -# file COPYING or http://www.opensource.org/licenses/mit-license.php. - -export LC_ALL=C.UTF-8 - -# Only used in .cirrus.yml. Refer to test/lint/README.md on how to run locally. - -cp "./ci/retry/retry" "/ci_retry" -cp "./.python-version" "/.python-version" -mkdir --parents "/test/lint" -cp --recursive "./test/lint/test_runner" "/test/lint/" -set -o errexit; source ./ci/lint/04_install.sh -set -o errexit -./ci/lint/06_script.sh From 4a034cbeb42763c6b7a82089973c4a30cb0cd1c4 Mon Sep 17 00:00:00 2001 From: will Date: Thu, 4 Sep 2025 19:53:45 +0100 Subject: [PATCH 063/356] ci: reduce runner sizes on various jobs Github-Pull: #33319 Rebased-From: 5eeb2facbbbbf68a2c30ef9e6747e39c85d7b116 These jobs can use reduced runner size to avoid wasting CPU, as much of the long-running part of the job is single-threaded. 
Suggested in: https://github.com/bitcoin/bitcoin/pull/32989#discussion_r2321775620 Co-authored-by: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> --- .github/workflows/ci.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 60cb41b8f9..2b774b6afd 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -310,7 +310,7 @@ jobs: file-env: './ci/test/00_setup_env_win64.sh' - name: 'ASan + LSan + UBSan + integer, no depends, USDT' - cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools fallback-runner: 'ubuntu-24.04' timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_asan.sh' @@ -322,7 +322,7 @@ jobs: file-env: './ci/test/00_setup_env_mac_cross.sh' - name: 'No wallet, libbitcoinkernel' - cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' fallback-runner: 'ubuntu-24.04' timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh' @@ -358,7 +358,7 @@ jobs: file-env: './ci/test/00_setup_env_native_tidy.sh' - name: 'TSan, depends, no gui' - cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg' + cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' fallback-runner: 'ubuntu-24.04' timeout-minutes: 120 file-env: './ci/test/00_setup_env_native_tsan.sh' @@ -405,7 +405,7 @@ jobs: lint: name: 'lint' needs: runners - runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm' || 'ubuntu-24.04' }} + runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-xs' || 'ubuntu-24.04' }} if: ${{ 
vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }} timeout-minutes: 20 env: From 78d93effd03278b46e21ae8ef79f61f4ec32f855 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Thu, 4 Sep 2025 11:21:45 +0200 Subject: [PATCH 064/356] ci: Checkout latest merged pulls Github-Pull: #33303 Rebased-From: fa8f081af31 --- .github/workflows/ci.yml | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2b774b6afd..ae614d5bb2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -123,8 +123,12 @@ jobs: BASE_ROOT_DIR: ${{ github.workspace }} steps: - - name: Checkout - uses: actions/checkout@v4 + - &CHECKOUT + name: Checkout + uses: actions/checkout@v5 + with: + # Ensure the latest merged pull request state is used, even on re-runs. + ref: &CHECKOUT_REF_TMPL ${{ github.event_name == 'pull_request' && github.ref || '' }} - name: Clang version run: | @@ -192,8 +196,7 @@ jobs: job-name: 'Win64 native fuzz, VS 2022' steps: - - name: Checkout - uses: actions/checkout@v4 + - *CHECKOUT - name: Configure Developer Command Prompt for Microsoft Visual C++ # Using microsoft/setup-msbuild is not enough. 
@@ -370,8 +373,7 @@ jobs: file-env: './ci/test/00_setup_env_native_msan.sh' steps: - - name: Checkout - uses: actions/checkout@v5 + - *CHECKOUT - name: Configure environment uses: ./.github/actions/configure-environment @@ -414,6 +416,7 @@ jobs: - name: Checkout uses: actions/checkout@v5 with: + ref: *CHECKOUT_REF_TMPL fetch-depth: 0 - name: Configure Docker From 5750355139eb7fc2bd11124adf46bf053be6b690 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 1 Aug 2025 09:48:30 +0100 Subject: [PATCH 065/356] ci: link against -lstdc++ in native fuzz with msan job Github-Pull: #33425 Rebased-From: b77137a5644e09a08442aed7d8a4a9290fb53526 --- ci/test/00_setup_env_native_fuzz_with_msan.sh | 6 ++++-- ci/test/01_base_install.sh | 17 ----------------- 2 files changed, 4 insertions(+), 19 deletions(-) diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh index 27b704017c..655fe609c0 100755 --- a/ci/test/00_setup_env_native_fuzz_with_msan.sh +++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh @@ -7,14 +7,16 @@ export LC_ALL=C.UTF-8 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04" +export APT_LLVM_V="21" LIBCXX_DIR="/cxx_build/" export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls" -LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument" +# -lstdc++ to resolve link issues due to upstream packaging +LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument -lstdc++" export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}" export CONTAINER_NAME="ci_native_fuzz_msan" -export PACKAGES="ninja-build" # BDB generates false-positives and will be removed in future +export PACKAGES="ninja-build 
clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev" export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'" export GOAL="all" # Setting CMAKE_{C,CXX}_FLAGS_DEBUG flags to an empty string ensures that the flags set in MSAN_FLAGS remain unaltered. diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh index 1b624f3894..65f68351c8 100755 --- a/ci/test/01_base_install.sh +++ b/ci/test/01_base_install.sh @@ -57,23 +57,6 @@ fi if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.1" /llvm-project - if [ -n "${APT_LLVM_V}" ]; then - - cmake -G Ninja -B /clang_build/ \ - -DLLVM_ENABLE_PROJECTS="clang" \ - -DCMAKE_BUILD_TYPE=Release \ - -DLLVM_TARGETS_TO_BUILD=Native \ - -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \ - -S /llvm-project/llvm - - ninja -C /clang_build/ "$MAKEJOBS" - ninja -C /clang_build/ install-runtimes - - update-alternatives --install /usr/bin/clang++ clang++ /clang_build/bin/clang++ 100 - update-alternatives --install /usr/bin/clang clang /clang_build/bin/clang 100 - update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /clang_build/bin/llvm-symbolizer 100 - fi - cmake -G Ninja -B /cxx_build/ \ -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \ -DCMAKE_BUILD_TYPE=Release \ From 118abf4c305c01e6359a8588327a0b011ca52944 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Sat, 20 Sep 2025 21:32:41 +0200 Subject: [PATCH 066/356] test: add block 2016 to mock mainnet The next commit requires an additional mainnet block which changes the difficulty. Also fix a few minor mistakes in the test (suite): - rename the create_coinbase retarget_period argument to halving_period. Before bitcoin#31583 this was hardcoded for regtest where these values are the same.
- drop unused fees argument from mine helper Finally the CPU miner instructions for generating the alternative mainnet chain are expanded. Github-Pull: #33446 Rebased-From: 4c3c1f42cf705e039751395799240da33ca969bd --- test/functional/data/README.md | 14 +++++++++++--- test/functional/data/mainnet_alt.json | 6 ++++-- test/functional/mining_mainnet.py | 19 +++++++++++++------ test/functional/test_framework/blocktools.py | 4 ++-- 4 files changed, 30 insertions(+), 13 deletions(-) diff --git a/test/functional/data/README.md b/test/functional/data/README.md index bb03422f95..956394e385 100644 --- a/test/functional/data/README.md +++ b/test/functional/data/README.md @@ -11,9 +11,10 @@ The alternate mainnet chain was generated as follows: - restart node with a faketime 2 minutes later ```sh -for i in {1..2015} +for i in {1..2016} do - faketime "`date -d @"$(( 1231006505 + $i * 120 ))" +'%Y-%m-%d %H:%M:%S'`" \ + t=$(( 1231006505 + $i * 120 )) + faketime "`date -d @$t +'%Y-%m-%d %H:%M:%S'`" \ bitcoind -connect=0 -nocheckpoints -stopatheight=$i done ``` @@ -21,7 +22,9 @@ done The CPU miner is kept running as follows: ```sh -./minerd --coinbase-addr 1NQpH6Nf8QtR2HphLRcvuVqfhXBXsiWn8r --no-stratum --algo sha256d --no-longpoll --scantime 3 --retry-pause 1 +./minerd -u ... -p ... -o http://127.0.0.1:8332 --no-stratum \ + --coinbase-addr 1NQpH6Nf8QtR2HphLRcvuVqfhXBXsiWn8r \ + --algo sha256d --no-longpoll --scantime 3 --retry-pause 1 ``` The payout address is derived from first BIP32 test vector master key: @@ -40,3 +43,8 @@ The timestamp was not kept constant because at difficulty 1 it's not sufficient to only grind the nonce. Grinding the extra_nonce or version field instead would have required additional (stratum) software. It would also make it more complicated to reconstruct the blocks in this test. + +The `getblocktemplate` RPC code needs to be patched to ignore not being connected +to any peers, and to ignore the IBD status check. + +On macOS use `faketime "@$t"` instead. 
diff --git a/test/functional/data/mainnet_alt.json b/test/functional/data/mainnet_alt.json index a4a072d2c5..96821a36f4 100644 --- a/test/functional/data/mainnet_alt.json +++ b/test/functional/data/mainnet_alt.json @@ -2014,7 +2014,8 @@ 1231247971, 1231248071, 1231248198, - 1231248322 + 1231248322, + 1231248621 ], "nonces": [ 2345621585, @@ -4031,6 +4032,7 @@ 3658502865, 2519048297, 1915965760, - 1183846025 + 1183846025, + 2713372123 ] } diff --git a/test/functional/mining_mainnet.py b/test/functional/mining_mainnet.py index c2757b6157..c58c4784b4 100755 --- a/test/functional/mining_mainnet.py +++ b/test/functional/mining_mainnet.py @@ -54,15 +54,15 @@ def add_options(self, parser): self.add_wallet_options(parser) - def mine(self, height, prev_hash, blocks, node, fees=0): + def mine(self, height, prev_hash, blocks, node): self.log.debug(f"height={height}") block = CBlock() block.nVersion = 0x20000000 block.hashPrevBlock = int(prev_hash, 16) block.nTime = blocks['timestamps'][height - 1] - block.nBits = DIFF_1_N_BITS + block.nBits = DIFF_1_N_BITS if height < 2016 else DIFF_4_N_BITS block.nNonce = blocks['nonces'][height - 1] - block.vtx = [create_coinbase(height=height, script_pubkey=bytes.fromhex(COINBASE_SCRIPT_PUBKEY), retarget_period=2016)] + block.vtx = [create_coinbase(height=height, script_pubkey=bytes.fromhex(COINBASE_SCRIPT_PUBKEY), halving_period=210000)] block.hashMerkleRoot = block.calc_merkle_root() block.rehash() block_hex = block.serialize(with_witness=False).hex() @@ -81,12 +81,15 @@ def run_test(self): self.log.info("Load alternative mainnet blocks") path = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.options.datafile) prev_hash = node.getbestblockhash() + blocks = None with open(path, encoding='utf-8') as f: blocks = json.load(f) n_blocks = len(blocks['timestamps']) - assert_equal(n_blocks, 2015) - for i in range(2015): - prev_hash = self.mine(i + 1, prev_hash, blocks, node) + assert_equal(n_blocks, 2016) + + # Mine up to the last 
block of the first retarget period + for i in range(2015): + prev_hash = self.mine(i + 1, prev_hash, blocks, node) assert_equal(node.getblockcount(), 2015) @@ -101,5 +104,9 @@ def run_test(self): assert_equal(mining_info['next']['bits'], nbits_str(DIFF_4_N_BITS)) assert_equal(mining_info['next']['target'], target_str(DIFF_4_TARGET)) + # Mine first block of the second retarget period + height = 2016 + prev_hash = self.mine(height, prev_hash, blocks, node) + assert_equal(node.getblockcount(), height) if __name__ == '__main__': MiningMainnetTest(__file__).main() diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index 38600bc005..49e2518887 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -143,7 +143,7 @@ def script_BIP34_coinbase_height(height): return CScript([CScriptNum(height)]) -def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_script=None, fees=0, nValue=50, retarget_period=REGTEST_RETARGET_PERIOD): +def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_script=None, fees=0, nValue=50, halving_period=REGTEST_RETARGET_PERIOD): """Create a coinbase transaction. 
If pubkey is passed in, the coinbase output will be a P2PK output; @@ -156,7 +156,7 @@ def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_scr coinbaseoutput = CTxOut() coinbaseoutput.nValue = nValue * COIN if nValue == 50: - halvings = int(height / retarget_period) + halvings = int(height / halving_period) coinbaseoutput.nValue >>= halvings coinbaseoutput.nValue += fees if pubkey is not None: From 22ab141243eeb4a929e589ef70a6f54a5aaed3ba Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Sat, 20 Sep 2025 21:33:13 +0200 Subject: [PATCH 067/356] rpc: fix getblock(header) returns target for tip A target field was added to the getblock and getblockheader RPC calls in bitcoin#31583, but it mistakenly always used the tip value. Because regtest does not have difficulty adjustment, a test is added for mainnet instead. Github-Pull: #33446 Rebased-From: bf7996cbc3becf329d8b1cd2f1007fec9b3a3188 --- src/rpc/blockchain.cpp | 2 +- test/functional/mining_mainnet.py | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 8cbca51ccb..edda17d369 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -164,7 +164,7 @@ UniValue blockheaderToJSON(const CBlockIndex& tip, const CBlockIndex& blockindex result.pushKV("mediantime", blockindex.GetMedianTimePast()); result.pushKV("nonce", blockindex.nNonce); result.pushKV("bits", strprintf("%08x", blockindex.nBits)); - result.pushKV("target", GetTarget(tip, pow_limit).GetHex()); + result.pushKV("target", GetTarget(blockindex, pow_limit).GetHex()); result.pushKV("difficulty", GetDifficulty(blockindex)); result.pushKV("chainwork", blockindex.nChainWork.GetHex()); result.pushKV("nTx", blockindex.nTx); diff --git a/test/functional/mining_mainnet.py b/test/functional/mining_mainnet.py index c58c4784b4..456381af55 100755 --- a/test/functional/mining_mainnet.py +++ b/test/functional/mining_mainnet.py @@ -108,5 +108,17 @@ def 
run_test(self): height = 2016 prev_hash = self.mine(height, prev_hash, blocks, node) assert_equal(node.getblockcount(), height) + + mining_info = node.getmininginfo() + assert_equal(mining_info['difficulty'], 4) + + self.log.info("getblock RPC should show historical target") + block_info = node.getblock(node.getblockhash(1)) + + assert_equal(block_info['difficulty'], 1) + assert_equal(block_info['bits'], nbits_str(DIFF_1_N_BITS)) + assert_equal(block_info['target'], target_str(DIFF_1_TARGET)) + + if __name__ == '__main__': MiningMainnetTest(__file__).main() From 9960071e74a956f6535ca3ad6c6f362c25c15bae Mon Sep 17 00:00:00 2001 From: Marcel Stampfer Date: Wed, 24 Sep 2025 10:09:51 +0100 Subject: [PATCH 068/356] test: Fix typo in tool_cli_bash_completion.py: 'relevent' -> 'relevant' Github-Pull: knots#190 Rebased-From: f38418c3db56baee36969f92a153632c74173c30 --- test/functional/tool_cli_bash_completion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) mode change 100755 => 100644 test/functional/tool_cli_bash_completion.py diff --git a/test/functional/tool_cli_bash_completion.py b/test/functional/tool_cli_bash_completion.py old mode 100755 new mode 100644 index b8e0246b62..ff645f7959 --- a/test/functional/tool_cli_bash_completion.py +++ b/test/functional/tool_cli_bash_completion.py @@ -69,7 +69,7 @@ def get_num_args(self): return max(pos) def generate_autocomplete(self, pos): - """ Generate the autocomplete file line relevent to the given position pos. """ + """ Generate the autocomplete file line relevant to the given position pos. 
""" if len(self.arguments[pos]) == 0: raise AssertionError(f"generating undefined arg id {pos} ({self.arguments})") From 0eca32cc876e7149c52c2bb5aaa0db1d20d176ec Mon Sep 17 00:00:00 2001 From: Marcel Stampfer Date: Wed, 24 Sep 2025 10:11:05 +0100 Subject: [PATCH 069/356] test: Add zsh completion script generation support - Add tool_cli_completion.py test script supporting both bash and zsh completions - Add --bash-completion and --zsh-completion parameters for bitcoin-cli - Include zsh-specific completion header and footer templates - Make completion file comparison optional in tests - Refactor completion test for better efficiency and maintainability Github-Pull: knots#190 Rebased-From: e3f6d308a97d8c465fd519067e03e2175dbde7d3 --- .../bitcoin-cli.footer.zsh-completion | 48 +++ .../bitcoin-cli.header.zsh-completion | 25 ++ test/functional/test_runner.py | 2 +- test/functional/tool_cli_bash_completion.py | 282 ------------- test/functional/tool_cli_completion.py | 394 ++++++++++++++++++ 5 files changed, 468 insertions(+), 283 deletions(-) create mode 100644 test/functional/data/completion/bitcoin-cli.footer.zsh-completion create mode 100644 test/functional/data/completion/bitcoin-cli.header.zsh-completion delete mode 100644 test/functional/tool_cli_bash_completion.py create mode 100755 test/functional/tool_cli_completion.py diff --git a/test/functional/data/completion/bitcoin-cli.footer.zsh-completion b/test/functional/data/completion/bitcoin-cli.footer.zsh-completion new file mode 100644 index 0000000000..fa0fa595d9 --- /dev/null +++ b/test/functional/data/completion/bitcoin-cli.footer.zsh-completion @@ -0,0 +1,48 @@ + # Handle current word completions + case "$words[CURRENT]" in + -conf=*) + local conf_path=${words[CURRENT]#-conf=} + _files -W ${conf_path:h} -g "*" + return 0 + ;; + -datadir=*) + local datadir_path=${words[CURRENT]#-datadir=} + _files -/ -W ${datadir_path:h} + return 0 + ;; + -*=*) + # prevent nonsense completions + return 0 + ;; + *) + local 
helpopts commands + local -a opts + + # only parse -help if sensible (empty or starts with -) + if [[ -z "$words[CURRENT]" || "$words[CURRENT]" == -* ]]; then + helpopts="$($bitcoin_cli -help 2>&1 | awk '$1 ~ /^-/ { sub(/=.*/, "="); print $1 }')" + opts+=(${(f)helpopts}) + fi + + # only parse help if sensible (empty or starts with letter) + if [[ -z "$words[CURRENT]" || "$words[CURRENT]" == [a-z]* ]]; then + commands="$(_bitcoin_rpc help 2>/dev/null | awk '$1 ~ /^[a-z]/ { print $1; }')" + opts+=(${(f)commands}) + fi + + _describe 'bitcoin-cli options and commands' opts + + return 0 + ;; + esac +} + +# Function is now defined and will be called by zsh completion system + +# Local variables: +# mode: shell-script +# sh-basic-offset: 4 +# sh-indent-comment: t +# indent-tabs-mode: nil +# End: +# ex: ts=4 sw=4 et filetype=sh diff --git a/test/functional/data/completion/bitcoin-cli.header.zsh-completion b/test/functional/data/completion/bitcoin-cli.header.zsh-completion new file mode 100644 index 0000000000..5cd6c37022 --- /dev/null +++ b/test/functional/data/completion/bitcoin-cli.header.zsh-completion @@ -0,0 +1,25 @@ +# Copyright (c) 2012-2024 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +# call bitcoin-cli for RPC +_bitcoin_rpc() { + # determine already specified args necessary for RPC + local rpcargs=() + local -a words_array + words_array=(${(z)BUFFER}) + + for i in $words_array; do + case "$i" in + -conf=*|-datadir=*|-regtest|-rpc*|-testnet|-testnet4) + rpcargs+=("$i") + ;; + esac + done + + $bitcoin_cli "${rpcargs[@]}" "$@" +} + +_bitcoin-cli() { + local context state line + local bitcoin_cli="$words[1]" diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index ac1ff16b59..84fa9d82d0 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -193,7 +193,7 @@ 'feature_bind_extra.py', 'mempool_resurrect.py', 'wallet_txn_doublespend.py --mineblock', - 'tool_cli_bash_completion.py', + 'tool_cli_completion.py', 'tool_wallet.py --legacy-wallet', 'tool_wallet.py --legacy-wallet --bdbro', 'tool_wallet.py --legacy-wallet --bdbro --swap-bdb-endian', diff --git a/test/functional/tool_cli_bash_completion.py b/test/functional/tool_cli_bash_completion.py deleted file mode 100644 index ff645f7959..0000000000 --- a/test/functional/tool_cli_bash_completion.py +++ /dev/null @@ -1,282 +0,0 @@ -#!/usr/bin/env python3 - -from os import path -from collections import defaultdict - -from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal - - -# bash cli completion file header -COMPLETION_HEADER = """# Dynamic bash programmable completion for bitcoin-cli(1) -# DO NOT EDIT THIS FILE BY HAND -- THIS WILL FAIL THE FUNCTIONAL TEST tool_cli_completion -# This file is auto-generated by the functional test tool_cli_completion. -# If you want to modify this file, modify test/functional/tool_cli_completion.py and re-autogenerate -# this file via the --overwrite test flag. 
- -""" - -# option types which are limited to certain values -TYPED_OPTIONS = [ - ["estimate_mode", {"UNSET", "ECONOMICAL", "CONSERVATIVE"}], - ["sighashtype", {"ALL", "NONE", "SINGLE", "ALL|ANYONECANPAY", - "NONE|ANYONECANPAY", "SINGLE|ANYONECANPAY"}] -] - - -class PossibleArgs(): - """ Helper class to store options associated to a command. """ - def __init__(self, command): - self.command = command - self.arguments = {} - - def set_args(self, position, values): - """ Set the position-th positional argument as having values as possible values. """ - if position in self.arguments: - raise AssertionError(f"The positional parameter at position {position} is already defined for command '{self.command}'") - - self.arguments[position] = values - return self - - def set_bool_args(self, position): - return self.set_args(position, {"true", "false"}) - - def set_file_args(self, position): - # We consider an empty string as a file value for the sake of simplicity (don't - # have to create an extra level of indirection). - return self.set_args(position, {""}) - - def set_unknown_args(self, position): - return self.set_args(position, {}) - - def set_typed_option(self, position, arg_name): - """ Checks if arg_name is a typed option; if it is, sets it and return True. """ - for option_type in TYPED_OPTIONS: - if arg_name == option_type[0]: - self.set_args(position, option_type[1]) - return True - return False - - def has_option(self, position): - return position in self.arguments and len(self.arguments[position]) > 0 - - def get_num_args(self): - """ Return the max number of positional argument the option accepts. """ - pos = list(self.arguments.keys()) - if len(pos) == 0: - return 0 - - return max(pos) - - def generate_autocomplete(self, pos): - """ Generate the autocomplete file line relevant to the given position pos. 
""" - if len(self.arguments[pos]) == 0: - raise AssertionError(f"generating undefined arg id {pos} ({self.arguments})") - - # handle special file case - if len(self.arguments[pos]) == 1 and len(next(iter(self.arguments[pos]))) == 0: - return "_filedir" - - # a set order is undefined, so we order args alphabetically - args = list(self.arguments[pos]) - args.sort() - - return "COMPREPLY=( $( compgen -W \"" + ' '.join(args) + "\" -- \"$cur\" ) )" - -# commands where the option type can only be difficultly derived from the help message -SPECIAL_OPTIONS = [ - PossibleArgs("addnode").set_args(2, {"add", "remove", "onetry"}), - PossibleArgs("setban").set_args(2, {"add", "remove"}), -] - - -def generate_start_complete(cword): - """ Generate the start of an autocomplete block (beware of indentation). """ - if cword > 1: - return f""" if ((cword > {cword})); then - case ${{words[cword-{cword}]}} in""" - - return " case \"$prev\" in" - - -def generate_end_complete(cword): - """ Generate the end of an autocomplete block. 
""" - if cword > 1: - return f"\n{' ' * 8}esac\n{' ' * 4}fi\n\n" - - return f"\n{' ' * 4}esac\n" - - -class CliCompletionTest(BitcoinTestFramework): - def set_test_params(self): - self.num_nodes = 1 - - def skip_test_if_missing_module(self): - self.skip_if_no_cli() - # self.skip_if_no_wallet() - self.skip_if_no_bitcoind_zmq() - - def add_options(self, parser): - parser.add_argument( - '--header', - help='Static header part of the bash completion file', - ) - - parser.add_argument( - '--footer', - help='Static footer part of the bash completion file', - ) - - parser.add_argument( - '--completion', - help='Location of the current bash completion file', - ) - - parser.add_argument( - '--overwrite', - default=False, - action='store_true', - help='Force the test to overwrite the file pointer to by the --completion' - 'to the newly generated completion file', - ) - def parse_single_helper(self, option): - """ Complete the arguments of option via the RPC format command. """ - - res = self.nodes[0].format(command=option.command, output='args_cli') - if len(res) == 0: - return option - - if res.count('\n') > 1: - raise AssertionError( - f"command {option.command} doesn't support format RPC. Should it be a hidden command? " - f"Please call RPCHelpMan::Check when adding a new non-hidden command. Returned: {res}" - ) - - for idx, argument in enumerate(res.split(",")): - elems = argument.split(":") - - if option.set_typed_option(idx+1, elems[0]): - continue - - if elems[1] == "boolean": - option.set_bool_args(idx+1) - continue - - if elems[1] == "file": - option.set_file_args(idx+1) - continue - - if not option.has_option(idx+1): - option.set_unknown_args(idx+1) - - return option - - def get_command_options(self, command): - """ Returns the corresponding PossibleArgs for the command. 
""" - - # verify it's not a special option first - for soption in SPECIAL_OPTIONS: - if command == soption.command: - return self.parse_single_helper(soption) - - return self.parse_single_helper(PossibleArgs(command)) - - def generate_completion_block(self, options): - commands = [o.command for o in options] - self.log.info(f"Generating part of the completion file for options {commands}") - - if len(options) == 0: - return "" - - generated = "" - max_pos_options = max(options, key=lambda o: o.get_num_args()).get_num_args() - for cword in range(max_pos_options, 0, -1): - this_options = [option for option in options if option.has_option(cword)] - if len(this_options) == 0: - continue - - # group options by their arguments value - grouped_options = defaultdict(list) - for option in this_options: - arg = option.generate_autocomplete(cword) - grouped_options[arg].append(option) - - # generate the cword block - indent = 12 if cword > 1 else 8 - generated += generate_start_complete(cword) - for line, opt_gr in grouped_options.items(): - opt_gr.sort(key=lambda o: o.command) # show options alphabetically for clarity - args = '|'.join([o.command for o in opt_gr]) - generated += f"\n{' '*indent}{args})\n" - generated += f"{' ' * (indent + 4)}{line}\n{' ' * (indent + 4)}return 0\n{' ' * (indent + 4)};;" - generated += generate_end_complete(cword) - - return generated - - def generate_completion_file(self, commands): - try: - with open(self.options.header, 'r', encoding='utf-8') as header_file: - header = header_file.read() - - with open(self.options.footer, 'r', encoding='utf-8') as footer_file: - footer = footer_file.read() - except Exception as e: - raise AssertionError( - f"Could not read header/footer ({self.options.header} and {self.options.footer}) files. " - f"Tell the test where to find them using the --header/--footer parameters ({e})." 
- ) - return COMPLETION_HEADER + header + commands + footer - - def write_completion_file(self, new_file): - try: - with open(self.options.completion, 'w', encoding='utf-8') as completion_file: - completion_file.write(new_file) - except Exception as e: - raise AssertionError( - f"Could not write the autocomplete file to {self.options.completion}. " - f"Tell the test where to find it using the --completion parameters ({e})." - ) - - def read_completion_file(self): - try: - with open(self.options.completion, 'r', encoding='utf-8') as completion_file: - return completion_file.read() - except Exception as e: - raise AssertionError( - f"Could not read the autocomplete file ({self.options.completion}) file. " - f"Tell the test where to find it using the --completion parameters ({e})." - ) - - - def run_test(self): - # self.config is not available in self.add_options, so complete filepaths here - src_dir = self.config["environment"]["SRCDIR"] - test_data_dir = path.join(src_dir, 'test', 'functional', 'data', 'completion') - if self.options.header is None or len(self.options.header) == 0: - self.options.header = path.join(test_data_dir, 'bitcoin-cli.header.bash-completion') - - if self.options.footer is None or len(self.options.footer) == 0: - self.options.footer = path.join(test_data_dir, 'bitcoin-cli.footer.bash-completion') - - if self.options.completion is None or len(self.options.completion) == 0: - self.options.completion = path.join(src_dir, 'contrib', 'completions', 'bash', 'bitcoin-cli.bash') - - self.log.info('Parsing help commands to get all the command arguments...') - commands = self.nodes[0].help().split("\n") - commands = [c.split(' ')[0] for c in commands if not c.startswith("== ") and len(c) > 0] - commands = [self.get_command_options(c) for c in commands] - - self.log.info('Generating new autocompletion file...') - commands = self.generate_completion_block(commands) - new_completion = self.generate_completion_file(commands) - - if self.options.overwrite: 
- self.log.info("Overwriting the completion file...") - self.write_completion_file(new_completion) - - self.log.info('Checking if the generated and the original completion files matches...') - completion = self.read_completion_file() - assert_equal(new_completion, completion) - -if __name__ == '__main__': - CliCompletionTest(__file__).main() diff --git a/test/functional/tool_cli_completion.py b/test/functional/tool_cli_completion.py new file mode 100755 index 0000000000..65d6647151 --- /dev/null +++ b/test/functional/tool_cli_completion.py @@ -0,0 +1,394 @@ +#!/usr/bin/env python3 + +from os import path +from collections import defaultdict + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + + +# Common warning for auto-generated completion files +COMPLETION_WARNING = """# DO NOT EDIT THIS FILE BY HAND -- THIS WILL FAIL THE FUNCTIONAL TEST tool_cli_completion +# This file is auto-generated by the functional test tool_cli_completion. +# If you want to modify this file, modify test/functional/tool_cli_completion.py and re-autogenerate +# this file via the --overwrite test flag. + +""" + +# Completion file headers for different shells +BASH_COMPLETION_HEADER = f"""# Dynamic bash programmable completion for bitcoin-cli(1) +{COMPLETION_WARNING}""" + +ZSH_COMPLETION_HEADER = f"""#compdef bitcoin-cli +# zsh completion for bitcoin-cli(1) +{COMPLETION_WARNING}""" + +# option types which are limited to certain values +TYPED_OPTIONS = [ + ["estimate_mode", {"UNSET", "ECONOMICAL", "CONSERVATIVE"}], + ["sighashtype", {"ALL", "NONE", "SINGLE", "ALL|ANYONECANPAY", + "NONE|ANYONECANPAY", "SINGLE|ANYONECANPAY"}] +] + + +class PossibleArgs(): + """ Helper class to store options associated to a command. """ + def __init__(self, command): + self.command = command + self.arguments = {} + + def set_args(self, position, values): + """ Set the position-th positional argument as having values as possible values. 
""" + if position in self.arguments: + raise AssertionError(f"The positional parameter at position {position} is already defined for command '{self.command}'") + + self.arguments[position] = values + return self + + def set_bool_args(self, position): + return self.set_args(position, {"true", "false"}) + + def set_file_args(self, position): + # We consider an empty string as a file value for the sake of simplicity (don't + # have to create an extra level of indirection). + return self.set_args(position, {""}) + + def set_unknown_args(self, position): + return self.set_args(position, {}) + + def set_typed_option(self, position, arg_name): + """ Checks if arg_name is a typed option; if it is, sets it and return True. """ + for option_type in TYPED_OPTIONS: + if arg_name == option_type[0]: + self.set_args(position, option_type[1]) + return True + return False + + def has_option(self, position): + return position in self.arguments and len(self.arguments[position]) > 0 + + def get_num_args(self): + """ Return the max number of positional argument the option accepts. """ + pos = list(self.arguments.keys()) + if len(pos) == 0: + return 0 + + return max(pos) + + def generate_bash_autocomplete(self, pos): + """ Generate the bash autocomplete file line relevant to the given position pos. """ + if len(self.arguments[pos]) == 0: + raise AssertionError(f"generating undefined arg id {pos} ({self.arguments})") + + # handle special file case + if len(self.arguments[pos]) == 1 and len(next(iter(self.arguments[pos]))) == 0: + return "_filedir" + + # a set order is undefined, so we order args alphabetically + args = list(self.arguments[pos]) + args.sort() + return "COMPREPLY=( $( compgen -W \"" + ' '.join(args) + "\" -- \"$cur\" ) )" + + def generate_zsh_autocomplete(self, pos): + """ Generate the zsh autocomplete file line relevant to the given position pos. 
""" + if len(self.arguments[pos]) == 0: + raise AssertionError(f"generating undefined arg id {pos} ({self.arguments})") + + # handle special file case + if len(self.arguments[pos]) == 1 and len(next(iter(self.arguments[pos]))) == 0: + return "_files" + + # a set order is undefined, so we order args alphabetically + args = list(self.arguments[pos]) + args.sort() + return "_values 'arg' " + ' '.join(f"'{arg}'" for arg in args) + +# commands where the option type can only be difficultly derived from the help message +SPECIAL_OPTIONS = [ + PossibleArgs("addnode").set_args(2, {"add", "remove", "onetry"}), + PossibleArgs("setban").set_args(2, {"add", "remove"}), +] + + +def generate_start_complete(cword): + """ Generate the start of an autocomplete block (beware of indentation). """ + if cword > 1: + return f""" if ((cword > {cword})); then + case ${{words[cword-{cword}]}} in""" + + return " case \"$prev\" in" + + +def generate_end_complete(cword): + """ Generate the end of an autocomplete block. """ + if cword > 1: + return f"\n{' ' * 8}esac\n{' ' * 4}fi\n\n" + + return f"\n{' ' * 4}esac\n" + + +class CliCompletionTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + def skip_test_if_missing_module(self): + self.skip_if_no_cli() + + def add_options(self, parser): + parser.add_argument( + '--overwrite', + default=False, + action='store_true', + help='Force the test to overwrite the completion files with newly generated ones', + ) + parser.add_argument( + '--bash-completion', + default=None, + help='Location of the current bash completion file', + ) + parser.add_argument( + '--zsh-completion', + default=None, + help='Location of the current zsh completion file', + ) + def parse_single_helper(self, option): + """ Complete the arguments of option via the RPC format command. 
""" + + res = self.nodes[0].format(command=option.command, output='args_cli') + if len(res) == 0: + return option + + if res.count('\n') > 1: + raise AssertionError( + f"command {option.command} doesn't support format RPC. Should it be a hidden command? " + f"Please call RPCHelpMan::Check when adding a new non-hidden command. Returned: {res}" + ) + + for idx, argument in enumerate(res.split(",")): + elems = argument.split(":") + + if option.set_typed_option(idx+1, elems[0]): + continue + + if elems[1] == "boolean": + option.set_bool_args(idx+1) + continue + + if elems[1] == "file": + option.set_file_args(idx+1) + continue + + if not option.has_option(idx+1): + option.set_unknown_args(idx+1) + + return option + + def get_command_options(self, command): + """ Returns the corresponding PossibleArgs for the command. """ + + # verify it's not a special option first + for soption in SPECIAL_OPTIONS: + if command == soption.command: + return self.parse_single_helper(soption) + + return self.parse_single_helper(PossibleArgs(command)) + + def generate_bash_completion_block(self, options): + """Generate bash-specific completion block.""" + commands = [o.command for o in options] + self.log.info(f"Generating bash completion for options {commands}") + + if len(options) == 0: + return "" + + generated = "" + max_pos_options = max(options, key=lambda o: o.get_num_args()).get_num_args() + for cword in range(max_pos_options, 0, -1): + this_options = [option for option in options if option.has_option(cword)] + if len(this_options) == 0: + continue + + # group options by their arguments value + grouped_options = defaultdict(list) + for option in this_options: + arg = option.generate_bash_autocomplete(cword) + grouped_options[arg].append(option) + + # generate the cword block + indent = 12 if cword > 1 else 8 + generated += generate_start_complete(cword) + for line, opt_gr in grouped_options.items(): + opt_gr.sort(key=lambda o: o.command) # show options alphabetically for clarity + 
args = '|'.join([o.command for o in opt_gr]) + generated += f"\n{' '*indent}{args})\n" + generated += f"{' ' * (indent + 4)}{line}\n{' ' * (indent + 4)}return 0\n{' ' * (indent + 4)};;" + generated += generate_end_complete(cword) + + return generated + + def generate_zsh_completion_block(self, options): + """Generate zsh-specific completion block.""" + commands = [o.command for o in options] + self.log.info(f"Generating zsh completion for options {commands}") + + if len(options) == 0: + return "" + + generated = "" + max_pos_options = max(options, key=lambda o: o.get_num_args()).get_num_args() + + # Generate completion blocks from highest position to lowest + for cword in range(max_pos_options, 0, -1): + this_options = [option for option in options if option.has_option(cword)] + if len(this_options) == 0: + continue + + # Group options by their arguments value + grouped_options = defaultdict(list) + for option in this_options: + arg = option.generate_zsh_autocomplete(cword) + grouped_options[arg].append(option) + + # Generate the CURRENT check and case block + if cword > 1: + generated += f"\n if (( CURRENT > {cword + 1} )); then\n" + generated += f" case ${{words[CURRENT-{cword}]}} in\n" + indent = 12 + else: + generated += "\n # Handle previous word completions\n" + generated += ' case "${words[CURRENT-1]}" in\n' + indent = 8 + + for line, opt_gr in grouped_options.items(): + opt_gr.sort(key=lambda o: o.command) # show options alphabetically for clarity + args = '|'.join([o.command for o in opt_gr]) + generated += f"{' '*indent}{args})\n" + generated += f"{' ' * (indent + 4)}{line}\n" + generated += f"{' ' * (indent + 4)}return 0\n" + generated += f"{' ' * (indent + 4)};;\n" + + if cword > 1: + generated += " esac\n" + generated += " fi\n" + else: + generated += " esac\n" + + return generated + + def generate_both_completion_blocks(self, options): + """Generate both bash and zsh completion blocks.""" + bash_block = self.generate_bash_completion_block(options) + 
zsh_block = self.generate_zsh_completion_block(options) + return bash_block, zsh_block + + def generate_completion_files(self, bash_commands, zsh_commands, bash_header_path, bash_footer_path, zsh_header_path, zsh_footer_path): + """Generate both bash and zsh completion files.""" + # Read bash header and footer + try: + with open(bash_header_path, 'r', encoding='utf-8') as f: + bash_header = f.read() + with open(bash_footer_path, 'r', encoding='utf-8') as f: + bash_footer = f.read() + except Exception as e: + raise AssertionError( + f"Could not read bash header/footer files ({bash_header_path} and {bash_footer_path}): {e}" + ) + + # Read zsh header and footer + try: + with open(zsh_header_path, 'r', encoding='utf-8') as f: + zsh_header = f.read() + with open(zsh_footer_path, 'r', encoding='utf-8') as f: + zsh_footer = f.read() + except Exception as e: + raise AssertionError( + f"Could not read zsh header/footer files ({zsh_header_path} and {zsh_footer_path}): {e}" + ) + + bash_completion = BASH_COMPLETION_HEADER + bash_header + bash_commands + bash_footer + zsh_completion = ZSH_COMPLETION_HEADER + zsh_header + zsh_commands + zsh_footer + + return bash_completion, zsh_completion + + def write_completion_file(self, new_file, file_path): + """Write a completion file to the specified path.""" + try: + with open(file_path, 'w', encoding='utf-8') as completion_file: + completion_file.write(new_file) + except Exception as e: + raise AssertionError( + f"Could not write the autocomplete file to {file_path}: {e}" + ) + + def read_completion_file(self, file_path): + """Read a completion file from the specified path.""" + try: + with open(file_path, 'r', encoding='utf-8') as completion_file: + return completion_file.read() + except Exception as e: + raise AssertionError( + f"Could not read the autocomplete file ({file_path}): {e}" + ) + + + def run_test(self): + # self.config is not available in self.add_options, so complete filepaths here + src_dir = 
self.config["environment"]["SRCDIR"] + test_data_dir = path.join(src_dir, 'test', 'functional', 'data', 'completion') + + # Define all file paths + bash_header_path = path.join(test_data_dir, 'bitcoin-cli.header.bash-completion') + bash_footer_path = path.join(test_data_dir, 'bitcoin-cli.footer.bash-completion') + + # Use command line parameter if provided, otherwise use default path + if self.options.bash_completion: + bash_completion_path = self.options.bash_completion + else: + bash_completion_path = path.join(src_dir, 'contrib', 'completions', 'bash', 'bitcoin-cli.bash') + + zsh_header_path = path.join(test_data_dir, 'bitcoin-cli.header.zsh-completion') + zsh_footer_path = path.join(test_data_dir, 'bitcoin-cli.footer.zsh-completion') + + # Use command line parameter if provided, otherwise use default path + if self.options.zsh_completion: + zsh_completion_path = self.options.zsh_completion + else: + zsh_completion_path = path.join(src_dir, 'contrib', 'completions', 'zsh', 'bitcoin-cli.zsh') + + self.log.info('Parsing help commands to get all the command arguments...') + commands = self.nodes[0].help().split("\n") + commands = [c.split(' ')[0] for c in commands if not c.startswith("== ") and len(c) > 0] + command_options = [self.get_command_options(c) for c in commands] + + self.log.info('Generating new bash and zsh completion files...') + bash_commands, zsh_commands = self.generate_both_completion_blocks(command_options) + + bash_completion, zsh_completion = self.generate_completion_files( + bash_commands, zsh_commands, + bash_header_path, bash_footer_path, + zsh_header_path, zsh_footer_path + ) + + if self.options.overwrite: + self.log.info("Overwriting the bash and zsh completion files...") + self.write_completion_file(bash_completion, bash_completion_path) + self.write_completion_file(zsh_completion, zsh_completion_path) + + # Check bash completion file + if path.exists(bash_completion_path): + self.log.info('Checking if the generated and original bash 
completion files match...') + existing_bash = self.read_completion_file(bash_completion_path) + assert_equal(bash_completion, existing_bash) + else: + self.log.warning(f'Bash completion file not found at {bash_completion_path}, skipping comparison') + + # Check zsh completion file + if path.exists(zsh_completion_path): + self.log.info('Checking if the generated and original zsh completion files match...') + existing_zsh = self.read_completion_file(zsh_completion_path) + assert_equal(zsh_completion, existing_zsh) + else: + self.log.warning(f'Zsh completion file not found at {zsh_completion_path}, skipping comparison') + +if __name__ == '__main__': + CliCompletionTest(__file__).main() From aff95a8a606becdc1667da32096d21ab4ea8b6cb Mon Sep 17 00:00:00 2001 From: ismaelsadeeq Date: Wed, 24 Sep 2025 16:31:38 +0200 Subject: [PATCH 070/356] miner: fix `addPackageTxs` unsigned integer overflow Github-Pull: #33475 Rebased-From: b807dfcdc5929c314d43b790c9e705d5bf0a86e8 --- src/node/miner.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 2b9c07d469..07a0a3b5c5 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -395,8 +395,8 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda ++nConsecutiveFailed; - if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight > - m_options.nBlockMaxWeight - BLOCK_FULL_ENOUGH_WEIGHT_DELTA) { + if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight + + BLOCK_FULL_ENOUGH_WEIGHT_DELTA > m_options.nBlockMaxWeight) { // Give up if we're close to full and haven't succeeded in a while break; } From 9d9baafc6f9357179e57fdcc6cf2ce36d65dd16d Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Fri, 26 Sep 2025 19:25:26 +0200 Subject: [PATCH 071/356] doc: rpc: fix case typo in `finalizepsbt` help (final_scriptwitness) Github-Pull: #33484 Rebased-From: ff05bebcc4262966b117082a67dc4c63a3f67d2d --- src/rpc/rawtransaction.cpp | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 421656152c..77e8fd49e1 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -1494,7 +1494,7 @@ static RPCHelpMan finalizepsbt() return RPCHelpMan{"finalizepsbt", "Finalize the inputs of a PSBT. If the transaction is fully signed, it will produce a\n" "network serialized transaction which can be broadcast with sendrawtransaction. Otherwise a PSBT will be\n" - "created which has the final_scriptSig and final_scriptWitness fields filled for inputs that are complete.\n" + "created which has the final_scriptSig and final_scriptwitness fields filled for inputs that are complete.\n" "Implements the Finalizer and Extractor roles.\n", { {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"}, From a8c2370e2dc72daab04e03e5491489a68dcd4716 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 30 Sep 2025 08:52:44 +0000 Subject: [PATCH 072/356] Interpret ignore_rejects=truc to ignore all TRUC policies --- src/validation.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/validation.cpp b/src/validation.cpp index ccb7d2b199..266ff91356 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1049,6 +1049,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // Even though just checking direct mempool parents for inheritance would be sufficient, we // check using the full ancestor set here because it's more convenient to use what we have // already calculated. + if (!ignore_rejects.count("truc")) { if (const auto err{SingleTRUCChecks(ws.m_ptx, "truc-", reason, ignore_rejects, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) { // Single transaction contexts only. 
if (args.m_allow_sibling_eviction && err->second != nullptr) { @@ -1071,6 +1072,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, reason, err->first); } } + } // !ignore_rejects.count("truc") // A transaction that spends outputs that would be replaced by it is invalid. Now // that we have the set of all ancestors we can detect this From 8c84ff9539219e560a322a149978d17d1e69025f Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 30 Sep 2025 10:15:35 +0000 Subject: [PATCH 073/356] QA: feature_rbf: Import NODE_REPLACE_BY_FEE from test_framework.messages --- test/functional/feature_rbf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py index 57b2a862a7..9c80b4fa52 100755 --- a/test/functional/feature_rbf.py +++ b/test/functional/feature_rbf.py @@ -9,6 +9,7 @@ from test_framework.messages import ( MAX_BIP125_RBF_SEQUENCE, COIN, + NODE_REPLACE_BY_FEE, SEQUENCE_FINAL, ) from test_framework.test_framework import BitcoinTestFramework @@ -65,7 +66,6 @@ def test_rpc_rbf_policy(): self.log.info("Running test service flag") def test_service_flag(): - NODE_REPLACE_BY_FEE = (1 << 26) for i in range(3): assert not (int(self.nodes[i].getnetworkinfo()['localservices'], 0x10) & NODE_REPLACE_BY_FEE) assert 'REPLACE_BY_FEE?' 
not in self.nodes[i].getnetworkinfo()['localservicesnames'] From b3b512dc6ee6fc148c4d3255c4cf21de1aefca79 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 30 Sep 2025 10:47:04 +0000 Subject: [PATCH 074/356] Discontinue advertising NODE_REPLACE_BY_FEE service bit Full RBF is mostly ubiquitous these days, so retire the temporary service bit so it can be reused eventually --- src/init.cpp | 4 ---- test/functional/feature_rbf.py | 6 ++---- test/functional/p2p_node_network_limited.py | 3 +-- 3 files changed, 3 insertions(+), 10 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 1fd2e8dc01..cbf6152f41 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1256,10 +1256,6 @@ static ChainstateLoadResult InitAndLoadChainstate( LogPrintf("* Flushing caches if available system memory drops below %s MiB\n", g_low_memory_threshold / 1024 / 1024); } - if (mempool_opts.rbf_policy == RBFPolicy::Always) { - g_local_services = ServiceFlags(g_local_services | NODE_REPLACE_BY_FEE); - } - ChainstateManager::Options chainman_opts{ .chainparams = chainparams, .datadir = args.GetDataDirNet(), diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py index 9c80b4fa52..08a5e4d753 100755 --- a/test/functional/feature_rbf.py +++ b/test/functional/feature_rbf.py @@ -64,13 +64,11 @@ def test_rpc_rbf_policy(): assert_equal(self.nodes[3].getmempoolinfo()["rbf_policy"], 'always') test_rpc_rbf_policy() - self.log.info("Running test service flag") + self.log.info("Running test no service flag") def test_service_flag(): - for i in range(3): + for i in range(4): assert not (int(self.nodes[i].getnetworkinfo()['localservices'], 0x10) & NODE_REPLACE_BY_FEE) assert 'REPLACE_BY_FEE?' not in self.nodes[i].getnetworkinfo()['localservicesnames'] - assert int(self.nodes[3].getnetworkinfo()['localservices'], 0x10) & NODE_REPLACE_BY_FEE - assert 'REPLACE_BY_FEE?' 
in self.nodes[3].getnetworkinfo()['localservicesnames'] test_service_flag() self.log.info("Running test simple doublespend...") diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py index 9f61712267..7788be6adb 100755 --- a/test/functional/p2p_node_network_limited.py +++ b/test/functional/p2p_node_network_limited.py @@ -14,7 +14,6 @@ NODE_NETWORK_LIMITED, NODE_P2P_V2, NODE_WITNESS, - NODE_REPLACE_BY_FEE, msg_getdata, ) from test_framework.p2p import P2PInterface @@ -119,7 +118,7 @@ def test_avoid_requesting_historical_blocks(self): def run_test(self): node = self.nodes[0].add_p2p_connection(P2PIgnoreInv()) - expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED | NODE_REPLACE_BY_FEE + expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED if self.options.v2transport: expected_services |= NODE_P2P_V2 From bda4457e82d6e359737a4e49bd9d153c3578881c Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 30 Sep 2025 10:56:26 +0000 Subject: [PATCH 075/356] Revert "QA: interface_bitcoin_cli: Adjust expected service flags to include RBF" This reverts commit 97edf90e7bda875821a1b2eb137adfa551c68ed6. 
--- test/functional/interface_bitcoin_cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py index 55657aff8d..e8dca189c7 100755 --- a/test/functional/interface_bitcoin_cli.py +++ b/test/functional/interface_bitcoin_cli.py @@ -93,7 +93,7 @@ def test_netinfo(self): self.log.info("Test -netinfo local services are moved to header if details are requested") det = self.nodes[0].cli('-netinfo', '1').send_cli().splitlines() self.log.debug(f"Test -netinfo 1 header output: {det[0]}") - assert re.match(rf"^{re.escape(self.config['environment']['CLIENT_NAME'])} client.+services nwl2?r$", det[0]) + assert re.match(rf"^{re.escape(self.config['environment']['CLIENT_NAME'])} client.+services nwl2?$", det[0]) assert not any(line.startswith("Local services:") for line in det) def run_test(self): From 66ad7250eeb5c2e9de776efbedbdf41181276a21 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Tue, 30 Sep 2025 11:20:12 +0100 Subject: [PATCH 076/356] depends: Fix `$(package)_fetched` target Ensure the download timestamp is created only after a successful download. 
Github-Pull: #33494 Rebased-From: 771978952a98a0da2d215c9ed8c5db13250ad58d --- depends/funcs.mk | 1 - 1 file changed, 1 deletion(-) diff --git a/depends/funcs.mk b/depends/funcs.mk index 3c0dc7a7fc..d14a5cd9d4 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -206,7 +206,6 @@ endif $($(1)_fetched): mkdir -p $$(@D) $(SOURCES_PATH) rm -f $$@ - touch $$@ cd $$(@D); $($(1)_fetch_cmds) cd $($(1)_source_dir); $(foreach source,$($(1)_all_sources),$(build_SHA256SUM) $(source) >> $$(@);) touch $$@ From 629ec2d73195a869018daa559ce5e2cfda067702 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Mon, 29 Sep 2025 14:30:25 +0100 Subject: [PATCH 077/356] depends: Update URL for `qrencode` package source tarball The https://fukuchi.org/ homepage no longer links to the source tarball, and previously available files appear to have been removed. The homepage now instructs users to download source tarballs from the GitHub releases page instead. The diff between the source trees is immaterial. 
Github-Pull: #33494 Rebased-From: 9dbfce7fc84184fd1f910fb19741ab8de02e62b4 --- depends/packages/qrencode.mk | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/depends/packages/qrencode.mk b/depends/packages/qrencode.mk index e3f614091d..9aae62f01c 100644 --- a/depends/packages/qrencode.mk +++ b/depends/packages/qrencode.mk @@ -1,8 +1,9 @@ package=qrencode $(package)_version=4.1.1 -$(package)_download_path=https://fukuchi.org/works/qrencode/ +$(package)_download_path=https://github.com/fukuchi/libqrencode/archive/refs/tags/ +$(package)_download_file=v$($(package)_version).tar.gz $(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=da448ed4f52aba6bcb0cd48cac0dd51b8692bccc4cd127431402fca6f8171e8e +$(package)_sha256_hash=5385bc1b8c2f20f3b91d258bf8ccc8cf62023935df2d2676b5b67049f31a049c $(package)_patches=cmake_fixups.patch define $(package)_set_vars From fbaa13327784d2e9819e0dcdede4f13c2755ffc0 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 30 Sep 2025 08:10:26 +0000 Subject: [PATCH 078/356] depends: Rename GitHub-sourced qrencode to avoid cache conflicts --- depends/packages/qrencode.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depends/packages/qrencode.mk b/depends/packages/qrencode.mk index 9aae62f01c..44e80b2a19 100644 --- a/depends/packages/qrencode.mk +++ b/depends/packages/qrencode.mk @@ -2,7 +2,7 @@ package=qrencode $(package)_version=4.1.1 $(package)_download_path=https://github.com/fukuchi/libqrencode/archive/refs/tags/ $(package)_download_file=v$($(package)_version).tar.gz -$(package)_file_name=$(package)-$($(package)_version).tar.gz +$(package)_file_name=$(package)-$($(package)_version)-github.tar.gz $(package)_sha256_hash=5385bc1b8c2f20f3b91d258bf8ccc8cf62023935df2d2676b5b67049f31a049c $(package)_patches=cmake_fixups.patch From d7bc5138e196684c5d615eec50b7a5c61080775d Mon Sep 17 00:00:00 2001 From: /dev/fd0 <147166694+1440000bytes@users.noreply.github.com> Date: Thu, 2 Oct 
2025 03:59:22 +0530 Subject: [PATCH 079/356] add migratewallet rpc in historyFilter Github-Pull: knots#203 Rebased-From: 6a5537ab675300911cba435251ee1d9194b4929c --- src/qt/rpcconsole.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index 5ebacbc500..701ef1d9c2 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -80,7 +80,8 @@ const QStringList historyFilter = QStringList() << "signrawtransactionwithkey" << "walletpassphrase" << "walletpassphrasechange" - << "encryptwallet"; + << "encryptwallet" + << "migratewallet"; } From 45121aa8b89e3d527bd8f98e35f78c9deb805ab3 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 2 Oct 2025 11:49:03 +0000 Subject: [PATCH 080/356] Bugfix: Wallet: Migration: Adapt sanity checks for walletimplicitsegwit=0 --- src/wallet/scriptpubkeyman.cpp | 26 ++++++++++++++++++++++---- 1 file changed, 22 insertions(+), 4 deletions(-) diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 734dfa64da..c8b8523750 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -1840,6 +1840,26 @@ std::optional LegacyDataSPKM::MigrateToDescriptor() return std::nullopt; } + constexpr auto sanitycheck = [](const bool erased, const bool maybe_compressed_key, const CScript &spk, const LegacyDataSPKM& self, const DescriptorScriptPubKeyMan& desc_spk_man) { + assert(desc_spk_man.IsMine(spk) == ISMINE_SPENDABLE); + if (erased) { + assert(self.IsMine(spk) == ISMINE_SPENDABLE); + return; + } + if (maybe_compressed_key && !g_implicit_segwit) { + // combo() includes segwit + if (spk.IsPayToScriptHash()) return; + int witness_version; + std::vector witness_program; + if (spk.IsWitnessProgram(witness_version, witness_program)) { + if (witness_version == 0 && witness_program.size() == 20) { + return; + } + } + } + assert(erased); + }; + // keyids is now all non-HD keys. 
Each key will have its own combo descriptor for (const CKeyID& keyid : keyids) { CKey key; @@ -1877,8 +1897,7 @@ std::optional LegacyDataSPKM::MigrateToDescriptor() // Remove the scriptPubKeys from our current set for (const CScript& spk : desc_spks) { size_t erased = spks.erase(spk); - assert(erased == 1); - assert(IsMine(spk) == ISMINE_SPENDABLE); + sanitycheck(erased, key.IsCompressed(), spk, *this, *desc_spk_man); } out.desc_spkms.push_back(std::move(desc_spk_man)); @@ -1923,8 +1942,7 @@ std::optional LegacyDataSPKM::MigrateToDescriptor() // Remove the scriptPubKeys from our current set for (const CScript& spk : desc_spks) { size_t erased = spks.erase(spk); - assert(erased == 1); - assert(IsMine(spk) == ISMINE_SPENDABLE); + sanitycheck(erased, /*maybe_compressed_key=*/true, spk, *this, *desc_spk_man); } out.desc_spkms.push_back(std::move(desc_spk_man)); From 6f23ead4a2d97e245f4fc1824b1dd956dc06cc42 Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Mon, 29 Sep 2025 15:47:07 -0400 Subject: [PATCH 081/356] fuzz: don't bypass_limits for most mempool harnesses Using bypass_limits=true is essentially fuzzing part of a reorg only, and results in TRUC invariants unable to be checked. Remove most instances of bypassing limits, leaving one harness able to do so. 
Github-Pull: #33504 Rebased-From: bbe8e9063c15dc230553e0cbf16d603f5ad0e4cf --- src/test/fuzz/package_eval.cpp | 2 +- src/test/fuzz/tx_pool.cpp | 14 ++++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/src/test/fuzz/package_eval.cpp b/src/test/fuzz/package_eval.cpp index 8e3d84a9e6..37b18a5941 100644 --- a/src/test/fuzz/package_eval.cpp +++ b/src/test/fuzz/package_eval.cpp @@ -324,7 +324,7 @@ FUZZ_TARGET(ephemeral_package_eval, .init = initialize_tx_pool) return ProcessNewPackage(chainstate, tx_pool, txs, /*test_accept=*/single_submit, /*client_maxfeerate=*/{})); const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, txs.back(), GetTime(), - /*bypass_limits=*/fuzzed_data_provider.ConsumeBool(), /*test_accept=*/!single_submit)); + /*bypass_limits=*/false, /*test_accept=*/!single_submit)); if (!single_submit && result_package.m_state.GetResult() != PackageValidationResult::PCKG_POLICY) { // We don't know anything about the validity since transactions were randomly generated, so diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp index a697ee9d83..98feadf516 100644 --- a/src/test/fuzz/tx_pool.cpp +++ b/src/test/fuzz/tx_pool.cpp @@ -295,7 +295,6 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool) std::set added; auto txr = std::make_shared(removed, added); node.validation_signals->RegisterSharedValidationInterface(txr); - const bool bypass_limits = fuzzed_data_provider.ConsumeBool(); // Make sure ProcessNewPackage on one transaction works. // The result is not guaranteed to be the same as what is returned by ATMP. 
@@ -310,7 +309,7 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool) it->second.m_result_type == MempoolAcceptResult::ResultType::INVALID); } - const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), bypass_limits, /*test_accept=*/false)); + const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), /*bypass_limits=*/false, /*test_accept=*/false)); const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID; node.validation_signals->SyncWithValidationInterfaceQueue(); node.validation_signals->UnregisterSharedValidationInterface(txr); @@ -393,6 +392,9 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool) chainstate.SetMempool(&tx_pool); + // If we ever bypass limits, do not do TRUC invariants checks + bool ever_bypassed_limits{false}; + LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300) { const auto mut_tx = ConsumeTransaction(fuzzed_data_provider, txids); @@ -411,13 +413,17 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool) tx_pool.PrioritiseTransaction(txid.ToUint256(), delta); } + const bool bypass_limits{fuzzed_data_provider.ConsumeBool()}; + ever_bypassed_limits |= bypass_limits; + const auto tx = MakeTransactionRef(mut_tx); - const bool bypass_limits = fuzzed_data_provider.ConsumeBool(); const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), bypass_limits, /*test_accept=*/false)); const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID; if (accepted) { txids.push_back(tx->GetHash()); - CheckMempoolTRUCInvariants(tx_pool); + if (!ever_bypassed_limits) { + CheckMempoolTRUCInvariants(tx_pool); + } } } Finish(fuzzed_data_provider, tx_pool, chainstate); From 666aec7d49506c587ecbbcd71f6e8f1e7bb4e4cd Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 26 Sep 2025 14:47:47 -0400 Subject: [PATCH 082/356] Mempool: Do not enforce TRUC checks on reorg Not enforcing TRUC topology on reorg was the intended 
behavior, but the appropriate bypass argument was not checked. This mistake means we could potentially invalidate a long chain of perfectly incentive-compatible transactions that were made historically, including subsequent non-TRUC transactions, all of which may have been very high feerate. Lastly, it wastes CPU cycles doing topology checks since this behavior cannot actually enforce the topology in general for the reorg setting. Github-Pull: #33504 Rebased-From: 26e71c237d9d2197824b547f55ee3a0a60149f92 --- src/validation.cpp | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index fde064458d..85504d1e29 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1025,26 +1025,28 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // Even though just checking direct mempool parents for inheritance would be sufficient, we // check using the full ancestor set here because it's more convenient to use what we have // already calculated. - if (const auto err{SingleTRUCChecks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) { - // Single transaction contexts only. - if (args.m_allow_sibling_eviction && err->second != nullptr) { - // We should only be considering where replacement is considered valid as well. - Assume(args.m_allow_replacement); - - // Potential sibling eviction. Add the sibling to our list of mempool conflicts to be - // included in RBF checks. - ws.m_conflicts.insert(err->second->GetHash()); - // Adding the sibling to m_iters_conflicting here means that it doesn't count towards - // RBF Carve Out above. This is correct, since removing to-be-replaced transactions from - // the descendant count is done separately in SingleTRUCChecks for TRUC transactions. 
- ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value()); - ws.m_sibling_eviction = true; - // The sibling will be treated as part of the to-be-replaced set in ReplacementChecks. - // Note that we are not checking whether it opts in to replaceability via BIP125 or TRUC - // (which is normally done in PreChecks). However, the only way a TRUC transaction can - // have a non-TRUC and non-BIP125 descendant is due to a reorg. - } else { - return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "TRUC-violation", err->first); + if (!args.m_bypass_limits) { + if (const auto err{SingleTRUCChecks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) { + // Single transaction contexts only. + if (args.m_allow_sibling_eviction && err->second != nullptr) { + // We should only be considering where replacement is considered valid as well. + Assume(args.m_allow_replacement); + + // Potential sibling eviction. Add the sibling to our list of mempool conflicts to be + // included in RBF checks. + ws.m_conflicts.insert(err->second->GetHash()); + // Adding the sibling to m_iters_conflicting here means that it doesn't count towards + // RBF Carve Out above. This is correct, since removing to-be-replaced transactions from + // the descendant count is done separately in SingleTRUCChecks for TRUC transactions. + ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value()); + ws.m_sibling_eviction = true; + // The sibling will be treated as part of the to-be-replaced set in ReplacementChecks. + // Note that we are not checking whether it opts in to replaceability via BIP125 or TRUC + // (which is normally done in PreChecks). However, the only way a TRUC transaction can + // have a non-TRUC and non-BIP125 descendant is due to a reorg. 
+ } else { + return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "TRUC-violation", err->first); + } } } From a8bb76b61f49e1abd681f21a754f970eef206ced Mon Sep 17 00:00:00 2001 From: Greg Sanders Date: Fri, 26 Sep 2025 14:49:06 -0400 Subject: [PATCH 083/356] test: add more TRUC reorg coverge Github-Pull: #33504 Rebased-From: 06df14ba75be5f48cf9c417424900ace17d1cf4d --- test/functional/mempool_truc.py | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/test/functional/mempool_truc.py b/test/functional/mempool_truc.py index 8850ba8002..d095033a84 100755 --- a/test/functional/mempool_truc.py +++ b/test/functional/mempool_truc.py @@ -164,23 +164,36 @@ def test_truc_replacement(self): def test_truc_reorg(self): node = self.nodes[0] self.log.info("Test that, during a reorg, TRUC rules are not enforced") - tx_v2_block = self.wallet.send_self_transfer(from_node=node, version=2) - tx_v3_block = self.wallet.send_self_transfer(from_node=node, version=3) - tx_v3_block2 = self.wallet.send_self_transfer(from_node=node, version=3) - self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"]]) + self.check_mempool([]) + + # Testing 2<-3 versions allowed + tx_v2_block = self.wallet.create_self_transfer(version=2) + + # Testing 3<-2 versions allowed + tx_v3_block = self.wallet.create_self_transfer(version=3) + + # Testing overly-large child size + tx_v3_block2 = self.wallet.create_self_transfer(version=3) + + # Also create a linear chain of 3 TRUC transactions that will be directly mined, followed by one v2 in-mempool after block is made + tx_chain_1 = self.wallet.create_self_transfer(version=3) + tx_chain_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_chain_1["new_utxo"], version=3) + tx_chain_3 = self.wallet.create_self_transfer(utxo_to_spend=tx_chain_2["new_utxo"], version=3) + + tx_to_mine = [tx_v3_block["hex"], tx_v2_block["hex"], tx_v3_block2["hex"], tx_chain_1["hex"], tx_chain_2["hex"], 
tx_chain_3["hex"]] + block = self.generateblock(node, output="raw(42)", transactions=tx_to_mine) - block = self.generate(node, 1) self.check_mempool([]) tx_v2_from_v3 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block["new_utxo"], version=2) tx_v3_from_v2 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v2_block["new_utxo"], version=3) tx_v3_child_large = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block2["new_utxo"], target_vsize=1250, version=3) assert_greater_than(node.getmempoolentry(tx_v3_child_large["txid"])["vsize"], TRUC_CHILD_MAX_VSIZE) - self.check_mempool([tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]]) - node.invalidateblock(block[0]) - self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"], tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]]) - # This is needed because generate() will create the exact same block again. - node.reconsiderblock(block[0]) + tx_chain_4 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_chain_3["new_utxo"], version=2) + self.check_mempool([tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"], tx_chain_4["txid"]]) + # Reorg should have all block transactions re-accepted, ignoring TRUC enforcement + node.invalidateblock(block["hash"]) + self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"], tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"], tx_chain_1["txid"], tx_chain_2["txid"], tx_chain_3["txid"], tx_chain_4["txid"]]) @cleanup(extra_args=["-limitdescendantsize=10", "-datacarriersize=40000"]) def test_nondefault_package_limits(self): From 2d7ebd2d913ea63c1a23fefa0a09ee06fb069161 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 24 Sep 2025 10:35:55 -0400 Subject: [PATCH 084/356] doc: update release notes for 29.x --- doc/release-notes.md | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 
deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 0325d3a3e2..8a79e99ad2 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.2rc1 is now available from: +Bitcoin Core version 29.2rc2 is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. @@ -43,6 +43,14 @@ Notable changes - #33296 net: check for empty header before calling FillBlock - #33395 net: do not apply whitelist permissions to onion inbounds +### Mempool + +- #33504 mempool: Do not enforce TRUC checks on reorg + +### RPC + +- #33446 rpc: fix getblock(header) returns target for tip + ### CI - #32999 ci: Use APT_LLVM_V in msan task @@ -50,6 +58,10 @@ Notable changes - #33258 ci: use LLVM 21 - #33364 ci: always use tag for LLVM checkout +### Doc + +- #33484 doc: rpc: fix case typo in `finalizepsbt` help + ### Misc - #33310 trace: Workaround GCC bug compiling with old systemtap @@ -67,6 +79,8 @@ Thanks to everyone who directly contributed to this release: - Luke Dashjr - MarcoFalke - Martin Zumsande +- Sebastian Falbesoner +- Sjors Provoost - Vasil Dimov As well as to everyone that helped with translations on From e4713ff8f9dc885afd718ae86def4050b8a96a95 Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Tue, 30 Sep 2025 14:47:17 -0400 Subject: [PATCH 085/356] test: Test SIGTERM handling during waitforblockheight call Currently when CTRL-C is pressed and there is an active `waitforblockheight`, or `waitforblock`, or `waitfornewblock` RPC call, or a mining interface `waitTipChanged` IPC call with a long timeout, the node will not shut down right away, and will wait for the timeout to be reached before exiting. This behavior is not ideal and only happens when the node is stopped with CTRL-C or SIGTERM. When the node is stopped with `bitcoin-cli stop`, the wait calls are interrupted and the node does shut down right away. The next commit improves node behavior. 
This commit just adds test coverage to simplify the next commit and clarify the change in behavior there. Github-Pull: #33511 Rebased-From: 6a29f79006a9d60b476893dface5eea8f9bf271c --- test/functional/feature_init.py | 56 +++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/test/functional/feature_init.py b/test/functional/feature_init.py index 7c157b5267..0b25fa2dca 100755 --- a/test/functional/feature_init.py +++ b/test/functional/feature_init.py @@ -3,13 +3,16 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Tests related to node initialization.""" +from concurrent.futures import ThreadPoolExecutor from pathlib import Path import os import platform import shutil import signal import subprocess +import time +from test_framework.authproxy import JSONRPCException from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ( BITCOIN_PID_FILENAME_DEFAULT, @@ -177,9 +180,62 @@ def init_pid_test(self): self.stop_node(0) assert not custom_pidfile_absolute.exists() + def break_wait_test(self): + """Test what happens when a break signal is sent during a + waitforblockheight RPC call with a long timeout. Ctrl-Break is sent on + Windows and SIGTERM is sent on other platforms, to trigger the same node + shutdown sequence that would happen if Ctrl-C were pressed in a + terminal. (This can be different than the node shutdown sequence that + happens when the stop RPC is sent.) + + Currently when the break signal is sent, it does not interrupt the + waitforblockheight RPC call, and the node does not exit until it times + out.""" + + self.log.info("Testing waitforblockheight RPC call followed by break signal") + node = self.nodes[0] + + if platform.system() == 'Windows': + # CREATE_NEW_PROCESS_GROUP prevents python test from exiting + # with STATUS_CONTROL_C_EXIT (-1073741510) when break is sent. 
+ self.start_node(node.index, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP) + else: + self.start_node(node.index) + + current_height = node.getblock(node.getbestblockhash())['height'] + + with ThreadPoolExecutor(max_workers=1) as ex: + # Call waitforblockheight with wait timeout longer than RPC timeout, + # so it is possible to distinguish whether it times out or returns + # early. If it times out it will throw an exception, and if it + # returns early it will return the current block height. + self.log.debug(f"Calling waitforblockheight with {self.rpc_timeout} sec RPC timeout") + fut = ex.submit(node.waitforblockheight, height=current_height+1, timeout=self.rpc_timeout*1000*2) + time.sleep(1) + + self.log.debug(f"Sending break signal to pid {node.process.pid}") + if platform.system() == 'Windows': + # Note: CTRL_C_EVENT should not be sent here because unlike + # CTRL_BREAK_EVENT it can not be targeted at a specific process + # group and may behave unpredictably. + node.process.send_signal(signal.CTRL_BREAK_EVENT) + else: + # Note: signal.SIGINT would work here as well + node.process.send_signal(signal.SIGTERM) + node.process.wait() + + try: + result = fut.result() + raise Exception(f"waitforblockheight returned {result!r}") + except JSONRPCException as e: + self.log.debug(f"waitforblockheight raised {e!r}") + assert_equal(e.error['code'], -344) # -344 is RPC timeout + node.wait_until_stopped() + def run_test(self): self.init_pid_test() self.init_stress_test() + self.break_wait_test() if __name__ == '__main__': From 663bc960f518a38d8870feec5bef8e51d4c846bf Mon Sep 17 00:00:00 2001 From: Ryan Ofsky Date: Wed, 1 Oct 2025 13:00:33 -0400 Subject: [PATCH 086/356] init: Signal m_tip_block_cv on Ctrl-C Signal m_tip_block_cv when Ctrl-C is pressed or SIGTERM is received, the same way it is currently signalled when the `stop` RPC is called. 
This lets RPC calls like `waitforblockheight` and IPC calls like `waitTipChanged` be interrupted, instead of waiting for their original timeouts and delaying shutdown. Note: the behavior where `stop` RPC signals `m_tip_block_cv` but CTRL-C does not has been around since the condition variable was introduced in #30409 (7eccdaf16081d6f624c4dc21df75b0474e049d2b). The signaling was later moved without changing behvior in #30967 (5ca28ef28bcca1775ff49921fc2528d9439b71ab). This commit moves it again to the Interrupt() function, which is probably the place it should have been added initially, so it works for Ctrl-C shutdowns as well as `stop` shutdowns. Github-Pull: #33511 Rebased-From: 68cad90dace40f7a015ca4ff81b878fc8fdc1dd5 --- src/init.cpp | 4 ++-- test/functional/feature_init.py | 15 +++++---------- 2 files changed, 7 insertions(+), 12 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 7fdbf75dc6..3ec4158cce 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -214,8 +214,6 @@ void InitContext(NodeContext& node) node.shutdown_request = [&node] { assert(node.shutdown_signal); if (!(*node.shutdown_signal)()) return false; - // Wake any threads that may be waiting for the tip to change. - if (node.notifications) WITH_LOCK(node.notifications->m_tip_block_mutex, node.notifications->m_tip_block_cv.notify_all()); return true; }; } @@ -266,6 +264,8 @@ void Interrupt(NodeContext& node) #if HAVE_SYSTEM ShutdownNotify(*node.args); #endif + // Wake any threads that may be waiting for the tip to change. 
+ if (node.notifications) WITH_LOCK(node.notifications->m_tip_block_mutex, node.notifications->m_tip_block_cv.notify_all()); InterruptHTTPServer(); InterruptHTTPRPC(); InterruptRPC(); diff --git a/test/functional/feature_init.py b/test/functional/feature_init.py index 0b25fa2dca..47ec8caef2 100755 --- a/test/functional/feature_init.py +++ b/test/functional/feature_init.py @@ -12,7 +12,6 @@ import subprocess import time -from test_framework.authproxy import JSONRPCException from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ( BITCOIN_PID_FILENAME_DEFAULT, @@ -188,9 +187,8 @@ def break_wait_test(self): terminal. (This can be different than the node shutdown sequence that happens when the stop RPC is sent.) - Currently when the break signal is sent, it does not interrupt the - waitforblockheight RPC call, and the node does not exit until it times - out.""" + The waitforblockheight call should be interrupted and return right away, + and not time out.""" self.log.info("Testing waitforblockheight RPC call followed by break signal") node = self.nodes[0] @@ -224,12 +222,9 @@ def break_wait_test(self): node.process.send_signal(signal.SIGTERM) node.process.wait() - try: - result = fut.result() - raise Exception(f"waitforblockheight returned {result!r}") - except JSONRPCException as e: - self.log.debug(f"waitforblockheight raised {e!r}") - assert_equal(e.error['code'], -344) # -344 is RPC timeout + result = fut.result() + self.log.debug(f"waitforblockheight returned {result!r}") + assert_equal(result["height"], current_height) node.wait_until_stopped() def run_test(self): From d2c1bd10db78669e451b0ed703f0fa2cd82bc055 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 2 Oct 2025 15:37:15 +0000 Subject: [PATCH 087/356] Retain signalling `m_tip_block_cv` via `node.shutdown_request` Needed to interrupt blocking RPCs executed in the GUI --- src/init.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/init.cpp 
b/src/init.cpp index 3ec4158cce..d53bd0d49e 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -214,6 +214,8 @@ void InitContext(NodeContext& node) node.shutdown_request = [&node] { assert(node.shutdown_signal); if (!(*node.shutdown_signal)()) return false; + // Wake any threads that may be waiting for the tip to change. + if (node.notifications) WITH_LOCK(node.notifications->m_tip_block_mutex, node.notifications->m_tip_block_cv.notify_all()); return true; }; } From 77557f9316be62762d12c8369550fed0155302fe Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Wed, 19 Feb 2025 12:44:03 +0100 Subject: [PATCH 088/356] rpc: drop unneeded IsRPCRunning() guards This was preventing the (hidden) waitfornewblock, waitforblock and waitforblockheight methods from being used in the GUI. The check was added in d6a5dc4a2eaa0d7348804254ca09e75fc3a858ab when these RPC methods were first introduced. They could have been dropped when dca923150e3ac10a57c23a7e29e76516d32ec10d refactored these methods to use waitTipChanged(), which already checks for shutdown. Making this change now simplifies the next commit. Github-Pull: #31785 Rebased-From: a3bf43343f0d88ec9ff847a55fd48745aeebb429 --- src/rpc/blockchain.cpp | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index ac1ce6285f..f7f082af39 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -287,9 +287,7 @@ static RPCHelpMan waitfornewblock() Mining& miner = EnsureMining(node); auto block{CHECK_NONFATAL(miner.getTip()).value()}; - if (IsRPCRunning()) { - block = timeout ? miner.waitTipChanged(block.hash, std::chrono::milliseconds(timeout)) : miner.waitTipChanged(block.hash); - } + block = timeout ? 
miner.waitTipChanged(block.hash, std::chrono::milliseconds(timeout)) : miner.waitTipChanged(block.hash); UniValue ret(UniValue::VOBJ); ret.pushKV("hash", block.hash.GetHex()); @@ -334,7 +332,7 @@ static RPCHelpMan waitforblock() auto block{CHECK_NONFATAL(miner.getTip()).value()}; const auto deadline{std::chrono::steady_clock::now() + 1ms * timeout}; - while (IsRPCRunning() && block.hash != hash) { + while (block.hash != hash) { if (timeout) { auto now{std::chrono::steady_clock::now()}; if (now >= deadline) break; @@ -390,7 +388,7 @@ static RPCHelpMan waitforblockheight() auto block{CHECK_NONFATAL(miner.getTip()).value()}; const auto deadline{std::chrono::steady_clock::now() + 1ms * timeout}; - while (IsRPCRunning() && block.height < height) { + while (block.height < height) { if (timeout) { auto now{std::chrono::steady_clock::now()}; if (now >= deadline) break; From 9eb4ca33ab8d7796ecee0a0ef7504e9586b8f1d9 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Wed, 19 Feb 2025 10:28:43 +0100 Subject: [PATCH 089/356] rpc: handle shutdown during long poll and wait methods The waitTipChanged() now returns nullopt if the node is shutting down. Previously it would return the last known tip during shutdown, but this creates an ambiguous circumstance in the scenario where the node is started and quickly shutdown, before notifications().TipBlock() is set. The getblocktemplate, waitfornewblock and waitforblockheight RPC are updated to handle this. Existing behavior is preserved. 
Co-authored-by: Ryan Ofsky Github-Pull: #31785 Rebased-From: 64a2795fd4fe223a55564c31e9fa36264e79ac22 --- src/interfaces/mining.h | 7 +++--- src/node/interfaces.cpp | 28 ++++++++++++++++------- src/rpc/blockchain.cpp | 50 ++++++++++++++++++++++++++++------------- src/rpc/mining.cpp | 6 ++++- 4 files changed, 63 insertions(+), 28 deletions(-) diff --git a/src/interfaces/mining.h b/src/interfaces/mining.h index bc5955ded6..c5fd7bd336 100644 --- a/src/interfaces/mining.h +++ b/src/interfaces/mining.h @@ -76,14 +76,15 @@ class Mining /** * Waits for the connected tip to change. During node initialization, this will - * wait until the tip is connected. + * wait until the tip is connected (regardless of `timeout`). * * @param[in] current_tip block hash of the current chain tip. Function waits * for the chain tip to differ from this. * @param[in] timeout how long to wait for a new tip - * @returns Hash and height of the current chain tip after this call. + * @retval BlockRef hash and height of the current chain tip after this call. + * @retval std::nullopt if the node is shut down. 
*/ - virtual BlockRef waitTipChanged(uint256 current_tip, MillisecondsDouble timeout = MillisecondsDouble::max()) = 0; + virtual std::optional waitTipChanged(uint256 current_tip, MillisecondsDouble timeout = MillisecondsDouble::max()) = 0; /** * Construct a new block template diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index f28e5dffbd..d751af573d 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -971,20 +971,32 @@ class MinerImpl : public Mining return BlockRef{tip->GetBlockHash(), tip->nHeight}; } - BlockRef waitTipChanged(uint256 current_tip, MillisecondsDouble timeout) override + std::optional waitTipChanged(uint256 current_tip, MillisecondsDouble timeout) override { if (timeout > std::chrono::years{100}) timeout = std::chrono::years{100}; // Upper bound to avoid UB in std::chrono + auto deadline{std::chrono::steady_clock::now() + timeout}; { WAIT_LOCK(notifications().m_tip_block_mutex, lock); - notifications().m_tip_block_cv.wait_for(lock, timeout, [&]() EXCLUSIVE_LOCKS_REQUIRED(notifications().m_tip_block_mutex) { - // We need to wait for m_tip_block to be set AND for the value - // to differ from the current_tip value. - return (notifications().TipBlock() && notifications().TipBlock() != current_tip) || chainman().m_interrupt; + // For callers convenience, wait longer than the provided timeout + // during startup for the tip to be non-null. That way this function + // always returns valid tip information when possible and only + // returns null when shutting down, not when timing out. + notifications().m_tip_block_cv.wait(lock, [&]() EXCLUSIVE_LOCKS_REQUIRED(notifications().m_tip_block_mutex) { + return notifications().TipBlock() || chainman().m_interrupt; + }); + if (chainman().m_interrupt) return {}; + // At this point TipBlock is set, so continue to wait until it is + // different then `current_tip` provided by caller. 
+ notifications().m_tip_block_cv.wait_until(lock, deadline, [&]() EXCLUSIVE_LOCKS_REQUIRED(notifications().m_tip_block_mutex) { + return Assume(notifications().TipBlock()) != current_tip || chainman().m_interrupt; }); } - // Must release m_tip_block_mutex before locking cs_main, to avoid deadlocks. - LOCK(::cs_main); - return BlockRef{chainman().ActiveChain().Tip()->GetBlockHash(), chainman().ActiveChain().Tip()->nHeight}; + + if (chainman().m_interrupt) return {}; + + // Must release m_tip_block_mutex before getTip() locks cs_main, to + // avoid deadlocks. + return getTip(); } std::unique_ptr createNewBlock(const BlockCreateOptions& options) override diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index f7f082af39..6e5c656f3d 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -64,6 +64,7 @@ using kernel::CCoinsStats; using kernel::CoinStatsHashType; +using interfaces::BlockRef; using interfaces::Mining; using node::BlockManager; using node::NodeContext; @@ -286,12 +287,17 @@ static RPCHelpMan waitfornewblock() NodeContext& node = EnsureAnyNodeContext(request.context); Mining& miner = EnsureMining(node); - auto block{CHECK_NONFATAL(miner.getTip()).value()}; - block = timeout ? miner.waitTipChanged(block.hash, std::chrono::milliseconds(timeout)) : miner.waitTipChanged(block.hash); + // Abort if RPC came out of warmup too early + BlockRef current_block{CHECK_NONFATAL(miner.getTip()).value()}; + std::optional block = timeout ? 
miner.waitTipChanged(current_block.hash, std::chrono::milliseconds(timeout)) : + miner.waitTipChanged(current_block.hash); + + // Return current block upon shutdown + if (block) current_block = *block; UniValue ret(UniValue::VOBJ); - ret.pushKV("hash", block.hash.GetHex()); - ret.pushKV("height", block.height); + ret.pushKV("hash", current_block.hash.GetHex()); + ret.pushKV("height", current_block.height); return ret; }, }; @@ -330,22 +336,28 @@ static RPCHelpMan waitforblock() NodeContext& node = EnsureAnyNodeContext(request.context); Mining& miner = EnsureMining(node); - auto block{CHECK_NONFATAL(miner.getTip()).value()}; + // Abort if RPC came out of warmup too early + BlockRef current_block{CHECK_NONFATAL(miner.getTip()).value()}; + const auto deadline{std::chrono::steady_clock::now() + 1ms * timeout}; - while (block.hash != hash) { + while (current_block.hash != hash) { + std::optional block; if (timeout) { auto now{std::chrono::steady_clock::now()}; if (now >= deadline) break; const MillisecondsDouble remaining{deadline - now}; - block = miner.waitTipChanged(block.hash, remaining); + block = miner.waitTipChanged(current_block.hash, remaining); } else { - block = miner.waitTipChanged(block.hash); + block = miner.waitTipChanged(current_block.hash); } + // Return current block upon shutdown + if (!block) break; + current_block = *block; } UniValue ret(UniValue::VOBJ); - ret.pushKV("hash", block.hash.GetHex()); - ret.pushKV("height", block.height); + ret.pushKV("hash", current_block.hash.GetHex()); + ret.pushKV("height", current_block.height); return ret; }, }; @@ -385,23 +397,29 @@ static RPCHelpMan waitforblockheight() NodeContext& node = EnsureAnyNodeContext(request.context); Mining& miner = EnsureMining(node); - auto block{CHECK_NONFATAL(miner.getTip()).value()}; + // Abort if RPC came out of warmup too early + BlockRef current_block{CHECK_NONFATAL(miner.getTip()).value()}; + const auto deadline{std::chrono::steady_clock::now() + 1ms * timeout}; - while 
(block.height < height) { + while (current_block.height < height) { + std::optional block; if (timeout) { auto now{std::chrono::steady_clock::now()}; if (now >= deadline) break; const MillisecondsDouble remaining{deadline - now}; - block = miner.waitTipChanged(block.hash, remaining); + block = miner.waitTipChanged(current_block.hash, remaining); } else { - block = miner.waitTipChanged(block.hash); + block = miner.waitTipChanged(current_block.hash); } + // Return current block on shutdown + if (!block) break; + current_block = *block; } UniValue ret(UniValue::VOBJ); - ret.pushKV("hash", block.hash.GetHex()); - ret.pushKV("height", block.height); + ret.pushKV("hash", current_block.hash.GetHex()); + ret.pushKV("height", current_block.height); return ret; }, }; diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index b527e686a4..ada1a15321 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -46,6 +46,7 @@ #include #include +using interfaces::BlockRef; using interfaces::BlockTemplate; using interfaces::Mining; using node::BlockAssembler; @@ -801,7 +802,10 @@ static RPCHelpMan getblocktemplate() { MillisecondsDouble checktxtime{std::chrono::minutes(1)}; while (tip == hashWatchedChain && IsRPCRunning()) { - tip = miner.waitTipChanged(hashWatchedChain, checktxtime).hash; + std::optional maybe_tip{miner.waitTipChanged(hashWatchedChain, checktxtime)}; + // Node is shutting down + if (!maybe_tip) break; + tip = maybe_tip->hash; // Timeout: Check transactions for update // without holding the mempool lock to avoid deadlocks if (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP) From 3968d7b5b90fd9fe1e02e00d0d15d18819680c4d Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Mon, 3 Feb 2025 18:11:00 +0100 Subject: [PATCH 090/356] Have createNewBlock() wait for a tip - return null on shutdown instead of the last tip - ignore timeout value node initialization This allows consumers of BlockTemplate to safely assume that a tip is connected, instead of having 
to account for startup and early shutdown scenarios. Github-Pull: #31785 Rebased-From: 5315278e7c7fb961fd749cd8e991d5c5c66dde11 --- src/interfaces/mining.h | 7 +++++-- src/node/interfaces.cpp | 3 +++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/src/interfaces/mining.h b/src/interfaces/mining.h index c5fd7bd336..73f907a0d1 100644 --- a/src/interfaces/mining.h +++ b/src/interfaces/mining.h @@ -87,10 +87,13 @@ class Mining virtual std::optional waitTipChanged(uint256 current_tip, MillisecondsDouble timeout = MillisecondsDouble::max()) = 0; /** - * Construct a new block template + * Construct a new block template. + * + * During node initialization, this will wait until the tip is connected. * * @param[in] options options for creating the block - * @returns a block template + * @retval BlockTemplate a block template. + * @retval std::nullptr if the node is shut down. */ virtual std::unique_ptr createNewBlock(const node::BlockCreateOptions& options = {}) = 0; diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index d751af573d..40a4200b71 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -1001,6 +1001,9 @@ class MinerImpl : public Mining std::unique_ptr createNewBlock(const BlockCreateOptions& options) override { + // Ensure m_tip_block is set so consumers of BlockTemplate can rely on that. 
+ if (!waitTipChanged(uint256::ZERO, MillisecondsDouble::max())) return {}; + BlockAssembler::Options assemble_options{options}; ApplyArgsManOptions(*Assert(m_node.args), assemble_options); return std::make_unique(BlockAssembler{chainman().ActiveChainstate(), context()->mempool.get(), assemble_options}.CreateNewBlock(), m_node); From 9a65f0d1feb44d282ab1516c32a9dda3c773406e Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 2 Oct 2025 16:25:43 +0000 Subject: [PATCH 091/356] Diff-minimise --- src/interfaces/mining.h | 12 ++++------ src/rpc/blockchain.cpp | 53 +++++++++++++++++++---------------------- 2 files changed, 28 insertions(+), 37 deletions(-) diff --git a/src/interfaces/mining.h b/src/interfaces/mining.h index 73f907a0d1..8a576b75d1 100644 --- a/src/interfaces/mining.h +++ b/src/interfaces/mining.h @@ -76,24 +76,20 @@ class Mining /** * Waits for the connected tip to change. During node initialization, this will - * wait until the tip is connected (regardless of `timeout`). + * wait until the tip is connected. * * @param[in] current_tip block hash of the current chain tip. Function waits * for the chain tip to differ from this. * @param[in] timeout how long to wait for a new tip - * @retval BlockRef hash and height of the current chain tip after this call. - * @retval std::nullopt if the node is shut down. + * @returns Hash and height of the current chain tip after this call. */ virtual std::optional waitTipChanged(uint256 current_tip, MillisecondsDouble timeout = MillisecondsDouble::max()) = 0; /** - * Construct a new block template. - * - * During node initialization, this will wait until the tip is connected. + * Construct a new block template * * @param[in] options options for creating the block - * @retval BlockTemplate a block template. - * @retval std::nullptr if the node is shut down. 
+ * @returns a block template */ virtual std::unique_ptr createNewBlock(const node::BlockCreateOptions& options = {}) = 0; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 6e5c656f3d..1273a37713 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -287,17 +287,16 @@ static RPCHelpMan waitfornewblock() NodeContext& node = EnsureAnyNodeContext(request.context); Mining& miner = EnsureMining(node); - // Abort if RPC came out of warmup too early - BlockRef current_block{CHECK_NONFATAL(miner.getTip()).value()}; - std::optional block = timeout ? miner.waitTipChanged(current_block.hash, std::chrono::milliseconds(timeout)) : - miner.waitTipChanged(current_block.hash); + auto block{CHECK_NONFATAL(miner.getTip()).value()}; + std::optional new_block = timeout ? miner.waitTipChanged(block.hash, std::chrono::milliseconds(timeout)) : + miner.waitTipChanged(block.hash); // Return current block upon shutdown - if (block) current_block = *block; + if (new_block) block = *new_block; UniValue ret(UniValue::VOBJ); - ret.pushKV("hash", current_block.hash.GetHex()); - ret.pushKV("height", current_block.height); + ret.pushKV("hash", block.hash.GetHex()); + ret.pushKV("height", block.height); return ret; }, }; @@ -336,28 +335,26 @@ static RPCHelpMan waitforblock() NodeContext& node = EnsureAnyNodeContext(request.context); Mining& miner = EnsureMining(node); - // Abort if RPC came out of warmup too early - BlockRef current_block{CHECK_NONFATAL(miner.getTip()).value()}; - + auto block{CHECK_NONFATAL(miner.getTip()).value()}; const auto deadline{std::chrono::steady_clock::now() + 1ms * timeout}; - while (current_block.hash != hash) { - std::optional block; + while (block.hash != hash) { + std::optional new_block; if (timeout) { auto now{std::chrono::steady_clock::now()}; if (now >= deadline) break; const MillisecondsDouble remaining{deadline - now}; - block = miner.waitTipChanged(current_block.hash, remaining); + new_block = miner.waitTipChanged(block.hash, 
remaining); } else { - block = miner.waitTipChanged(current_block.hash); + new_block = miner.waitTipChanged(block.hash); } // Return current block upon shutdown - if (!block) break; - current_block = *block; + if (!new_block) break; + block = *new_block; } UniValue ret(UniValue::VOBJ); - ret.pushKV("hash", current_block.hash.GetHex()); - ret.pushKV("height", current_block.height); + ret.pushKV("hash", block.hash.GetHex()); + ret.pushKV("height", block.height); return ret; }, }; @@ -397,29 +394,27 @@ static RPCHelpMan waitforblockheight() NodeContext& node = EnsureAnyNodeContext(request.context); Mining& miner = EnsureMining(node); - // Abort if RPC came out of warmup too early - BlockRef current_block{CHECK_NONFATAL(miner.getTip()).value()}; - + auto block{CHECK_NONFATAL(miner.getTip()).value()}; const auto deadline{std::chrono::steady_clock::now() + 1ms * timeout}; - while (current_block.height < height) { - std::optional block; + while (block.height < height) { + std::optional new_block; if (timeout) { auto now{std::chrono::steady_clock::now()}; if (now >= deadline) break; const MillisecondsDouble remaining{deadline - now}; - block = miner.waitTipChanged(current_block.hash, remaining); + new_block = miner.waitTipChanged(block.hash, remaining); } else { - block = miner.waitTipChanged(current_block.hash); + new_block = miner.waitTipChanged(block.hash); } // Return current block on shutdown - if (!block) break; - current_block = *block; + if (!new_block) break; + block = *new_block; } UniValue ret(UniValue::VOBJ); - ret.pushKV("hash", current_block.hash.GetHex()); - ret.pushKV("height", current_block.height); + ret.pushKV("hash", block.hash.GetHex()); + ret.pushKV("height", block.height); return ret; }, }; From 28fc9820345a8a6068d051e79895030802417beb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Tue, 15 Jul 2025 14:54:52 -0700 Subject: [PATCH 092/356] random: add fixed-size `std::array` generation Co-authored-by: Hodlinator 
<172445034+hodlinator@users.noreply.github.com> Github-Pull: #31144 Rebased-From: 7aa557a37b73df264afffdc7c00fba47a339aee0 --- src/random.h | 9 +++++++++ src/test/random_tests.cpp | 2 +- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/random.h b/src/random.h index 203678b17c..a83733b773 100644 --- a/src/random.h +++ b/src/random.h @@ -301,6 +301,15 @@ class RandomMixin return ret; } + /** Generate fixed-size random bytes. */ + template + std::array randbytes() noexcept + { + std::array ret; + Impl().fillrand(MakeWritableByteSpan(ret)); + return ret; + } + /** Generate a random 32-bit integer. */ uint32_t rand32() noexcept { return Impl().template randbits<32>(); } diff --git a/src/test/random_tests.cpp b/src/test/random_tests.cpp index 3d8b543e64..538d41125a 100644 --- a/src/test/random_tests.cpp +++ b/src/test/random_tests.cpp @@ -58,7 +58,7 @@ BOOST_AUTO_TEST_CASE(fastrandom_tests_deterministic) BOOST_CHECK_EQUAL(ctx1.rand32(), ctx2.rand32()); BOOST_CHECK_EQUAL(ctx1.rand64(), ctx2.rand64()); BOOST_CHECK_EQUAL(ctx1.randbits(3), ctx2.randbits(3)); - BOOST_CHECK(ctx1.randbytes(17) == ctx2.randbytes(17)); + BOOST_CHECK(std::ranges::equal(ctx1.randbytes(17), ctx2.randbytes<17>())); // check vector/array behavior symmetry BOOST_CHECK(ctx1.rand256() == ctx2.rand256()); BOOST_CHECK_EQUAL(ctx1.randbits(7), ctx2.randbits(7)); BOOST_CHECK(ctx1.randbytes(128) == ctx2.randbytes(128)); From 9106f217306a6b3ab5aa62e34485cc8b41dd009b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Tue, 15 Jul 2025 14:54:58 -0700 Subject: [PATCH 093/356] refactor: commit to 8 byte obfuscation keys Since 31 byte xor-keys are not used in the codebase, using the common size (8 bytes) makes the benchmarks more realistic. 
Co-authored-by: maflcko <6399679+maflcko@users.noreply.github.com> Github-Pull: #31144 Rebased-From: 54ab0bd64c3657f2f9be83ba587a0811e0dd8ca1 --- src/bench/xor.cpp | 3 ++- src/dbwrapper.cpp | 7 +++---- src/dbwrapper.h | 3 --- src/node/blockstorage.cpp | 3 ++- src/node/mempool_persist.cpp | 3 ++- src/test/fuzz/autofile.cpp | 6 ++++-- src/test/fuzz/buffered_file.cpp | 6 ++++-- src/test/streams_tests.cpp | 5 +++-- src/util/obfuscation.h | 16 ++++++++++++++++ 9 files changed, 36 insertions(+), 16 deletions(-) create mode 100644 src/util/obfuscation.h diff --git a/src/bench/xor.cpp b/src/bench/xor.cpp index fc9dc5d172..f3d6145c2b 100644 --- a/src/bench/xor.cpp +++ b/src/bench/xor.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -14,7 +15,7 @@ static void Xor(benchmark::Bench& bench) { FastRandomContext frc{/*fDeterministic=*/true}; auto data{frc.randbytes(1024)}; - auto key{frc.randbytes(31)}; + auto key{frc.randbytes(Obfuscation::KEY_SIZE)}; bench.batch(data.size()).unit("byte").run([&] { util::Xor(data, key); diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp index 8fb366515a..41ea7789ea 100644 --- a/src/dbwrapper.cpp +++ b/src/dbwrapper.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -256,7 +257,7 @@ CDBWrapper::CDBWrapper(const DBParams& params) } // The base-case obfuscation key, which is a noop. - obfuscate_key = std::vector(OBFUSCATE_KEY_NUM_BYTES, '\000'); + obfuscate_key = std::vector(Obfuscation::KEY_SIZE, '\000'); bool key_exists = Read(OBFUSCATE_KEY_KEY, obfuscate_key); @@ -323,15 +324,13 @@ size_t CDBWrapper::DynamicMemoryUsage() const // past the null-terminator. const std::string CDBWrapper::OBFUSCATE_KEY_KEY("\000obfuscate_key", 14); -const unsigned int CDBWrapper::OBFUSCATE_KEY_NUM_BYTES = 8; - /** * Returns a string (consisting of 8 random bytes) suitable for use as an * obfuscating XOR key. 
*/ std::vector CDBWrapper::CreateObfuscateKey() const { - std::vector ret(OBFUSCATE_KEY_NUM_BYTES); + std::vector ret(Obfuscation::KEY_SIZE); GetRandBytes(ret); return ret; } diff --git a/src/dbwrapper.h b/src/dbwrapper.h index dd5daa7a1f..d07ac3b671 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -195,9 +195,6 @@ class CDBWrapper //! the key under which the obfuscation key is stored static const std::string OBFUSCATE_KEY_KEY; - //! the length of the obfuscate key in number of bytes - static const unsigned int OBFUSCATE_KEY_NUM_BYTES; - std::vector CreateObfuscateKey() const; //! path to filesystem storage diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 4179cc8c64..f361edc303 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -1136,7 +1137,7 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) { // Bytes are serialized without length indicator, so this is also the exact // size of the XOR-key file. - std::array xor_key{}; + std::array xor_key{}; // Consider this to be the first run if the blocksdir contains only hidden // files (those which start with a .). Checking for a fully-empty dir would diff --git a/src/node/mempool_persist.cpp b/src/node/mempool_persist.cpp index a78838c71e..e09f318c68 100644 --- a/src/node/mempool_persist.cpp +++ b/src/node/mempool_persist.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -178,7 +179,7 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock const uint64_t version{pool.m_opts.persist_v1_dat ? 
MEMPOOL_DUMP_VERSION_NO_XOR_KEY : MEMPOOL_DUMP_VERSION}; file << version; - std::vector xor_key(8); + std::vector xor_key(Obfuscation::KEY_SIZE); if (!pool.m_opts.persist_v1_dat) { FastRandomContext{}.fillrand(xor_key); file << xor_key; diff --git a/src/test/fuzz/autofile.cpp b/src/test/fuzz/autofile.cpp index f081ca5545..aced09ab9b 100644 --- a/src/test/fuzz/autofile.cpp +++ b/src/test/fuzz/autofile.cpp @@ -4,9 +4,10 @@ #include #include -#include #include +#include #include +#include #include #include @@ -18,9 +19,10 @@ FUZZ_TARGET(autofile) { FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; FuzzedFileProvider fuzzed_file_provider{fuzzed_data_provider}; + const auto key_bytes{ConsumeFixedLengthByteVector(fuzzed_data_provider, Obfuscation::KEY_SIZE)}; AutoFile auto_file{ fuzzed_file_provider.open(), - ConsumeRandomLengthByteVector(fuzzed_data_provider), + key_bytes, }; LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 100) { diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp index 2923c39aaf..0f3118bc6e 100644 --- a/src/test/fuzz/buffered_file.cpp +++ b/src/test/fuzz/buffered_file.cpp @@ -4,9 +4,10 @@ #include #include -#include #include +#include #include +#include #include #include @@ -20,9 +21,10 @@ FUZZ_TARGET(buffered_file) FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; FuzzedFileProvider fuzzed_file_provider{fuzzed_data_provider}; std::optional opt_buffered_file; + const auto key_bytes{ConsumeFixedLengthByteVector(fuzzed_data_provider, Obfuscation::KEY_SIZE)}; AutoFile fuzzed_file{ fuzzed_file_provider.open(), - ConsumeRandomLengthByteVector(fuzzed_data_provider), + key_bytes, }; try { auto n_buf_size = fuzzed_data_provider.ConsumeIntegralInRange(0, 4096); diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index eaf8f52aae..f406b7ce89 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include 
#include @@ -563,7 +564,7 @@ BOOST_AUTO_TEST_CASE(buffered_reader_matches_autofile_random_content) const FlatFilePos pos{0, 0}; const FlatFileSeq test_file{m_args.GetDataDirBase(), "buffered_file_test_random", node::BLOCKFILE_CHUNK_SIZE}; - const std::vector obfuscation{m_rng.randbytes(8)}; + const std::vector obfuscation{m_rng.randbytes(Obfuscation::KEY_SIZE)}; // Write out the file with random content { @@ -618,7 +619,7 @@ BOOST_AUTO_TEST_CASE(buffered_writer_matches_autofile_random_content) const FlatFileSeq test_buffered{m_args.GetDataDirBase(), "buffered_write_test", node::BLOCKFILE_CHUNK_SIZE}; const FlatFileSeq test_direct{m_args.GetDataDirBase(), "direct_write_test", node::BLOCKFILE_CHUNK_SIZE}; - const std::vector obfuscation{m_rng.randbytes(8)}; + const std::vector obfuscation{m_rng.randbytes(Obfuscation::KEY_SIZE)}; { DataBuffer test_data{m_rng.randbytes(file_size)}; diff --git a/src/util/obfuscation.h b/src/util/obfuscation.h new file mode 100644 index 0000000000..628dacfc9d --- /dev/null +++ b/src/util/obfuscation.h @@ -0,0 +1,16 @@ +// Copyright (c) 2025-present The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+
+#ifndef BITCOIN_UTIL_OBFUSCATION_H
+#define BITCOIN_UTIL_OBFUSCATION_H
+
+#include <cstdint>
+
+class Obfuscation
+{
+public:
+    static constexpr size_t KEY_SIZE{sizeof(uint64_t)};
+};
+
+#endif // BITCOIN_UTIL_OBFUSCATION_H

From e17617cb5af8c23a355a3ed887dbd028f0e785b8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?L=C5=91rinc?= <pap.lorinc@gmail.com>
Date: Wed, 16 Jul 2025 14:09:56 -0700
Subject: [PATCH 094/356] test: make sure dbwrapper obfuscation key is never
 obfuscated

Github-Pull: #31144
Rebased-From: a5141cd39ecbd3a7bbae5bf2f755cc5aa7c41da3
---
 src/test/dbwrapper_tests.cpp | 60 +++++++++++++++++++++++++++---------
 1 file changed, 46 insertions(+), 14 deletions(-)

diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp
index 3a86036327..bd50db17c1 100644
--- a/src/test/dbwrapper_tests.cpp
+++ b/src/test/dbwrapper_tests.cpp
@@ -30,18 +30,50 @@ BOOST_AUTO_TEST_CASE(dbwrapper)
 {
     // Perform tests both obfuscated and non-obfuscated.
     for (const bool obfuscate : {false, true}) {
-        fs::path ph = m_args.GetDataDirBase() / (obfuscate ?
"dbwrapper_obfuscate_true" : "dbwrapper_obfuscate_false"); - CDBWrapper dbw({.path = ph, .cache_bytes = 1 << 20, .memory_only = true, .wipe_data = false, .obfuscate = obfuscate}); - uint8_t key{'k'}; - uint256 in = m_rng.rand256(); - uint256 res; + constexpr size_t CACHE_SIZE{1_MiB}; + const fs::path path{m_args.GetDataDirBase() / "dbwrapper"}; + + std::vector obfuscation_key{}; + std::vector> key_values{}; + + // Write values + { + CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .wipe_data = true, .obfuscate = obfuscate}}; + BOOST_CHECK_EQUAL(obfuscate, !dbw.IsEmpty()); + + // Ensure that we're doing real obfuscation when obfuscate=true + obfuscation_key = dbwrapper_private::GetObfuscateKey(dbw); + BOOST_CHECK_EQUAL(obfuscate, !is_null_key(obfuscation_key)); + + for (uint8_t k{0}; k < 10; ++k) { + uint8_t key{k}; + uint256 value{m_rng.rand256()}; + BOOST_CHECK(dbw.Write(key, value)); + key_values.emplace_back(key, value); + } + } - // Ensure that we're doing real obfuscation when obfuscate=true - BOOST_CHECK(obfuscate != is_null_key(dbwrapper_private::GetObfuscateKey(dbw))); + // Verify that the obfuscation key is never obfuscated + { + CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .obfuscate = false}}; + BOOST_CHECK(obfuscation_key == dbwrapper_private::GetObfuscateKey(dbw)); + } - BOOST_CHECK(dbw.Write(key, in)); - BOOST_CHECK(dbw.Read(key, res)); - BOOST_CHECK_EQUAL(res.ToString(), in.ToString()); + // Read back the values + { + CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .obfuscate = obfuscate}}; + + // Ensure obfuscation is read back correctly + BOOST_CHECK(obfuscation_key == dbwrapper_private::GetObfuscateKey(dbw)); + BOOST_CHECK_EQUAL(obfuscate, !is_null_key(obfuscation_key)); + + // Verify all written values + for (const auto& [key, expected_value] : key_values) { + uint256 read_value{}; + BOOST_CHECK(dbw.Read(key, read_value)); + BOOST_CHECK_EQUAL(read_value, expected_value); + } + } } } @@ -57,7 +89,7 @@ 
BOOST_AUTO_TEST_CASE(dbwrapper_basic_data) bool res_bool; // Ensure that we're doing real obfuscation when obfuscate=true - BOOST_CHECK(obfuscate != is_null_key(dbwrapper_private::GetObfuscateKey(dbw))); + BOOST_CHECK_EQUAL(obfuscate, !is_null_key(dbwrapper_private::GetObfuscateKey(dbw))); //Simulate block raw data - "b + block hash" std::string key_block = "b" + m_rng.rand256().ToString(); @@ -116,13 +148,13 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data) std::string file_option_tag = "F"; uint8_t filename_length = m_rng.randbits(8); std::string filename = "randomfilename"; - std::string key_file_option = strprintf("%s%01x%s", file_option_tag,filename_length,filename); + std::string key_file_option = strprintf("%s%01x%s", file_option_tag, filename_length, filename); bool in_file_bool = m_rng.randbool(); BOOST_CHECK(dbw.Write(key_file_option, in_file_bool)); BOOST_CHECK(dbw.Read(key_file_option, res_bool)); BOOST_CHECK_EQUAL(res_bool, in_file_bool); - } + } } // Test batch operations @@ -231,7 +263,7 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate) BOOST_CHECK(odbw.Read(key, res2)); BOOST_CHECK_EQUAL(res2.ToString(), in.ToString()); - BOOST_CHECK(!odbw.IsEmpty()); // There should be existing data + BOOST_CHECK(!odbw.IsEmpty()); BOOST_CHECK(is_null_key(dbwrapper_private::GetObfuscateKey(odbw))); // The key should be an empty string uint256 in2 = m_rng.rand256(); From e5362bfc2b423b1005ec636cc6faa8deaabc0930 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Wed, 16 Jul 2025 14:09:59 -0700 Subject: [PATCH 095/356] test: compare util::Xor with randomized inputs against simple impl The two tests are doing different things - `xor_roundtrip_random_chunks` does black-box style property-based testing to validate that certain invariants hold - that deobfuscating an obfuscation results in the original message (higher level, it doesn't have to know about the implementation details). 
The `xor_bytes_reference` test makes sure the optimized xor implementation behaves in every imaginable scenario exactly as the simplest possible obfuscation - with random chunks, random alignment, random data, random key. Since we're touching the file, other related small refactors were also applied: * `nullpt` typo fixed; * manual byte-by-byte xor key creations were replaced with `_hex` factories; * since we're only using 64 bit keys in production, smaller keys were changed to reflect real-world usage; Co-authored-by: Hodlinator <172445034+hodlinator@users.noreply.github.com> Github-Pull: #31144 Rebased-From: 618a30e326e9bcfd72e0e2645ce49f8b2a88714d --- src/test/streams_tests.cpp | 99 ++++++++++++++++++++++++++++---------- 1 file changed, 74 insertions(+), 25 deletions(-) diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index f406b7ce89..196420fd67 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -14,22 +14,79 @@ #include using namespace std::string_literals; +using namespace util::hex_literals; BOOST_FIXTURE_TEST_SUITE(streams_tests, BasicTestingSetup) +// Test that obfuscation can be properly reverted even with random chunk sizes. +BOOST_AUTO_TEST_CASE(xor_roundtrip_random_chunks) +{ + auto apply_random_xor_chunks{[&](std::span target, std::span obfuscation) { + for (size_t offset{0}; offset < target.size();) { + const size_t chunk_size{1 + m_rng.randrange(target.size() - offset)}; + util::Xor(target.subspan(offset, chunk_size), obfuscation, offset); + offset += chunk_size; + } + }}; + + for (size_t test{0}; test < 100; ++test) { + const size_t write_size{1 + m_rng.randrange(100U)}; + const std::vector original{m_rng.randbytes(write_size)}; + std::vector roundtrip{original}; + + const auto key_bytes{m_rng.randbool() ? 
m_rng.randbytes() : std::array{}}; + apply_random_xor_chunks(roundtrip, key_bytes); + + const bool key_all_zeros{std::ranges::all_of( + std::span{key_bytes}.first(std::min(write_size, Obfuscation::KEY_SIZE)), [](auto b) { return b == std::byte{0}; })}; + BOOST_CHECK(key_all_zeros ? original == roundtrip : original != roundtrip); + + apply_random_xor_chunks(roundtrip, key_bytes); + BOOST_CHECK(original == roundtrip); + } +} + +// Compares optimized obfuscation against a trivial, byte-by-byte reference implementation +// with random offsets to ensure proper handling of key wrapping. +BOOST_AUTO_TEST_CASE(xor_bytes_reference) +{ + auto expected_xor{[](std::span target, std::span obfuscation, size_t key_offset) { + for (auto& b : target) { + b ^= obfuscation[key_offset++ % obfuscation.size()]; + } + }}; + + for (size_t test{0}; test < 100; ++test) { + const size_t write_size{1 + m_rng.randrange(100U)}; + const size_t key_offset{m_rng.randrange(3 * Obfuscation::KEY_SIZE)}; // Make sure the key can wrap around + const size_t write_offset{std::min(write_size, m_rng.randrange(Obfuscation::KEY_SIZE * 2))}; // Write unaligned data + + const auto key_bytes{m_rng.randbool() ? 
m_rng.randbytes() : std::array{}}; + const std::vector obfuscation{key_bytes.begin(), key_bytes.end()}; + std::vector expected{m_rng.randbytes(write_size)}; + std::vector actual{expected}; + + expected_xor(std::span{expected}.subspan(write_offset), key_bytes, key_offset); + util::Xor(std::span{actual}.subspan(write_offset), key_bytes, key_offset); + + BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), actual.begin(), actual.end()); + } +} + BOOST_AUTO_TEST_CASE(xor_file) { fs::path xor_path{m_args.GetDataDirBase() / "test_xor.bin"}; auto raw_file{[&](const auto& mode) { return fsbridge::fopen(xor_path, mode); }}; const std::vector test1{1, 2, 3}; const std::vector test2{4, 5}; - const std::vector xor_pat{std::byte{0xff}, std::byte{0x00}}; + const auto xor_pat{"ff00ff00ff00ff00"_hex_v}; + { // Check errors for missing file AutoFile xor_file{raw_file("rb"), xor_pat}; - BOOST_CHECK_EXCEPTION(xor_file << std::byte{}, std::ios_base::failure, HasReason{"AutoFile::write: file handle is nullpt"}); - BOOST_CHECK_EXCEPTION(xor_file >> std::byte{}, std::ios_base::failure, HasReason{"AutoFile::read: file handle is nullpt"}); - BOOST_CHECK_EXCEPTION(xor_file.ignore(1), std::ios_base::failure, HasReason{"AutoFile::ignore: file handle is nullpt"}); + BOOST_CHECK_EXCEPTION(xor_file << std::byte{}, std::ios_base::failure, HasReason{"AutoFile::write: file handle is nullptr"}); + BOOST_CHECK_EXCEPTION(xor_file >> std::byte{}, std::ios_base::failure, HasReason{"AutoFile::read: file handle is nullptr"}); + BOOST_CHECK_EXCEPTION(xor_file.ignore(1), std::ios_base::failure, HasReason{"AutoFile::ignore: file handle is nullptr"}); } { #ifdef __MINGW64__ @@ -77,7 +134,7 @@ BOOST_AUTO_TEST_CASE(streams_vector_writer) { unsigned char a(1); unsigned char b(2); - unsigned char bytes[] = { 3, 4, 5, 6 }; + unsigned char bytes[] = {3, 4, 5, 6}; std::vector vch; // Each test runs twice. 
Serializing a second time at the same starting @@ -224,34 +281,26 @@ BOOST_AUTO_TEST_CASE(bitstream_reader_writer) BOOST_AUTO_TEST_CASE(streams_serializedata_xor) { - std::vector in; - // Degenerate case { - DataStream ds{in}; - ds.Xor({0x00, 0x00}); + DataStream ds{}; + ds.Xor("0000000000000000"_hex_v_u8); BOOST_CHECK_EQUAL(""s, ds.str()); } - in.push_back(std::byte{0x0f}); - in.push_back(std::byte{0xf0}); - - // Single character key { - DataStream ds{in}; - ds.Xor({0xff}); + const auto obfuscation{"ffffffffffffffff"_hex_v_u8}; + + DataStream ds{"0ff0"_hex}; + ds.Xor(obfuscation); BOOST_CHECK_EQUAL("\xf0\x0f"s, ds.str()); } - // Multi character key - - in.clear(); - in.push_back(std::byte{0xf0}); - in.push_back(std::byte{0x0f}); - { - DataStream ds{in}; - ds.Xor({0xff, 0x0f}); + const auto obfuscation{"ff0fff0fff0fff0f"_hex_v_u8}; + + DataStream ds{"f00f"_hex}; + ds.Xor(obfuscation); BOOST_CHECK_EQUAL("\x0f\x00"s, ds.str()); } } @@ -564,7 +613,7 @@ BOOST_AUTO_TEST_CASE(buffered_reader_matches_autofile_random_content) const FlatFilePos pos{0, 0}; const FlatFileSeq test_file{m_args.GetDataDirBase(), "buffered_file_test_random", node::BLOCKFILE_CHUNK_SIZE}; - const std::vector obfuscation{m_rng.randbytes(Obfuscation::KEY_SIZE)}; + const auto obfuscation{m_rng.randbytes(Obfuscation::KEY_SIZE)}; // Write out the file with random content { @@ -619,7 +668,7 @@ BOOST_AUTO_TEST_CASE(buffered_writer_matches_autofile_random_content) const FlatFileSeq test_buffered{m_args.GetDataDirBase(), "buffered_write_test", node::BLOCKFILE_CHUNK_SIZE}; const FlatFileSeq test_direct{m_args.GetDataDirBase(), "direct_write_test", node::BLOCKFILE_CHUNK_SIZE}; - const std::vector obfuscation{m_rng.randbytes(Obfuscation::KEY_SIZE)}; + const auto obfuscation{m_rng.randbytes(Obfuscation::KEY_SIZE)}; { DataBuffer test_data{m_rng.randbytes(file_size)}; From 07c5904343187b77478c3ece69e6471eb34ce277 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Fri, 6 Dec 2024 16:18:03 +0100 Subject: 
[PATCH 096/356] bench: make ObfuscationBench more representative A previous PR already solved the tiny byte-array-xors during serialization, so it makes sense to keep focusing on the performance of bigger continuous chunks. This also renames the file from `xor` to `obfuscation` to enable scripted diff name unification later. > C++ compiler .......................... GNU 14.2.0 | ns/byte | byte/s | err% | ins/byte | cyc/byte | IPC | bra/byte | miss% | total | benchmark |--------------------:|--------------------:|--------:|----------------:|----------------:|-------:|---------------:|--------:|----------:|:---------- | 0.84 | 1,184,138,235.64 | 0.0% | 9.01 | 3.03 | 2.971 | 1.00 | 0.1% | 5.50 | `ObfuscationBench` > C++ compiler .......................... Clang 20.1.7 | ns/byte | byte/s | err% | ins/byte | cyc/byte | IPC | bra/byte | miss% | total | benchmark |--------------------:|--------------------:|--------:|----------------:|----------------:|-------:|---------------:|--------:|----------:|:---------- | 0.89 | 1,124,087,330.23 | 0.1% | 6.52 | 3.20 | 2.041 | 0.50 | 0.2% | 5.50 | `ObfuscationBench` Github-Pull: #31144 Rebased-From: 972697976c027b5199150a98e886c199b7ffc335 --- src/bench/CMakeLists.txt | 2 +- src/bench/{xor.cpp => obfuscation.cpp} | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) rename src/bench/{xor.cpp => obfuscation.cpp} (59%) diff --git a/src/bench/CMakeLists.txt b/src/bench/CMakeLists.txt index 16eb29250f..eecc197b7f 100644 --- a/src/bench/CMakeLists.txt +++ b/src/bench/CMakeLists.txt @@ -34,6 +34,7 @@ add_executable(bench_bitcoin mempool_eviction.cpp mempool_stress.cpp merkle_root.cpp + obfuscation.cpp parse_hex.cpp peer_eviction.cpp poly1305.cpp @@ -49,7 +50,6 @@ add_executable(bench_bitcoin strencodings.cpp util_time.cpp verify_script.cpp - xor.cpp ) include(TargetDataSources) diff --git a/src/bench/xor.cpp b/src/bench/obfuscation.cpp similarity index 59% rename from src/bench/xor.cpp rename to src/bench/obfuscation.cpp 
index f3d6145c2b..27a254f803 100644 --- a/src/bench/xor.cpp +++ b/src/bench/obfuscation.cpp @@ -4,22 +4,23 @@ #include #include -#include #include #include #include #include -static void Xor(benchmark::Bench& bench) +static void ObfuscationBench(benchmark::Bench& bench) { FastRandomContext frc{/*fDeterministic=*/true}; auto data{frc.randbytes(1024)}; - auto key{frc.randbytes(Obfuscation::KEY_SIZE)}; + const auto key{frc.randbytes()}; + size_t offset{0}; bench.batch(data.size()).unit("byte").run([&] { - util::Xor(data, key); + util::Xor(data, key, offset++); // mutated differently each time + ankerl::nanobench::doNotOptimizeAway(data); }); } -BENCHMARK(Xor, benchmark::PriorityLevel::HIGH); +BENCHMARK(ObfuscationBench, benchmark::PriorityLevel::HIGH); From 0cd45d286497224f406d64e06780e51bc3596ae1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Fri, 25 Apr 2025 23:18:48 +0200 Subject: [PATCH 097/356] scripted-diff: unify xor-vs-obfuscation nomenclature Mechanical refactor of the low-level "xor" wording to signal the intent instead of the implementation used. The renames are ordered by heaviest-hitting substitutions first, and were constructed such that after each replacement the code is still compilable. 
-BEGIN VERIFY SCRIPT- sed -i \ -e 's/\bGetObfuscateKey\b/GetObfuscation/g' \ -e 's/\bxor_key\b/obfuscation/g' \ -e 's/\bxor_pat\b/obfuscation/g' \ -e 's/\bm_xor_key\b/m_obfuscation/g' \ -e 's/\bm_xor\b/m_obfuscation/g' \ -e 's/\bobfuscate_key\b/m_obfuscation/g' \ -e 's/\bOBFUSCATE_KEY_KEY\b/OBFUSCATION_KEY_KEY/g' \ -e 's/\bSetXor(/SetObfuscation(/g' \ -e 's/\bdata_xor\b/obfuscation/g' \ -e 's/\bCreateObfuscateKey\b/CreateObfuscation/g' \ -e 's/\bobfuscate key\b/obfuscation key/g' \ $(git ls-files '*.cpp' '*.h') -END VERIFY SCRIPT- Github-Pull: #31144 Rebased-From: 0b8bec8aa6260c499c2663ab7a1c905da0d312c3 --- src/dbwrapper.cpp | 24 ++++++++++++------------ src/dbwrapper.h | 14 +++++++------- src/node/blockstorage.cpp | 22 +++++++++++----------- src/node/blockstorage.h | 2 +- src/node/mempool_persist.cpp | 14 +++++++------- src/streams.cpp | 14 +++++++------- src/streams.h | 6 +++--- src/test/dbwrapper_tests.cpp | 12 ++++++------ src/test/streams_tests.cpp | 10 +++++----- 9 files changed, 59 insertions(+), 59 deletions(-) diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp index 41ea7789ea..e196aacc38 100644 --- a/src/dbwrapper.cpp +++ b/src/dbwrapper.cpp @@ -172,7 +172,7 @@ void CDBBatch::Clear() void CDBBatch::WriteImpl(Span key, DataStream& ssValue) { leveldb::Slice slKey(CharCast(key.data()), key.size()); - ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent)); + ssValue.Xor(dbwrapper_private::GetObfuscation(parent)); leveldb::Slice slValue(CharCast(ssValue.data()), ssValue.size()); m_impl_batch->batch.Put(slKey, slValue); // LevelDB serializes writes as: @@ -257,23 +257,23 @@ CDBWrapper::CDBWrapper(const DBParams& params) } // The base-case obfuscation key, which is a noop. 
- obfuscate_key = std::vector(Obfuscation::KEY_SIZE, '\000'); + m_obfuscation = std::vector(Obfuscation::KEY_SIZE, '\000'); - bool key_exists = Read(OBFUSCATE_KEY_KEY, obfuscate_key); + bool key_exists = Read(OBFUSCATION_KEY_KEY, m_obfuscation); if (!key_exists && params.obfuscate && IsEmpty()) { // Initialize non-degenerate obfuscation if it won't upset // existing, non-obfuscated data. - std::vector new_key = CreateObfuscateKey(); + std::vector new_key = CreateObfuscation(); // Write `new_key` so we don't obfuscate the key with itself - Write(OBFUSCATE_KEY_KEY, new_key); - obfuscate_key = new_key; + Write(OBFUSCATION_KEY_KEY, new_key); + m_obfuscation = new_key; - LogPrintf("Wrote new obfuscate key for %s: %s\n", fs::PathToString(params.path), HexStr(obfuscate_key)); + LogPrintf("Wrote new obfuscation key for %s: %s\n", fs::PathToString(params.path), HexStr(m_obfuscation)); } - LogPrintf("Using obfuscation key for %s: %s\n", fs::PathToString(params.path), HexStr(obfuscate_key)); + LogPrintf("Using obfuscation key for %s: %s\n", fs::PathToString(params.path), HexStr(m_obfuscation)); } CDBWrapper::~CDBWrapper() @@ -322,13 +322,13 @@ size_t CDBWrapper::DynamicMemoryUsage() const // // We must use a string constructor which specifies length so that we copy // past the null-terminator. -const std::string CDBWrapper::OBFUSCATE_KEY_KEY("\000obfuscate_key", 14); +const std::string CDBWrapper::OBFUSCATION_KEY_KEY("\000obfuscate_key", 14); /** * Returns a string (consisting of 8 random bytes) suitable for use as an * obfuscating XOR key. 
*/ -std::vector CDBWrapper::CreateObfuscateKey() const +std::vector CDBWrapper::CreateObfuscation() const { std::vector ret(Obfuscation::KEY_SIZE); GetRandBytes(ret); @@ -418,9 +418,9 @@ void CDBIterator::Next() { m_impl_iter->iter->Next(); } namespace dbwrapper_private { -const std::vector& GetObfuscateKey(const CDBWrapper &w) +const std::vector& GetObfuscation(const CDBWrapper &w) { - return w.obfuscate_key; + return w.m_obfuscation; } } // namespace dbwrapper_private diff --git a/src/dbwrapper.h b/src/dbwrapper.h index d07ac3b671..2fa2b06ae7 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -63,7 +63,7 @@ namespace dbwrapper_private { * Database obfuscation should be considered an implementation detail of the * specific database. */ -const std::vector& GetObfuscateKey(const CDBWrapper &w); +const std::vector& GetObfuscation(const CDBWrapper &w); }; // namespace dbwrapper_private @@ -168,7 +168,7 @@ class CDBIterator template bool GetValue(V& value) { try { DataStream ssValue{GetValueImpl()}; - ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent)); + ssValue.Xor(dbwrapper_private::GetObfuscation(parent)); ssValue >> value; } catch (const std::exception&) { return false; @@ -181,7 +181,7 @@ struct LevelDBContext; class CDBWrapper { - friend const std::vector& dbwrapper_private::GetObfuscateKey(const CDBWrapper &w); + friend const std::vector& dbwrapper_private::GetObfuscation(const CDBWrapper &w); private: //! holds all leveldb-specific fields of this class std::unique_ptr m_db_context; @@ -190,12 +190,12 @@ class CDBWrapper std::string m_name; //! a key used for optional XOR-obfuscation of the database - std::vector obfuscate_key; + std::vector m_obfuscation; //! the key under which the obfuscation key is stored - static const std::string OBFUSCATE_KEY_KEY; + static const std::string OBFUSCATION_KEY_KEY; - std::vector CreateObfuscateKey() const; + std::vector CreateObfuscation() const; //! 
path to filesystem storage const fs::path m_path; @@ -227,7 +227,7 @@ class CDBWrapper } try { DataStream ssValue{MakeByteSpan(*strValue)}; - ssValue.Xor(obfuscate_key); + ssValue.Xor(m_obfuscation); ssValue >> value; } catch (const std::exception&) { return false; diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index f361edc303..5231c1b45a 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -792,13 +792,13 @@ void BlockManager::UnlinkPrunedFiles(const std::set& setFilesToPrune) const AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const { - return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_xor_key}; + return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_obfuscation}; } /** Open an undo file (rev?????.dat) */ AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const { - return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_xor_key}; + return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_obfuscation}; } fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const @@ -1137,7 +1137,7 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) { // Bytes are serialized without length indicator, so this is also the exact // size of the XOR-key file. - std::array xor_key{}; + std::array obfuscation{}; // Consider this to be the first run if the blocksdir contains only hidden // files (those which start with a .). Checking for a fully-empty dir would @@ -1154,14 +1154,14 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) if (opts.use_xor && first_run) { // Only use random fresh key when the boolean option is set and on the // very first start of the program. - FastRandomContext{}.fillrand(xor_key); + FastRandomContext{}.fillrand(obfuscation); } const fs::path xor_key_path{opts.blocks_dir / "xor.dat"}; if (fs::exists(xor_key_path)) { // A pre-existing xor key file has priority. 
AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")}; - xor_key_file >> xor_key; + xor_key_file >> obfuscation; } else { // Create initial or missing xor key file AutoFile xor_key_file{fsbridge::fopen(xor_key_path, @@ -1171,7 +1171,7 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) "wbx" #endif )}; - xor_key_file << xor_key; + xor_key_file << obfuscation; if (xor_key_file.fclose() != 0) { throw std::runtime_error{strprintf("Error closing XOR key file %s: %s", fs::PathToString(xor_key_path), @@ -1179,20 +1179,20 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) } } // If the user disabled the key, it must be zero. - if (!opts.use_xor && xor_key != decltype(xor_key){}) { + if (!opts.use_xor && obfuscation != decltype(obfuscation){}) { throw std::runtime_error{ strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! " "Stored key: '%s', stored path: '%s'.", - HexStr(xor_key), fs::PathToString(xor_key_path)), + HexStr(obfuscation), fs::PathToString(xor_key_path)), }; } - LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(xor_key)); - return std::vector{xor_key.begin(), xor_key.end()}; + LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(obfuscation)); + return std::vector{obfuscation.begin(), obfuscation.end()}; } BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts) : m_prune_mode{opts.prune_target > 0}, - m_xor_key{InitBlocksdirXorKey(opts)}, + m_obfuscation{InitBlocksdirXorKey(opts)}, m_opts{std::move(opts)}, m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 
0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}}, m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}}, diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index b2506d6471..1b847efd49 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -235,7 +235,7 @@ class BlockManager const bool m_prune_mode; - const std::vector m_xor_key; + const std::vector m_obfuscation; /** Dirty block index entries. */ std::set m_dirty_blockindex; diff --git a/src/node/mempool_persist.cpp b/src/node/mempool_persist.cpp index e09f318c68..42f5dd07ce 100644 --- a/src/node/mempool_persist.cpp +++ b/src/node/mempool_persist.cpp @@ -60,15 +60,15 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active try { uint64_t version; file >> version; - std::vector xor_key; + std::vector obfuscation; if (version == MEMPOOL_DUMP_VERSION_NO_XOR_KEY) { // Leave XOR-key empty } else if (version == MEMPOOL_DUMP_VERSION) { - file >> xor_key; + file >> obfuscation; } else { return false; } - file.SetXor(xor_key); + file.SetObfuscation(obfuscation); uint64_t total_txns_to_load; file >> total_txns_to_load; uint64_t txns_tried = 0; @@ -179,12 +179,12 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock const uint64_t version{pool.m_opts.persist_v1_dat ? 
MEMPOOL_DUMP_VERSION_NO_XOR_KEY : MEMPOOL_DUMP_VERSION}; file << version; - std::vector xor_key(Obfuscation::KEY_SIZE); + std::vector obfuscation(Obfuscation::KEY_SIZE); if (!pool.m_opts.persist_v1_dat) { - FastRandomContext{}.fillrand(xor_key); - file << xor_key; + FastRandomContext{}.fillrand(obfuscation); + file << obfuscation; } - file.SetXor(xor_key); + file.SetObfuscation(obfuscation); uint64_t mempool_transactions_to_write(vinfo.size()); file << mempool_transactions_to_write; diff --git a/src/streams.cpp b/src/streams.cpp index 02c4095465..e0944d5cd9 100644 --- a/src/streams.cpp +++ b/src/streams.cpp @@ -9,8 +9,8 @@ #include -AutoFile::AutoFile(std::FILE* file, std::vector data_xor) - : m_file{file}, m_xor{std::move(data_xor)} +AutoFile::AutoFile(std::FILE* file, std::vector obfuscation) + : m_file{file}, m_obfuscation{std::move(obfuscation)} { if (!IsNull()) { auto pos{std::ftell(m_file)}; @@ -22,9 +22,9 @@ std::size_t AutoFile::detail_fread(Span dst) { if (!m_file) throw std::ios_base::failure("AutoFile::read: file handle is nullptr"); size_t ret = std::fread(dst.data(), 1, dst.size(), m_file); - if (!m_xor.empty()) { + if (!m_obfuscation.empty()) { if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::read: position unknown"); - util::Xor(dst.subspan(0, ret), m_xor, *m_position); + util::Xor(dst.subspan(0, ret), m_obfuscation, *m_position); } if (m_position.has_value()) *m_position += ret; return ret; @@ -81,7 +81,7 @@ void AutoFile::ignore(size_t nSize) void AutoFile::write(Span src) { if (!m_file) throw std::ios_base::failure("AutoFile::write: file handle is nullptr"); - if (m_xor.empty()) { + if (m_obfuscation.empty()) { if (std::fwrite(src.data(), 1, src.size(), m_file) != src.size()) { throw std::ios_base::failure("AutoFile::write: write failed"); } @@ -101,9 +101,9 @@ void AutoFile::write(Span src) void AutoFile::write_buffer(std::span src) { if (!m_file) throw std::ios_base::failure("AutoFile::write_buffer: file handle is nullptr"); 
- if (m_xor.size()) { + if (m_obfuscation.size()) { if (!m_position) throw std::ios_base::failure("AutoFile::write_buffer: obfuscation position unknown"); - util::Xor(src, m_xor, *m_position); // obfuscate in-place + util::Xor(src, m_obfuscation, *m_position); // obfuscate in-place } if (std::fwrite(src.data(), 1, src.size(), m_file) != src.size()) { throw std::ios_base::failure("AutoFile::write_buffer: write failed"); diff --git a/src/streams.h b/src/streams.h index c20e22a129..1094f025de 100644 --- a/src/streams.h +++ b/src/streams.h @@ -402,12 +402,12 @@ class AutoFile { protected: std::FILE* m_file; - std::vector m_xor; + std::vector m_obfuscation; std::optional m_position; bool m_was_written{false}; public: - explicit AutoFile(std::FILE* file, std::vector data_xor={}); + explicit AutoFile(std::FILE* file, std::vector obfuscation={}); ~AutoFile() { @@ -455,7 +455,7 @@ class AutoFile bool IsNull() const { return m_file == nullptr; } /** Continue with a different XOR key */ - void SetXor(std::vector data_xor) { m_xor = data_xor; } + void SetObfuscation(std::vector obfuscation) { m_obfuscation = obfuscation; } /** Implementation detail, only used internally. 
*/ std::size_t detail_fread(Span dst); diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp index bd50db17c1..9c723351d6 100644 --- a/src/test/dbwrapper_tests.cpp +++ b/src/test/dbwrapper_tests.cpp @@ -42,7 +42,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper) BOOST_CHECK_EQUAL(obfuscate, !dbw.IsEmpty()); // Ensure that we're doing real obfuscation when obfuscate=true - obfuscation_key = dbwrapper_private::GetObfuscateKey(dbw); + obfuscation_key = dbwrapper_private::GetObfuscation(dbw); BOOST_CHECK_EQUAL(obfuscate, !is_null_key(obfuscation_key)); for (uint8_t k{0}; k < 10; ++k) { @@ -56,7 +56,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper) // Verify that the obfuscation key is never obfuscated { CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .obfuscate = false}}; - BOOST_CHECK(obfuscation_key == dbwrapper_private::GetObfuscateKey(dbw)); + BOOST_CHECK(obfuscation_key == dbwrapper_private::GetObfuscation(dbw)); } // Read back the values @@ -64,7 +64,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper) CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .obfuscate = obfuscate}}; // Ensure obfuscation is read back correctly - BOOST_CHECK(obfuscation_key == dbwrapper_private::GetObfuscateKey(dbw)); + BOOST_CHECK(obfuscation_key == dbwrapper_private::GetObfuscation(dbw)); BOOST_CHECK_EQUAL(obfuscate, !is_null_key(obfuscation_key)); // Verify all written values @@ -89,7 +89,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data) bool res_bool; // Ensure that we're doing real obfuscation when obfuscate=true - BOOST_CHECK_EQUAL(obfuscate, !is_null_key(dbwrapper_private::GetObfuscateKey(dbw))); + BOOST_CHECK_EQUAL(obfuscate, !is_null_key(dbwrapper_private::GetObfuscation(dbw))); //Simulate block raw data - "b + block hash" std::string key_block = "b" + m_rng.rand256().ToString(); @@ -264,7 +264,7 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate) BOOST_CHECK_EQUAL(res2.ToString(), in.ToString()); BOOST_CHECK(!odbw.IsEmpty()); - 
BOOST_CHECK(is_null_key(dbwrapper_private::GetObfuscateKey(odbw))); // The key should be an empty string + BOOST_CHECK(is_null_key(dbwrapper_private::GetObfuscation(odbw))); // The key should be an empty string uint256 in2 = m_rng.rand256(); uint256 res3; @@ -301,7 +301,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex) // Check that the key/val we wrote with unobfuscated wrapper doesn't exist uint256 res2; BOOST_CHECK(!odbw.Read(key, res2)); - BOOST_CHECK(!is_null_key(dbwrapper_private::GetObfuscateKey(odbw))); + BOOST_CHECK(!is_null_key(dbwrapper_private::GetObfuscation(odbw))); uint256 in2 = m_rng.rand256(); uint256 res3; diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 196420fd67..5772d4df21 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -79,11 +79,11 @@ BOOST_AUTO_TEST_CASE(xor_file) auto raw_file{[&](const auto& mode) { return fsbridge::fopen(xor_path, mode); }}; const std::vector test1{1, 2, 3}; const std::vector test2{4, 5}; - const auto xor_pat{"ff00ff00ff00ff00"_hex_v}; + const auto obfuscation{"ff00ff00ff00ff00"_hex_v}; { // Check errors for missing file - AutoFile xor_file{raw_file("rb"), xor_pat}; + AutoFile xor_file{raw_file("rb"), obfuscation}; BOOST_CHECK_EXCEPTION(xor_file << std::byte{}, std::ios_base::failure, HasReason{"AutoFile::write: file handle is nullptr"}); BOOST_CHECK_EXCEPTION(xor_file >> std::byte{}, std::ios_base::failure, HasReason{"AutoFile::read: file handle is nullptr"}); BOOST_CHECK_EXCEPTION(xor_file.ignore(1), std::ios_base::failure, HasReason{"AutoFile::ignore: file handle is nullptr"}); @@ -95,7 +95,7 @@ BOOST_AUTO_TEST_CASE(xor_file) #else const char* mode = "wbx"; #endif - AutoFile xor_file{raw_file(mode), xor_pat}; + AutoFile xor_file{raw_file(mode), obfuscation}; xor_file << test1 << test2; BOOST_REQUIRE_EQUAL(xor_file.fclose(), 0); } @@ -109,7 +109,7 @@ BOOST_AUTO_TEST_CASE(xor_file) BOOST_CHECK_EXCEPTION(non_xor_file.ignore(1), std::ios_base::failure, 
HasReason{"AutoFile::ignore: end of file"}); } { - AutoFile xor_file{raw_file("rb"), xor_pat}; + AutoFile xor_file{raw_file("rb"), obfuscation}; std::vector read1, read2; xor_file >> read1 >> read2; BOOST_CHECK_EQUAL(HexStr(read1), HexStr(test1)); @@ -118,7 +118,7 @@ BOOST_AUTO_TEST_CASE(xor_file) BOOST_CHECK_EXCEPTION(xor_file >> std::byte{}, std::ios_base::failure, HasReason{"AutoFile::read: end of file"}); } { - AutoFile xor_file{raw_file("rb"), xor_pat}; + AutoFile xor_file{raw_file("rb"), obfuscation}; std::vector read2; // Check that ignore works xor_file.ignore(4); From 60659175ec890862e15fab8454350dae302024b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Sat, 5 Apr 2025 18:25:20 +0200 Subject: [PATCH 098/356] refactor: prepare `DBWrapper` for obfuscation key change Since `FastRandomContext` delegates to `GetRandBytes` anyway, we can simplify new key generation to a Write/Read combo, unifying the flow of enabling obfuscation via `Read`. The comments were also adjusted to clarify that the `m_obfuscation` field affects the behavior of `Read` and `Write` methods. These changes are meant to simplify the diffs for the riskier optimization commits later. Github-Pull: #31144 Rebased-From: 6bbf2d9311b47a8a15c17d9fe11828ee623d98e0 --- src/dbwrapper.cpp | 40 +++++++--------------------------------- src/dbwrapper.h | 6 ++---- 2 files changed, 9 insertions(+), 37 deletions(-) diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp index e196aacc38..96a1e7252c 100644 --- a/src/dbwrapper.cpp +++ b/src/dbwrapper.cpp @@ -256,24 +256,15 @@ CDBWrapper::CDBWrapper(const DBParams& params) LogPrintf("Finished database compaction of %s\n", fs::PathToString(params.path)); } - // The base-case obfuscation key, which is a noop. 
- m_obfuscation = std::vector(Obfuscation::KEY_SIZE, '\000'); - - bool key_exists = Read(OBFUSCATION_KEY_KEY, m_obfuscation); - - if (!key_exists && params.obfuscate && IsEmpty()) { - // Initialize non-degenerate obfuscation if it won't upset - // existing, non-obfuscated data. - std::vector new_key = CreateObfuscation(); - - // Write `new_key` so we don't obfuscate the key with itself - Write(OBFUSCATION_KEY_KEY, new_key); - m_obfuscation = new_key; - - LogPrintf("Wrote new obfuscation key for %s: %s\n", fs::PathToString(params.path), HexStr(m_obfuscation)); + m_obfuscation = std::vector(Obfuscation::KEY_SIZE, '\000'); // Needed for unobfuscated Read()/Write() below + if (!Read(OBFUSCATION_KEY_KEY, m_obfuscation) && params.obfuscate && IsEmpty()) { + // Generate, write and read back the new obfuscation key, making sure we don't obfuscate the key itself + Write(OBFUSCATION_KEY_KEY, FastRandomContext{}.randbytes(Obfuscation::KEY_SIZE)); + Read(OBFUSCATION_KEY_KEY, m_obfuscation); + LogInfo("Wrote new obfuscation key for %s: %s", fs::PathToString(params.path), HexStr(m_obfuscation)); } + LogInfo("Using obfuscation key for %s: %s", fs::PathToString(params.path), HexStr(m_obfuscation)); - LogPrintf("Using obfuscation key for %s: %s\n", fs::PathToString(params.path), HexStr(m_obfuscation)); } CDBWrapper::~CDBWrapper() @@ -318,23 +309,6 @@ size_t CDBWrapper::DynamicMemoryUsage() const return parsed.value(); } -// Prefixed with null character to avoid collisions with other keys -// -// We must use a string constructor which specifies length so that we copy -// past the null-terminator. -const std::string CDBWrapper::OBFUSCATION_KEY_KEY("\000obfuscate_key", 14); - -/** - * Returns a string (consisting of 8 random bytes) suitable for use as an - * obfuscating XOR key. 
- */ -std::vector CDBWrapper::CreateObfuscation() const -{ - std::vector ret(Obfuscation::KEY_SIZE); - GetRandBytes(ret); - return ret; -} - std::optional CDBWrapper::ReadImpl(Span key) const { leveldb::Slice slKey(CharCast(key.data()), key.size()); diff --git a/src/dbwrapper.h b/src/dbwrapper.h index 2fa2b06ae7..b9ab83afa9 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -192,10 +192,8 @@ class CDBWrapper //! a key used for optional XOR-obfuscation of the database std::vector m_obfuscation; - //! the key under which the obfuscation key is stored - static const std::string OBFUSCATION_KEY_KEY; - - std::vector CreateObfuscation() const; + //! obfuscation key storage key, null-prefixed to avoid collisions + inline static const std::string OBFUSCATION_KEY_KEY{"\000obfuscate_key", 14}; // explicit size to avoid truncation at leading \0 //! path to filesystem storage const fs::path m_path; From cf56e5471e06531d2f0bfcd6bb893ed083724130 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Sat, 5 Apr 2025 19:01:09 +0200 Subject: [PATCH 099/356] refactor: prepare mempool_persist for obfuscation key change These changes are meant to simplify the diffs for the riskier optimization commits later. 
Github-Pull: #31144 Rebased-From: fa5d296e3beb312e2bc39532a12bcdf187c6da91 --- src/node/mempool_persist.cpp | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/src/node/mempool_persist.cpp b/src/node/mempool_persist.cpp index 42f5dd07ce..f69b955ec9 100644 --- a/src/node/mempool_persist.cpp +++ b/src/node/mempool_persist.cpp @@ -60,15 +60,17 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active try { uint64_t version; file >> version; - std::vector obfuscation; + if (version == MEMPOOL_DUMP_VERSION_NO_XOR_KEY) { - // Leave XOR-key empty + file.SetObfuscation({}); } else if (version == MEMPOOL_DUMP_VERSION) { + std::vector obfuscation(Obfuscation::KEY_SIZE); file >> obfuscation; + file.SetObfuscation(obfuscation); } else { return false; } - file.SetObfuscation(obfuscation); + uint64_t total_txns_to_load; file >> total_txns_to_load; uint64_t txns_tried = 0; @@ -179,12 +181,14 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock const uint64_t version{pool.m_opts.persist_v1_dat ? MEMPOOL_DUMP_VERSION_NO_XOR_KEY : MEMPOOL_DUMP_VERSION}; file << version; - std::vector obfuscation(Obfuscation::KEY_SIZE); if (!pool.m_opts.persist_v1_dat) { + std::vector obfuscation(Obfuscation::KEY_SIZE); FastRandomContext{}.fillrand(obfuscation); file << obfuscation; + file.SetObfuscation(obfuscation); + } else { + file.SetObfuscation({}); } - file.SetObfuscation(obfuscation); uint64_t mempool_transactions_to_write(vinfo.size()); file << mempool_transactions_to_write; From 8f55830d9d21e720d3c4bd73bc8711da1273f6ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Sat, 5 Jul 2025 13:39:15 +0200 Subject: [PATCH 100/356] refactor: move `util::Xor` to `Obfuscation().Xor` This is meant to focus the usages to narrow the scope of the obfuscation optimization. `Obfuscation::Xor` is mostly a move. 
Co-authored-by: maflcko <6399679+maflcko@users.noreply.github.com> Github-Pull: #31144 Rebased-From: 377aab8e5a8da2ea20383b4dde59094cc42d3407 --- src/bench/obfuscation.cpp | 3 +-- src/streams.cpp | 5 +++-- src/streams.h | 24 ++---------------------- src/test/streams_tests.cpp | 4 ++-- src/util/obfuscation.h | 18 ++++++++++++++++++ 5 files changed, 26 insertions(+), 28 deletions(-) diff --git a/src/bench/obfuscation.cpp b/src/bench/obfuscation.cpp index 27a254f803..2e9f9a453a 100644 --- a/src/bench/obfuscation.cpp +++ b/src/bench/obfuscation.cpp @@ -4,7 +4,6 @@ #include #include -#include #include #include @@ -18,7 +17,7 @@ static void ObfuscationBench(benchmark::Bench& bench) size_t offset{0}; bench.batch(data.size()).unit("byte").run([&] { - util::Xor(data, key, offset++); // mutated differently each time + Obfuscation().Xor(data, key, offset++); // mutated differently each time ankerl::nanobench::doNotOptimizeAway(data); }); } diff --git a/src/streams.cpp b/src/streams.cpp index e0944d5cd9..c050cc4b33 100644 --- a/src/streams.cpp +++ b/src/streams.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -24,7 +25,7 @@ std::size_t AutoFile::detail_fread(Span dst) size_t ret = std::fread(dst.data(), 1, dst.size(), m_file); if (!m_obfuscation.empty()) { if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::read: position unknown"); - util::Xor(dst.subspan(0, ret), m_obfuscation, *m_position); + Obfuscation().Xor(dst.subspan(0, ret), m_obfuscation, *m_position); } if (m_position.has_value()) *m_position += ret; return ret; @@ -103,7 +104,7 @@ void AutoFile::write_buffer(std::span src) if (!m_file) throw std::ios_base::failure("AutoFile::write_buffer: file handle is nullptr"); if (m_obfuscation.size()) { if (!m_position) throw std::ios_base::failure("AutoFile::write_buffer: obfuscation position unknown"); - util::Xor(src, m_obfuscation, *m_position); // obfuscate in-place + Obfuscation().Xor(src, m_obfuscation, *m_position); // obfuscate 
in-place } if (std::fwrite(src.data(), 1, src.size(), m_file) != src.size()) { throw std::ios_base::failure("AutoFile::write_buffer: write failed"); diff --git a/src/streams.h b/src/streams.h index 1094f025de..0f25b5baab 100644 --- a/src/streams.h +++ b/src/streams.h @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -28,27 +29,6 @@ #include #include -namespace util { -inline void Xor(Span write, Span key, size_t key_offset = 0) -{ - if (key.size() == 0) { - return; - } - key_offset %= key.size(); - - for (size_t i = 0, j = key_offset; i != write.size(); i++) { - write[i] ^= key[j++]; - - // This potentially acts on very many bytes of data, so it's - // important that we calculate `j`, i.e. the `key` index in this - // way instead of doing a %, which would effectively be a division - // for each byte Xor'd -- much slower than need be. - if (j == key.size()) - j = 0; - } -} -} // namespace util - /* Minimal stream for overwriting and/or appending to an existing byte vector * * The referenced vector will grow as necessary @@ -279,7 +259,7 @@ class DataStream */ void Xor(const std::vector& key) { - util::Xor(MakeWritableByteSpan(*this), MakeByteSpan(key)); + Obfuscation().Xor(MakeWritableByteSpan(*this), MakeByteSpan(key)); } /** Compute total memory usage of this object (own memory + any dynamic memory). 
*/ diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 5772d4df21..3f590f3279 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -24,7 +24,7 @@ BOOST_AUTO_TEST_CASE(xor_roundtrip_random_chunks) auto apply_random_xor_chunks{[&](std::span target, std::span obfuscation) { for (size_t offset{0}; offset < target.size();) { const size_t chunk_size{1 + m_rng.randrange(target.size() - offset)}; - util::Xor(target.subspan(offset, chunk_size), obfuscation, offset); + Obfuscation().Xor(target.subspan(offset, chunk_size), obfuscation, offset); offset += chunk_size; } }}; @@ -67,7 +67,7 @@ BOOST_AUTO_TEST_CASE(xor_bytes_reference) std::vector actual{expected}; expected_xor(std::span{expected}.subspan(write_offset), key_bytes, key_offset); - util::Xor(std::span{actual}.subspan(write_offset), key_bytes, key_offset); + Obfuscation().Xor(std::span{actual}.subspan(write_offset), key_bytes, key_offset); BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), actual.begin(), actual.end()); } diff --git a/src/util/obfuscation.h b/src/util/obfuscation.h index 628dacfc9d..2c098cc931 100644 --- a/src/util/obfuscation.h +++ b/src/util/obfuscation.h @@ -6,11 +6,29 @@ #define BITCOIN_UTIL_OBFUSCATION_H #include +#include class Obfuscation { public: static constexpr size_t KEY_SIZE{sizeof(uint64_t)}; + + void Xor(Span write, Span key, size_t key_offset = 0) + { + assert(key.size() == KEY_SIZE); + key_offset %= KEY_SIZE; + + for (size_t i = 0, j = key_offset; i != write.size(); i++) { + write[i] ^= key[j++]; + + // This potentially acts on very many bytes of data, so it's + // important that we calculate `j`, i.e. the `key` index in this + // way instead of doing a %, which would effectively be a division + // for each byte Xor'd -- much slower than need be. 
+ if (j == KEY_SIZE) + j = 0; + } + } }; #endif // BITCOIN_UTIL_OBFUSCATION_H From 0d512d87550a118697d1e14a9b2660d2456b8dcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Sat, 5 Jul 2025 14:39:19 +0200 Subject: [PATCH 101/356] refactor: encapsulate `vector`/`array` keys into `Obfuscation` Github-Pull: #31144 Rebased-From: 478d40afc6faaca47b5cf94bb461692d03347599 --- src/bench/obfuscation.cpp | 4 +-- src/dbwrapper.cpp | 11 +++--- src/dbwrapper.h | 14 ++++---- src/node/blockstorage.cpp | 2 +- src/node/blockstorage.h | 2 +- src/node/mempool_persist.cpp | 5 ++- src/streams.cpp | 19 +++++----- src/streams.h | 19 +++------- src/test/dbwrapper_tests.cpp | 29 ++++++--------- src/test/fuzz/autofile.cpp | 2 +- src/test/fuzz/buffered_file.cpp | 2 +- src/test/streams_tests.cpp | 63 +++++++++++++++++++++++++-------- src/util/obfuscation.h | 49 ++++++++++++++++++++++--- 13 files changed, 136 insertions(+), 85 deletions(-) diff --git a/src/bench/obfuscation.cpp b/src/bench/obfuscation.cpp index 2e9f9a453a..178be56a5d 100644 --- a/src/bench/obfuscation.cpp +++ b/src/bench/obfuscation.cpp @@ -13,11 +13,11 @@ static void ObfuscationBench(benchmark::Bench& bench) { FastRandomContext frc{/*fDeterministic=*/true}; auto data{frc.randbytes(1024)}; - const auto key{frc.randbytes()}; + const Obfuscation obfuscation{frc.randbytes()}; size_t offset{0}; bench.batch(data.size()).unit("byte").run([&] { - Obfuscation().Xor(data, key, offset++); // mutated differently each time + obfuscation(data, offset++); // mutated differently each time ankerl::nanobench::doNotOptimizeAway(data); }); } diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp index 96a1e7252c..6e9ae2ef45 100644 --- a/src/dbwrapper.cpp +++ b/src/dbwrapper.cpp @@ -172,7 +172,7 @@ void CDBBatch::Clear() void CDBBatch::WriteImpl(Span key, DataStream& ssValue) { leveldb::Slice slKey(CharCast(key.data()), key.size()); - ssValue.Xor(dbwrapper_private::GetObfuscation(parent)); + 
dbwrapper_private::GetObfuscation(parent)(ssValue); leveldb::Slice slValue(CharCast(ssValue.data()), ssValue.size()); m_impl_batch->batch.Put(slKey, slValue); // LevelDB serializes writes as: @@ -256,15 +256,14 @@ CDBWrapper::CDBWrapper(const DBParams& params) LogPrintf("Finished database compaction of %s\n", fs::PathToString(params.path)); } - m_obfuscation = std::vector(Obfuscation::KEY_SIZE, '\000'); // Needed for unobfuscated Read()/Write() below + assert(!m_obfuscation); // Needed for unobfuscated Read()/Write() below if (!Read(OBFUSCATION_KEY_KEY, m_obfuscation) && params.obfuscate && IsEmpty()) { // Generate, write and read back the new obfuscation key, making sure we don't obfuscate the key itself Write(OBFUSCATION_KEY_KEY, FastRandomContext{}.randbytes(Obfuscation::KEY_SIZE)); Read(OBFUSCATION_KEY_KEY, m_obfuscation); - LogInfo("Wrote new obfuscation key for %s: %s", fs::PathToString(params.path), HexStr(m_obfuscation)); + LogInfo("Wrote new obfuscation key for %s: %s", fs::PathToString(params.path), m_obfuscation.HexKey()); } - LogInfo("Using obfuscation key for %s: %s", fs::PathToString(params.path), HexStr(m_obfuscation)); - + LogInfo("Using obfuscation key for %s: %s", fs::PathToString(params.path), m_obfuscation.HexKey()); } CDBWrapper::~CDBWrapper() @@ -392,7 +391,7 @@ void CDBIterator::Next() { m_impl_iter->iter->Next(); } namespace dbwrapper_private { -const std::vector& GetObfuscation(const CDBWrapper &w) +const Obfuscation& GetObfuscation(const CDBWrapper& w) { return w.m_obfuscation; } diff --git a/src/dbwrapper.h b/src/dbwrapper.h index b9ab83afa9..6779eeebb5 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -18,7 +18,6 @@ #include #include #include -#include static const size_t DBWRAPPER_PREALLOC_KEY_SIZE = 64; static const size_t DBWRAPPER_PREALLOC_VALUE_SIZE = 1024; @@ -63,8 +62,7 @@ namespace dbwrapper_private { * Database obfuscation should be considered an implementation detail of the * specific database. 
*/ -const std::vector& GetObfuscation(const CDBWrapper &w); - +const Obfuscation& GetObfuscation(const CDBWrapper&); }; // namespace dbwrapper_private bool DestroyDB(const std::string& path_str); @@ -168,7 +166,7 @@ class CDBIterator template bool GetValue(V& value) { try { DataStream ssValue{GetValueImpl()}; - ssValue.Xor(dbwrapper_private::GetObfuscation(parent)); + dbwrapper_private::GetObfuscation(parent)(ssValue); ssValue >> value; } catch (const std::exception&) { return false; @@ -181,7 +179,7 @@ struct LevelDBContext; class CDBWrapper { - friend const std::vector& dbwrapper_private::GetObfuscation(const CDBWrapper &w); + friend const Obfuscation& dbwrapper_private::GetObfuscation(const CDBWrapper&); private: //! holds all leveldb-specific fields of this class std::unique_ptr m_db_context; @@ -189,8 +187,8 @@ class CDBWrapper //! the name of this database std::string m_name; - //! a key used for optional XOR-obfuscation of the database - std::vector m_obfuscation; + //! optional XOR-obfuscation of the database + Obfuscation m_obfuscation; //! 
obfuscation key storage key, null-prefixed to avoid collisions inline static const std::string OBFUSCATION_KEY_KEY{"\000obfuscate_key", 14}; // explicit size to avoid truncation at leading \0 @@ -225,7 +223,7 @@ class CDBWrapper } try { DataStream ssValue{MakeByteSpan(*strValue)}; - ssValue.Xor(m_obfuscation); + m_obfuscation(ssValue); ssValue >> value; } catch (const std::exception&) { return false; diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 5231c1b45a..ebbca1e954 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -1187,7 +1187,7 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) }; } LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(obfuscation)); - return std::vector{obfuscation.begin(), obfuscation.end()}; + return Obfuscation{obfuscation}; } BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts) diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 1b847efd49..9022139a07 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -235,7 +235,7 @@ class BlockManager const bool m_prune_mode; - const std::vector m_obfuscation; + const Obfuscation m_obfuscation; /** Dirty block index entries. 
*/ std::set m_dirty_blockindex; diff --git a/src/node/mempool_persist.cpp b/src/node/mempool_persist.cpp index f69b955ec9..99dfdb6deb 100644 --- a/src/node/mempool_persist.cpp +++ b/src/node/mempool_persist.cpp @@ -64,7 +64,7 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active if (version == MEMPOOL_DUMP_VERSION_NO_XOR_KEY) { file.SetObfuscation({}); } else if (version == MEMPOOL_DUMP_VERSION) { - std::vector obfuscation(Obfuscation::KEY_SIZE); + Obfuscation obfuscation; file >> obfuscation; file.SetObfuscation(obfuscation); } else { @@ -182,8 +182,7 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock file << version; if (!pool.m_opts.persist_v1_dat) { - std::vector obfuscation(Obfuscation::KEY_SIZE); - FastRandomContext{}.fillrand(obfuscation); + const Obfuscation obfuscation{FastRandomContext{}.randbytes()}; file << obfuscation; file.SetObfuscation(obfuscation); } else { diff --git a/src/streams.cpp b/src/streams.cpp index c050cc4b33..d0025b2c44 100644 --- a/src/streams.cpp +++ b/src/streams.cpp @@ -10,8 +10,7 @@ #include -AutoFile::AutoFile(std::FILE* file, std::vector obfuscation) - : m_file{file}, m_obfuscation{std::move(obfuscation)} +AutoFile::AutoFile(std::FILE* file, const Obfuscation& obfuscation) : m_file{file}, m_obfuscation{obfuscation} { if (!IsNull()) { auto pos{std::ftell(m_file)}; @@ -22,12 +21,12 @@ AutoFile::AutoFile(std::FILE* file, std::vector obfuscation) std::size_t AutoFile::detail_fread(Span dst) { if (!m_file) throw std::ios_base::failure("AutoFile::read: file handle is nullptr"); - size_t ret = std::fread(dst.data(), 1, dst.size(), m_file); - if (!m_obfuscation.empty()) { - if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::read: position unknown"); - Obfuscation().Xor(dst.subspan(0, ret), m_obfuscation, *m_position); + const size_t ret = std::fread(dst.data(), 1, dst.size(), m_file); + if (m_obfuscation) { + if (!m_position) throw 
std::ios_base::failure("AutoFile::read: position unknown"); + m_obfuscation(dst.subspan(0, ret), *m_position); } - if (m_position.has_value()) *m_position += ret; + if (m_position) *m_position += ret; return ret; } @@ -82,7 +81,7 @@ void AutoFile::ignore(size_t nSize) void AutoFile::write(Span src) { if (!m_file) throw std::ios_base::failure("AutoFile::write: file handle is nullptr"); - if (m_obfuscation.empty()) { + if (!m_obfuscation) { if (std::fwrite(src.data(), 1, src.size(), m_file) != src.size()) { throw std::ios_base::failure("AutoFile::write: write failed"); } @@ -102,9 +101,9 @@ void AutoFile::write(Span src) void AutoFile::write_buffer(std::span src) { if (!m_file) throw std::ios_base::failure("AutoFile::write_buffer: file handle is nullptr"); - if (m_obfuscation.size()) { + if (m_obfuscation) { if (!m_position) throw std::ios_base::failure("AutoFile::write_buffer: obfuscation position unknown"); - Obfuscation().Xor(src, m_obfuscation, *m_position); // obfuscate in-place + m_obfuscation(src, *m_position); // obfuscate in-place } if (std::fwrite(src.data(), 1, src.size(), m_file) != src.size()) { throw std::ios_base::failure("AutoFile::write_buffer: write failed"); diff --git a/src/streams.h b/src/streams.h index 0f25b5baab..641211865f 100644 --- a/src/streams.h +++ b/src/streams.h @@ -26,7 +26,6 @@ #include #include #include -#include #include /* Minimal stream for overwriting and/or appending to an existing byte vector @@ -245,23 +244,13 @@ class DataStream return (*this); } - template + template DataStream& operator>>(T&& obj) { ::Unserialize(*this, obj); return (*this); } - /** - * XOR the contents of this stream with a certain key. - * - * @param[in] key The key used to XOR the data in this stream. - */ - void Xor(const std::vector& key) - { - Obfuscation().Xor(MakeWritableByteSpan(*this), MakeByteSpan(key)); - } - /** Compute total memory usage of this object (own memory + any dynamic memory). 
*/ size_t GetMemoryUsage() const noexcept; }; @@ -382,12 +371,12 @@ class AutoFile { protected: std::FILE* m_file; - std::vector m_obfuscation; + Obfuscation m_obfuscation; std::optional m_position; bool m_was_written{false}; public: - explicit AutoFile(std::FILE* file, std::vector obfuscation={}); + explicit AutoFile(std::FILE* file, const Obfuscation& obfuscation = {}); ~AutoFile() { @@ -435,7 +424,7 @@ class AutoFile bool IsNull() const { return m_file == nullptr; } /** Continue with a different XOR key */ - void SetObfuscation(std::vector obfuscation) { m_obfuscation = obfuscation; } + void SetObfuscation(const Obfuscation& obfuscation) { m_obfuscation = obfuscation; } /** Implementation detail, only used internally. */ std::size_t detail_fread(Span dst); diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp index 9c723351d6..cd0f347b66 100644 --- a/src/test/dbwrapper_tests.cpp +++ b/src/test/dbwrapper_tests.cpp @@ -9,21 +9,12 @@ #include #include +#include #include using util::ToString; -// Test if a string consists entirely of null characters -static bool is_null_key(const std::vector& key) { - bool isnull = true; - - for (unsigned int i = 0; i < key.size(); i++) - isnull &= (key[i] == '\x00'); - - return isnull; -} - BOOST_FIXTURE_TEST_SUITE(dbwrapper_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(dbwrapper) @@ -33,7 +24,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper) constexpr size_t CACHE_SIZE{1_MiB}; const fs::path path{m_args.GetDataDirBase() / "dbwrapper"}; - std::vector obfuscation_key{}; + Obfuscation obfuscation; std::vector> key_values{}; // Write values @@ -42,8 +33,8 @@ BOOST_AUTO_TEST_CASE(dbwrapper) BOOST_CHECK_EQUAL(obfuscate, !dbw.IsEmpty()); // Ensure that we're doing real obfuscation when obfuscate=true - obfuscation_key = dbwrapper_private::GetObfuscation(dbw); - BOOST_CHECK_EQUAL(obfuscate, !is_null_key(obfuscation_key)); + obfuscation = dbwrapper_private::GetObfuscation(dbw); + BOOST_CHECK_EQUAL(obfuscate, 
dbwrapper_private::GetObfuscation(dbw)); for (uint8_t k{0}; k < 10; ++k) { uint8_t key{k}; @@ -56,7 +47,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper) // Verify that the obfuscation key is never obfuscated { CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .obfuscate = false}}; - BOOST_CHECK(obfuscation_key == dbwrapper_private::GetObfuscation(dbw)); + BOOST_CHECK_EQUAL(obfuscation, dbwrapper_private::GetObfuscation(dbw)); } // Read back the values @@ -64,8 +55,8 @@ BOOST_AUTO_TEST_CASE(dbwrapper) CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .obfuscate = obfuscate}}; // Ensure obfuscation is read back correctly - BOOST_CHECK(obfuscation_key == dbwrapper_private::GetObfuscation(dbw)); - BOOST_CHECK_EQUAL(obfuscate, !is_null_key(obfuscation_key)); + BOOST_CHECK_EQUAL(obfuscation, dbwrapper_private::GetObfuscation(dbw)); + BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscation(dbw)); // Verify all written values for (const auto& [key, expected_value] : key_values) { @@ -89,7 +80,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data) bool res_bool; // Ensure that we're doing real obfuscation when obfuscate=true - BOOST_CHECK_EQUAL(obfuscate, !is_null_key(dbwrapper_private::GetObfuscation(dbw))); + BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscation(dbw)); //Simulate block raw data - "b + block hash" std::string key_block = "b" + m_rng.rand256().ToString(); @@ -264,7 +255,7 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate) BOOST_CHECK_EQUAL(res2.ToString(), in.ToString()); BOOST_CHECK(!odbw.IsEmpty()); - BOOST_CHECK(is_null_key(dbwrapper_private::GetObfuscation(odbw))); // The key should be an empty string + BOOST_CHECK(!dbwrapper_private::GetObfuscation(odbw)); // The key should be an empty string uint256 in2 = m_rng.rand256(); uint256 res3; @@ -301,7 +292,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex) // Check that the key/val we wrote with unobfuscated wrapper doesn't exist uint256 res2; BOOST_CHECK(!odbw.Read(key, res2)); - 
BOOST_CHECK(!is_null_key(dbwrapper_private::GetObfuscation(odbw))); + BOOST_CHECK(dbwrapper_private::GetObfuscation(odbw)); uint256 in2 = m_rng.rand256(); uint256 res3; diff --git a/src/test/fuzz/autofile.cpp b/src/test/fuzz/autofile.cpp index aced09ab9b..6038c04673 100644 --- a/src/test/fuzz/autofile.cpp +++ b/src/test/fuzz/autofile.cpp @@ -22,7 +22,7 @@ FUZZ_TARGET(autofile) const auto key_bytes{ConsumeFixedLengthByteVector(fuzzed_data_provider, Obfuscation::KEY_SIZE)}; AutoFile auto_file{ fuzzed_file_provider.open(), - key_bytes, + Obfuscation{std::span{key_bytes}.first()}, }; LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 100) { diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp index 0f3118bc6e..f4f907acbc 100644 --- a/src/test/fuzz/buffered_file.cpp +++ b/src/test/fuzz/buffered_file.cpp @@ -24,7 +24,7 @@ FUZZ_TARGET(buffered_file) const auto key_bytes{ConsumeFixedLengthByteVector(fuzzed_data_provider, Obfuscation::KEY_SIZE)}; AutoFile fuzzed_file{ fuzzed_file_provider.open(), - key_bytes, + Obfuscation{std::span{key_bytes}.first()}, }; try { auto n_buf_size = fuzzed_data_provider.ConsumeIntegralInRange(0, 4096); diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 3f590f3279..870666b41e 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -21,10 +21,10 @@ BOOST_FIXTURE_TEST_SUITE(streams_tests, BasicTestingSetup) // Test that obfuscation can be properly reverted even with random chunk sizes. 
BOOST_AUTO_TEST_CASE(xor_roundtrip_random_chunks) { - auto apply_random_xor_chunks{[&](std::span target, std::span obfuscation) { + auto apply_random_xor_chunks{[&](std::span target, const Obfuscation& obfuscation) { for (size_t offset{0}; offset < target.size();) { const size_t chunk_size{1 + m_rng.randrange(target.size() - offset)}; - Obfuscation().Xor(target.subspan(offset, chunk_size), obfuscation, offset); + obfuscation(target.subspan(offset, chunk_size), offset); offset += chunk_size; } }}; @@ -35,13 +35,14 @@ BOOST_AUTO_TEST_CASE(xor_roundtrip_random_chunks) std::vector roundtrip{original}; const auto key_bytes{m_rng.randbool() ? m_rng.randbytes() : std::array{}}; - apply_random_xor_chunks(roundtrip, key_bytes); + const Obfuscation obfuscation{key_bytes}; + apply_random_xor_chunks(roundtrip, obfuscation); const bool key_all_zeros{std::ranges::all_of( std::span{key_bytes}.first(std::min(write_size, Obfuscation::KEY_SIZE)), [](auto b) { return b == std::byte{0}; })}; BOOST_CHECK(key_all_zeros ? original == roundtrip : original != roundtrip); - apply_random_xor_chunks(roundtrip, key_bytes); + apply_random_xor_chunks(roundtrip, obfuscation); BOOST_CHECK(original == roundtrip); } } @@ -62,24 +63,58 @@ BOOST_AUTO_TEST_CASE(xor_bytes_reference) const size_t write_offset{std::min(write_size, m_rng.randrange(Obfuscation::KEY_SIZE * 2))}; // Write unaligned data const auto key_bytes{m_rng.randbool() ? 
m_rng.randbytes() : std::array{}}; - const std::vector obfuscation{key_bytes.begin(), key_bytes.end()}; + const Obfuscation obfuscation{key_bytes}; std::vector expected{m_rng.randbytes(write_size)}; std::vector actual{expected}; expected_xor(std::span{expected}.subspan(write_offset), key_bytes, key_offset); - Obfuscation().Xor(std::span{actual}.subspan(write_offset), key_bytes, key_offset); + obfuscation(std::span{actual}.subspan(write_offset), key_offset); BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), actual.begin(), actual.end()); } } +BOOST_AUTO_TEST_CASE(obfuscation_hexkey) +{ + const auto key_bytes{m_rng.randbytes()}; + + const Obfuscation obfuscation{key_bytes}; + BOOST_CHECK_EQUAL(obfuscation.HexKey(), HexStr(key_bytes)); +} + +BOOST_AUTO_TEST_CASE(obfuscation_serialize) +{ + const Obfuscation original{m_rng.randbytes()}; + + // Serialization + DataStream ds; + ds << original; + + BOOST_CHECK_EQUAL(ds.size(), 1 + Obfuscation::KEY_SIZE); // serialized as a vector + + // Deserialization + Obfuscation recovered{}; + ds >> recovered; + + BOOST_CHECK_EQUAL(recovered.HexKey(), original.HexKey()); +} + +BOOST_AUTO_TEST_CASE(obfuscation_empty) +{ + const Obfuscation null_obf{}; + BOOST_CHECK(!null_obf); + + const Obfuscation non_null_obf{"ff00ff00ff00ff00"_hex}; + BOOST_CHECK(non_null_obf); +} + BOOST_AUTO_TEST_CASE(xor_file) { fs::path xor_path{m_args.GetDataDirBase() / "test_xor.bin"}; auto raw_file{[&](const auto& mode) { return fsbridge::fopen(xor_path, mode); }}; const std::vector test1{1, 2, 3}; const std::vector test2{4, 5}; - const auto obfuscation{"ff00ff00ff00ff00"_hex_v}; + const Obfuscation obfuscation{"ff00ff00ff00ff00"_hex}; { // Check errors for missing file @@ -284,23 +319,23 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor) // Degenerate case { DataStream ds{}; - ds.Xor("0000000000000000"_hex_v_u8); + Obfuscation{}(ds); BOOST_CHECK_EQUAL(""s, ds.str()); } { - const auto obfuscation{"ffffffffffffffff"_hex_v_u8}; + const Obfuscation 
obfuscation{"ffffffffffffffff"_hex}; DataStream ds{"0ff0"_hex}; - ds.Xor(obfuscation); + obfuscation(ds); BOOST_CHECK_EQUAL("\xf0\x0f"s, ds.str()); } { - const auto obfuscation{"ff0fff0fff0fff0f"_hex_v_u8}; + const Obfuscation obfuscation{"ff0fff0fff0fff0f"_hex}; DataStream ds{"f00f"_hex}; - ds.Xor(obfuscation); + obfuscation(ds); BOOST_CHECK_EQUAL("\x0f\x00"s, ds.str()); } } @@ -613,7 +648,7 @@ BOOST_AUTO_TEST_CASE(buffered_reader_matches_autofile_random_content) const FlatFilePos pos{0, 0}; const FlatFileSeq test_file{m_args.GetDataDirBase(), "buffered_file_test_random", node::BLOCKFILE_CHUNK_SIZE}; - const auto obfuscation{m_rng.randbytes(Obfuscation::KEY_SIZE)}; + const Obfuscation obfuscation{m_rng.randbytes()}; // Write out the file with random content { @@ -668,7 +703,7 @@ BOOST_AUTO_TEST_CASE(buffered_writer_matches_autofile_random_content) const FlatFileSeq test_buffered{m_args.GetDataDirBase(), "buffered_write_test", node::BLOCKFILE_CHUNK_SIZE}; const FlatFileSeq test_direct{m_args.GetDataDirBase(), "direct_write_test", node::BLOCKFILE_CHUNK_SIZE}; - const auto obfuscation{m_rng.randbytes(Obfuscation::KEY_SIZE)}; + const Obfuscation obfuscation{m_rng.randbytes()}; { DataBuffer test_data{m_rng.randbytes(file_size)}; diff --git a/src/util/obfuscation.h b/src/util/obfuscation.h index 2c098cc931..5dbad54ca2 100644 --- a/src/util/obfuscation.h +++ b/src/util/obfuscation.h @@ -7,19 +7,32 @@ #include #include +#include +#include + +#include class Obfuscation { public: - static constexpr size_t KEY_SIZE{sizeof(uint64_t)}; + using KeyType = uint64_t; + static constexpr size_t KEY_SIZE{sizeof(KeyType)}; + + Obfuscation() : m_key{KEY_SIZE, std::byte{0}} {} + explicit Obfuscation(std::span key_bytes) + { + m_key = {key_bytes.begin(), key_bytes.end()}; + } + + operator bool() const { return ToKey() != 0; } - void Xor(Span write, Span key, size_t key_offset = 0) + void operator()(Span write, size_t key_offset = 0) const { - assert(key.size() == KEY_SIZE); + 
assert(m_key.size() == KEY_SIZE); key_offset %= KEY_SIZE; for (size_t i = 0, j = key_offset; i != write.size(); i++) { - write[i] ^= key[j++]; + write[i] ^= m_key[j++]; // This potentially acts on very many bytes of data, so it's // important that we calculate `j`, i.e. the `key` index in this @@ -29,6 +42,34 @@ class Obfuscation j = 0; } } + + template + void Serialize(Stream& s) const + { + s << m_key; + } + + template + void Unserialize(Stream& s) + { + s >> m_key; + if (m_key.size() != KEY_SIZE) throw std::ios_base::failure(strprintf("Obfuscation key size should be exactly %s bytes long", KEY_SIZE)); + } + + std::string HexKey() const + { + return HexStr(m_key); + } + +private: + std::vector m_key; + + KeyType ToKey() const + { + KeyType key{}; + std::memcpy(&key, m_key.data(), KEY_SIZE); + return key; + } }; #endif // BITCOIN_UTIL_OBFUSCATION_H From 98d548756e34e8825597368528fbc534e76a9f56 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 2 Sep 2025 18:28:47 +0000 Subject: [PATCH 102/356] optimization: migrate fixed-size obfuscation from `std::vector` to `uint64_t` All former `std::vector` keys were replaced with `uint64_t` (we still serialize them as vectors but convert immediately to `uint64_t` on load). This is why some tests still generate vector keys and convert them to `uint64_t` later instead of generating them directly. In `Obfuscation::Unserialize` we can safely throw an `std::ios_base::failure` since during mempool fuzzing `mempool_persist.cpp#L141` catches and ignored these errors. > C++ compiler .......................... GNU 14.2.0 | ns/byte | byte/s | err% | ins/byte | cyc/byte | IPC | bra/byte | miss% | total | benchmark |--------------------:|--------------------:|--------:|----------------:|----------------:|-------:|---------------:|--------:|----------:|:---------- | 0.04 | 28,365,698,819.44 | 0.0% | 0.34 | 0.13 | 2.714 | 0.07 | 0.0% | 5.33 | `ObfuscationBench` > C++ compiler .......................... 
Clang 20.1.7 | ns/byte | byte/s | err% | ins/byte | cyc/byte | IPC | bra/byte | miss% | total | benchmark |--------------------:|--------------------:|--------:|----------------:|----------------:|-------:|---------------:|--------:|----------:|:---------- | 0.08 | 13,012,464,203.00 | 0.0% | 0.65 | 0.28 | 2.338 | 0.13 | 0.8% | 5.50 | `ObfuscationBench` Co-authored-by: Hodlinator <172445034+hodlinator@users.noreply.github.com> Co-authored-by: Ryan Ofsky Github-Pull: #31144 Rebased-From: e7114fc6dc3488c2584d42779ff2b102e4d1db99 --- src/util/obfuscation.h | 68 ++++++++++++++++++++++++++++-------------- 1 file changed, 45 insertions(+), 23 deletions(-) diff --git a/src/util/obfuscation.h b/src/util/obfuscation.h index 5dbad54ca2..059e697eef 100644 --- a/src/util/obfuscation.h +++ b/src/util/obfuscation.h @@ -10,6 +10,9 @@ #include #include +#include +#include +#include #include class Obfuscation @@ -18,58 +21,77 @@ class Obfuscation using KeyType = uint64_t; static constexpr size_t KEY_SIZE{sizeof(KeyType)}; - Obfuscation() : m_key{KEY_SIZE, std::byte{0}} {} + Obfuscation() { SetRotations(0); } explicit Obfuscation(std::span key_bytes) { - m_key = {key_bytes.begin(), key_bytes.end()}; + SetRotations(ToKey(key_bytes)); } - operator bool() const { return ToKey() != 0; } + operator bool() const { return m_rotations[0] != 0; } - void operator()(Span write, size_t key_offset = 0) const + void operator()(Span target, size_t key_offset = 0) const { - assert(m_key.size() == KEY_SIZE); - key_offset %= KEY_SIZE; - - for (size_t i = 0, j = key_offset; i != write.size(); i++) { - write[i] ^= m_key[j++]; - - // This potentially acts on very many bytes of data, so it's - // important that we calculate `j`, i.e. the `key` index in this - // way instead of doing a %, which would effectively be a division - // for each byte Xor'd -- much slower than need be. 
- if (j == KEY_SIZE) - j = 0; + if (!*this) return; + + const KeyType rot_key{m_rotations[key_offset % KEY_SIZE]}; // Continue obfuscation from where we left off + for (; target.size() >= KEY_SIZE; target = target.subspan(KEY_SIZE)) { + XorWord(target.first(KEY_SIZE), rot_key); } + XorWord(target, rot_key); } template void Serialize(Stream& s) const { - s << m_key; + // Use vector serialization for convenient compact size prefix. + std::vector bytes{KEY_SIZE}; + std::memcpy(bytes.data(), &m_rotations[0], KEY_SIZE); + s << bytes; } template void Unserialize(Stream& s) { - s >> m_key; - if (m_key.size() != KEY_SIZE) throw std::ios_base::failure(strprintf("Obfuscation key size should be exactly %s bytes long", KEY_SIZE)); + std::vector bytes{KEY_SIZE}; + s >> bytes; + if (bytes.size() != KEY_SIZE) throw std::ios_base::failure(strprintf("Obfuscation key size should be exactly %s bytes long", KEY_SIZE)); + SetRotations(ToKey(std::span(bytes))); } std::string HexKey() const { - return HexStr(m_key); + return HexStr(std::bit_cast>(m_rotations[0])); } private: - std::vector m_key; + // Cached key rotations for different offsets. 
+ std::array m_rotations; + + void SetRotations(KeyType key) + { + for (size_t i{0}; i < KEY_SIZE; ++i) { + int key_rotation_bits{int(CHAR_BIT * i)}; + if constexpr (std::endian::native == std::endian::big) key_rotation_bits *= -1; + m_rotations[i] = std::rotr(key, key_rotation_bits); + } + } - KeyType ToKey() const + static KeyType ToKey(std::span key_span) { KeyType key{}; - std::memcpy(&key, m_key.data(), KEY_SIZE); + std::memcpy(&key, key_span.data(), KEY_SIZE); return key; } + + static void XorWord(Span target, KeyType key) + { + assert(target.size() <= KEY_SIZE); + if (target.empty()) return; + KeyType raw{}; + std::memcpy(&raw, target.data(), target.size()); + raw ^= key; + std::memcpy(target.data(), &raw, target.size()); + } }; #endif // BITCOIN_UTIL_OBFUSCATION_H From 3b9d56d33d6ee9e49e53893c08a65745e7a10e03 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 2 Sep 2025 18:46:19 +0000 Subject: [PATCH 103/356] optimization: peel align-head and unroll body to 64 bytes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Benchmarks indicated that obfuscating multiple bytes already gives an order of magnitude speed-up, but: * GCC still emitted scalar code; * Clang’s auto-vectorized loop ran on the slow unaligned-load path. Fix contains: * peeling the misaligned head enabled the hot loop starting at an 8-byte address; * `std::assume_aligned<8>` tells the optimizer the promise holds - required to keep Apple Clang happy; * manually unrolling the body to 64 bytes enabled GCC to auto-vectorize. Note that `target.size() > KEY_SIZE` condition is just an optimization, the aligned and unaligned loops work without it as well - it's why the alignment calculation still contains `std::min`. > C++ compiler .......................... 
GNU 14.2.0 | ns/byte | byte/s | err% | ins/byte | cyc/byte | IPC | bra/byte | miss% | total | benchmark |--------------------:|--------------------:|--------:|----------------:|----------------:|-------:|---------------:|--------:|----------:|:---------- | 0.03 | 32,464,658,919.11 | 0.0% | 0.50 | 0.11 | 4.474 | 0.08 | 0.0% | 5.29 | `ObfuscationBench` > C++ compiler .......................... Clang 20.1.7 | ns/byte | byte/s | err% | ins/byte | cyc/byte | IPC | bra/byte | miss% | total | benchmark |--------------------:|--------------------:|--------:|----------------:|----------------:|-------:|---------------:|--------:|----------:|:---------- | 0.02 | 41,231,547,045.17 | 0.0% | 0.30 | 0.09 | 3.463 | 0.02 | 0.0% | 5.47 | `ObfuscationBench` Co-authored-by: Hodlinator <172445034+hodlinator@users.noreply.github.com> Github-Pull: #31144 Rebased-From: 248b6a27c351690d3596711cc36b8102977adeab --- src/util/obfuscation.h | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/src/util/obfuscation.h b/src/util/obfuscation.h index 059e697eef..9c92e13b58 100644 --- a/src/util/obfuscation.h +++ b/src/util/obfuscation.h @@ -14,6 +14,7 @@ #include #include #include +#include class Obfuscation { @@ -33,9 +34,26 @@ class Obfuscation { if (!*this) return; - const KeyType rot_key{m_rotations[key_offset % KEY_SIZE]}; // Continue obfuscation from where we left off - for (; target.size() >= KEY_SIZE; target = target.subspan(KEY_SIZE)) { - XorWord(target.first(KEY_SIZE), rot_key); + KeyType rot_key{m_rotations[key_offset % KEY_SIZE]}; // Continue obfuscation from where we left off + if (target.size() > KEY_SIZE) { + // Obfuscate until 64-bit alignment boundary + if (const auto misalign{std::bit_cast(target.data()) % KEY_SIZE}) { + const size_t alignment{std::min(KEY_SIZE - misalign, target.size())}; + XorWord(target.first(alignment), rot_key); + + target = {std::assume_aligned(target.data() + alignment), target.size() - alignment}; + rot_key = 
m_rotations[(key_offset + alignment) % KEY_SIZE]; + } + // Aligned obfuscation in 64-byte chunks + for (constexpr auto unroll{8}; target.size() >= KEY_SIZE * unroll; target = target.subspan(KEY_SIZE * unroll)) { + for (size_t i{0}; i < unroll; ++i) { + XorWord(target.subspan(i * KEY_SIZE, KEY_SIZE), rot_key); + } + } + // Aligned obfuscation in 64-bit chunks + for (; target.size() >= KEY_SIZE; target = target.subspan(KEY_SIZE)) { + XorWord(target.first(KEY_SIZE), rot_key); + } } XorWord(target, rot_key); } From c8cd0f1e8ed685f591109195e6aed843e9f0786a Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 2 Sep 2025 19:05:56 +0000 Subject: [PATCH 104/356] Diff-minimise --- src/bench/CMakeLists.txt | 2 +- src/bench/{obfuscation.cpp => xor.cpp} | 6 ++-- src/dbwrapper.cpp | 44 ++++++++++++++++++++------ src/dbwrapper.h | 18 +++++++---- src/node/blockstorage.cpp | 22 ++++++------- src/node/blockstorage.h | 2 +- src/node/mempool_persist.cpp | 19 +++++------ src/streams.cpp | 6 ++-- src/streams.h | 15 +++++++-- src/test/dbwrapper_tests.cpp | 22 ++++++------- src/test/fuzz/autofile.cpp | 2 +- src/test/fuzz/buffered_file.cpp | 2 +- src/test/streams_tests.cpp | 30 ++++++++++++------ 13 files changed, 120 insertions(+), 70 deletions(-) rename src/bench/{obfuscation.cpp => xor.cpp} (83%) diff --git a/src/bench/CMakeLists.txt b/src/bench/CMakeLists.txt index eecc197b7f..16eb29250f 100644 --- a/src/bench/CMakeLists.txt +++ b/src/bench/CMakeLists.txt @@ -34,7 +34,6 @@ add_executable(bench_bitcoin mempool_eviction.cpp mempool_stress.cpp merkle_root.cpp - obfuscation.cpp parse_hex.cpp peer_eviction.cpp poly1305.cpp @@ -50,6 +49,7 @@ add_executable(bench_bitcoin strencodings.cpp util_time.cpp verify_script.cpp + xor.cpp ) include(TargetDataSources) diff --git a/src/bench/obfuscation.cpp b/src/bench/xor.cpp similarity index 83% rename from src/bench/obfuscation.cpp rename to src/bench/xor.cpp index 178be56a5d..020de08612 100644 --- a/src/bench/obfuscation.cpp +++ b/src/bench/xor.cpp 
@@ -4,12 +4,14 @@ #include #include +#include +#include #include #include #include -static void ObfuscationBench(benchmark::Bench& bench) +static void Xor(benchmark::Bench& bench) { FastRandomContext frc{/*fDeterministic=*/true}; auto data{frc.randbytes(1024)}; @@ -22,4 +24,4 @@ static void ObfuscationBench(benchmark::Bench& bench) }); } -BENCHMARK(ObfuscationBench, benchmark::PriorityLevel::HIGH); +BENCHMARK(Xor, benchmark::PriorityLevel::HIGH); diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp index 6e9ae2ef45..b7fdc2878c 100644 --- a/src/dbwrapper.cpp +++ b/src/dbwrapper.cpp @@ -172,7 +172,7 @@ void CDBBatch::Clear() void CDBBatch::WriteImpl(Span key, DataStream& ssValue) { leveldb::Slice slKey(CharCast(key.data()), key.size()); - dbwrapper_private::GetObfuscation(parent)(ssValue); + ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent)); leveldb::Slice slValue(CharCast(ssValue.data()), ssValue.size()); m_impl_batch->batch.Put(slKey, slValue); // LevelDB serializes writes as: @@ -256,14 +256,22 @@ CDBWrapper::CDBWrapper(const DBParams& params) LogPrintf("Finished database compaction of %s\n", fs::PathToString(params.path)); } - assert(!m_obfuscation); // Needed for unobfuscated Read()/Write() below - if (!Read(OBFUSCATION_KEY_KEY, m_obfuscation) && params.obfuscate && IsEmpty()) { - // Generate, write and read back the new obfuscation key, making sure we don't obfuscate the key itself - Write(OBFUSCATION_KEY_KEY, FastRandomContext{}.randbytes(Obfuscation::KEY_SIZE)); - Read(OBFUSCATION_KEY_KEY, m_obfuscation); - LogInfo("Wrote new obfuscation key for %s: %s", fs::PathToString(params.path), m_obfuscation.HexKey()); + assert(!obfuscate_key); // Needed for unobfuscated Read()/Write() below + + bool key_exists = Read(OBFUSCATE_KEY_KEY, obfuscate_key); + + if (!key_exists && params.obfuscate && IsEmpty()) { + // Initialize non-degenerate obfuscation if it won't upset + // existing, non-obfuscated data. 
+ std::vector new_key = CreateObfuscateKey(); + + // Write `new_key` so we don't obfuscate the key with itself + Write(OBFUSCATE_KEY_KEY, new_key); + Read(CDBWrapper::OBFUSCATE_KEY_KEY, obfuscate_key); + + LogInfo("Wrote new obfuscation key for %s: %s", fs::PathToString(params.path), obfuscate_key.HexKey()); } - LogInfo("Using obfuscation key for %s: %s", fs::PathToString(params.path), m_obfuscation.HexKey()); + LogInfo("Using obfuscation key for %s: %s", fs::PathToString(params.path), obfuscate_key.HexKey()); } CDBWrapper::~CDBWrapper() @@ -308,6 +316,22 @@ size_t CDBWrapper::DynamicMemoryUsage() const return parsed.value(); } +// Prefixed with null character to avoid collisions with other keys +// +// We must use a string constructor which specifies length so that we copy +// past the null-terminator. +const std::string CDBWrapper::OBFUSCATE_KEY_KEY("\000obfuscate_key", 14); + +/** + * Returns a string (consisting of 8 random bytes) suitable for use as an + * obfuscating XOR key. + */ +std::vector CDBWrapper::CreateObfuscateKey() const +{ + auto ret = FastRandomContext{}.randbytes(Obfuscation::KEY_SIZE); + return ret; +} + std::optional CDBWrapper::ReadImpl(Span key) const { leveldb::Slice slKey(CharCast(key.data()), key.size()); @@ -391,9 +415,9 @@ void CDBIterator::Next() { m_impl_iter->iter->Next(); } namespace dbwrapper_private { -const Obfuscation& GetObfuscation(const CDBWrapper& w) +const Obfuscation& GetObfuscateKey(const CDBWrapper& w) { - return w.m_obfuscation; + return w.obfuscate_key; } } // namespace dbwrapper_private diff --git a/src/dbwrapper.h b/src/dbwrapper.h index 6779eeebb5..5b3afbf907 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -18,6 +18,7 @@ #include #include #include +#include static const size_t DBWRAPPER_PREALLOC_KEY_SIZE = 64; static const size_t DBWRAPPER_PREALLOC_VALUE_SIZE = 1024; @@ -62,7 +63,8 @@ namespace dbwrapper_private { * Database obfuscation should be considered an implementation detail of the * specific database. 
*/ -const Obfuscation& GetObfuscation(const CDBWrapper&); +const Obfuscation& GetObfuscateKey(const CDBWrapper&); + }; // namespace dbwrapper_private bool DestroyDB(const std::string& path_str); @@ -166,7 +168,7 @@ class CDBIterator template bool GetValue(V& value) { try { DataStream ssValue{GetValueImpl()}; - dbwrapper_private::GetObfuscation(parent)(ssValue); + ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent)); ssValue >> value; } catch (const std::exception&) { return false; @@ -179,7 +181,7 @@ struct LevelDBContext; class CDBWrapper { - friend const Obfuscation& dbwrapper_private::GetObfuscation(const CDBWrapper&); + friend const Obfuscation& dbwrapper_private::GetObfuscateKey(const CDBWrapper&); private: //! holds all leveldb-specific fields of this class std::unique_ptr m_db_context; @@ -188,10 +190,12 @@ class CDBWrapper std::string m_name; //! optional XOR-obfuscation of the database - Obfuscation m_obfuscation; + Obfuscation obfuscate_key; + + //! the key under which the obfuscation key is stored + static const std::string OBFUSCATE_KEY_KEY; - //! obfuscation key storage key, null-prefixed to avoid collisions - inline static const std::string OBFUSCATION_KEY_KEY{"\000obfuscate_key", 14}; // explicit size to avoid truncation at leading \0 + std::vector CreateObfuscateKey() const; //! 
path to filesystem storage const fs::path m_path; @@ -223,7 +227,7 @@ class CDBWrapper } try { DataStream ssValue{MakeByteSpan(*strValue)}; - m_obfuscation(ssValue); + ssValue.Xor(obfuscate_key); ssValue >> value; } catch (const std::exception&) { return false; diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index ebbca1e954..4005bfc396 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -792,13 +792,13 @@ void BlockManager::UnlinkPrunedFiles(const std::set& setFilesToPrune) const AutoFile BlockManager::OpenBlockFile(const FlatFilePos& pos, bool fReadOnly) const { - return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_obfuscation}; + return AutoFile{m_block_file_seq.Open(pos, fReadOnly), m_xor_key}; } /** Open an undo file (rev?????.dat) */ AutoFile BlockManager::OpenUndoFile(const FlatFilePos& pos, bool fReadOnly) const { - return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_obfuscation}; + return AutoFile{m_undo_file_seq.Open(pos, fReadOnly), m_xor_key}; } fs::path BlockManager::GetBlockPosFilename(const FlatFilePos& pos) const @@ -1137,7 +1137,7 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) { // Bytes are serialized without length indicator, so this is also the exact // size of the XOR-key file. - std::array obfuscation{}; + std::array xor_key{}; // Consider this to be the first run if the blocksdir contains only hidden // files (those which start with a .). Checking for a fully-empty dir would @@ -1154,14 +1154,14 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) if (opts.use_xor && first_run) { // Only use random fresh key when the boolean option is set and on the // very first start of the program. - FastRandomContext{}.fillrand(obfuscation); + FastRandomContext{}.fillrand(xor_key); } const fs::path xor_key_path{opts.blocks_dir / "xor.dat"}; if (fs::exists(xor_key_path)) { // A pre-existing xor key file has priority. 
AutoFile xor_key_file{fsbridge::fopen(xor_key_path, "rb")}; - xor_key_file >> obfuscation; + xor_key_file >> xor_key; } else { // Create initial or missing xor key file AutoFile xor_key_file{fsbridge::fopen(xor_key_path, @@ -1171,7 +1171,7 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) "wbx" #endif )}; - xor_key_file << obfuscation; + xor_key_file << xor_key; if (xor_key_file.fclose() != 0) { throw std::runtime_error{strprintf("Error closing XOR key file %s: %s", fs::PathToString(xor_key_path), @@ -1179,20 +1179,20 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) } } // If the user disabled the key, it must be zero. - if (!opts.use_xor && obfuscation != decltype(obfuscation){}) { + if (!opts.use_xor && xor_key != decltype(xor_key){}) { throw std::runtime_error{ strprintf("The blocksdir XOR-key can not be disabled when a random key was already stored! " "Stored key: '%s', stored path: '%s'.", - HexStr(obfuscation), fs::PathToString(xor_key_path)), + HexStr(xor_key), fs::PathToString(xor_key_path)), }; } - LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(obfuscation)); - return Obfuscation{obfuscation}; + LogInfo("Using obfuscation key for blocksdir *.dat files (%s): '%s'\n", fs::PathToString(opts.blocks_dir), HexStr(xor_key)); + return Obfuscation{xor_key}; } BlockManager::BlockManager(const util::SignalInterrupt& interrupt, Options opts) : m_prune_mode{opts.prune_target > 0}, - m_obfuscation{InitBlocksdirXorKey(opts)}, + m_xor_key{InitBlocksdirXorKey(opts)}, m_opts{std::move(opts)}, m_block_file_seq{FlatFileSeq{m_opts.blocks_dir, "blk", m_opts.fast_prune ? 
0x4000 /* 16kB */ : BLOCKFILE_CHUNK_SIZE}}, m_undo_file_seq{FlatFileSeq{m_opts.blocks_dir, "rev", UNDOFILE_CHUNK_SIZE}}, diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 9022139a07..ff00e9b292 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -235,7 +235,7 @@ class BlockManager const bool m_prune_mode; - const Obfuscation m_obfuscation; + const Obfuscation m_xor_key; /** Dirty block index entries. */ std::set m_dirty_blockindex; diff --git a/src/node/mempool_persist.cpp b/src/node/mempool_persist.cpp index 99dfdb6deb..f127a94768 100644 --- a/src/node/mempool_persist.cpp +++ b/src/node/mempool_persist.cpp @@ -60,17 +60,15 @@ bool LoadMempool(CTxMemPool& pool, const fs::path& load_path, Chainstate& active try { uint64_t version; file >> version; - + Obfuscation xor_key{}; if (version == MEMPOOL_DUMP_VERSION_NO_XOR_KEY) { - file.SetObfuscation({}); + // Leave XOR-key empty } else if (version == MEMPOOL_DUMP_VERSION) { - Obfuscation obfuscation; - file >> obfuscation; - file.SetObfuscation(obfuscation); + file >> xor_key; } else { return false; } - + file.SetXor(xor_key); uint64_t total_txns_to_load; file >> total_txns_to_load; uint64_t txns_tried = 0; @@ -181,13 +179,12 @@ bool DumpMempool(const CTxMemPool& pool, const fs::path& dump_path, FopenFn mock const uint64_t version{pool.m_opts.persist_v1_dat ? 
MEMPOOL_DUMP_VERSION_NO_XOR_KEY : MEMPOOL_DUMP_VERSION}; file << version; + Obfuscation xor_key{}; if (!pool.m_opts.persist_v1_dat) { - const Obfuscation obfuscation{FastRandomContext{}.randbytes()}; - file << obfuscation; - file.SetObfuscation(obfuscation); - } else { - file.SetObfuscation({}); + xor_key = Obfuscation{FastRandomContext{}.randbytes()}; + file << xor_key; } + file.SetXor(xor_key); uint64_t mempool_transactions_to_write(vinfo.size()); file << mempool_transactions_to_write; diff --git a/src/streams.cpp b/src/streams.cpp index d0025b2c44..dce538db42 100644 --- a/src/streams.cpp +++ b/src/streams.cpp @@ -21,12 +21,12 @@ AutoFile::AutoFile(std::FILE* file, const Obfuscation& obfuscation) : m_file{fil std::size_t AutoFile::detail_fread(Span dst) { if (!m_file) throw std::ios_base::failure("AutoFile::read: file handle is nullptr"); - const size_t ret = std::fread(dst.data(), 1, dst.size(), m_file); + size_t ret = std::fread(dst.data(), 1, dst.size(), m_file); if (m_obfuscation) { - if (!m_position) throw std::ios_base::failure("AutoFile::read: position unknown"); + if (!m_position.has_value()) throw std::ios_base::failure("AutoFile::read: position unknown"); m_obfuscation(dst.subspan(0, ret), *m_position); } - if (m_position) *m_position += ret; + if (m_position.has_value()) *m_position += ret; return ret; } diff --git a/src/streams.h b/src/streams.h index 641211865f..5cc79cfa84 100644 --- a/src/streams.h +++ b/src/streams.h @@ -26,6 +26,7 @@ #include #include #include +#include #include /* Minimal stream for overwriting and/or appending to an existing byte vector @@ -244,13 +245,23 @@ class DataStream return (*this); } - template + template DataStream& operator>>(T&& obj) { ::Unserialize(*this, obj); return (*this); } + /** + * XOR the contents of this stream with a certain key. + * + * @param[in] key The key used to XOR the data in this stream. 
+ */ + void Xor(const Obfuscation& key) + { + key(*this); + } + /** Compute total memory usage of this object (own memory + any dynamic memory). */ size_t GetMemoryUsage() const noexcept; }; @@ -424,7 +435,7 @@ class AutoFile bool IsNull() const { return m_file == nullptr; } /** Continue with a different XOR key */ - void SetObfuscation(const Obfuscation& obfuscation) { m_obfuscation = obfuscation; } + void SetXor(const Obfuscation& obfuscation) { m_obfuscation = obfuscation; } /** Implementation detail, only used internally. */ std::size_t detail_fread(Span dst); diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp index cd0f347b66..781f43b2b6 100644 --- a/src/test/dbwrapper_tests.cpp +++ b/src/test/dbwrapper_tests.cpp @@ -33,8 +33,8 @@ BOOST_AUTO_TEST_CASE(dbwrapper) BOOST_CHECK_EQUAL(obfuscate, !dbw.IsEmpty()); // Ensure that we're doing real obfuscation when obfuscate=true - obfuscation = dbwrapper_private::GetObfuscation(dbw); - BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscation(dbw)); + obfuscation = dbwrapper_private::GetObfuscateKey(dbw); + BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscateKey(dbw)); for (uint8_t k{0}; k < 10; ++k) { uint8_t key{k}; @@ -47,7 +47,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper) // Verify that the obfuscation key is never obfuscated { CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .obfuscate = false}}; - BOOST_CHECK_EQUAL(obfuscation, dbwrapper_private::GetObfuscation(dbw)); + BOOST_CHECK_EQUAL(obfuscation, dbwrapper_private::GetObfuscateKey(dbw)); } // Read back the values @@ -55,8 +55,8 @@ BOOST_AUTO_TEST_CASE(dbwrapper) CDBWrapper dbw{{.path = path, .cache_bytes = CACHE_SIZE, .obfuscate = obfuscate}}; // Ensure obfuscation is read back correctly - BOOST_CHECK_EQUAL(obfuscation, dbwrapper_private::GetObfuscation(dbw)); - BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscation(dbw)); + BOOST_CHECK_EQUAL(obfuscation, dbwrapper_private::GetObfuscateKey(dbw)); + 
BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscateKey(dbw)); // Verify all written values for (const auto& [key, expected_value] : key_values) { @@ -80,7 +80,7 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data) bool res_bool; // Ensure that we're doing real obfuscation when obfuscate=true - BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscation(dbw)); + BOOST_CHECK_EQUAL(obfuscate, dbwrapper_private::GetObfuscateKey(dbw)); //Simulate block raw data - "b + block hash" std::string key_block = "b" + m_rng.rand256().ToString(); @@ -139,13 +139,13 @@ BOOST_AUTO_TEST_CASE(dbwrapper_basic_data) std::string file_option_tag = "F"; uint8_t filename_length = m_rng.randbits(8); std::string filename = "randomfilename"; - std::string key_file_option = strprintf("%s%01x%s", file_option_tag, filename_length, filename); + std::string key_file_option = strprintf("%s%01x%s", file_option_tag,filename_length,filename); bool in_file_bool = m_rng.randbool(); BOOST_CHECK(dbw.Write(key_file_option, in_file_bool)); BOOST_CHECK(dbw.Read(key_file_option, res_bool)); BOOST_CHECK_EQUAL(res_bool, in_file_bool); - } + } } // Test batch operations @@ -254,8 +254,8 @@ BOOST_AUTO_TEST_CASE(existing_data_no_obfuscate) BOOST_CHECK(odbw.Read(key, res2)); BOOST_CHECK_EQUAL(res2.ToString(), in.ToString()); - BOOST_CHECK(!odbw.IsEmpty()); - BOOST_CHECK(!dbwrapper_private::GetObfuscation(odbw)); // The key should be an empty string + BOOST_CHECK(!odbw.IsEmpty()); // There should be existing data + BOOST_CHECK(!dbwrapper_private::GetObfuscateKey(odbw)); // The key should be an empty string uint256 in2 = m_rng.rand256(); uint256 res3; @@ -292,7 +292,7 @@ BOOST_AUTO_TEST_CASE(existing_data_reindex) // Check that the key/val we wrote with unobfuscated wrapper doesn't exist uint256 res2; BOOST_CHECK(!odbw.Read(key, res2)); - BOOST_CHECK(dbwrapper_private::GetObfuscation(odbw)); + BOOST_CHECK(dbwrapper_private::GetObfuscateKey(odbw)); uint256 in2 = m_rng.rand256(); uint256 res3; diff --git 
a/src/test/fuzz/autofile.cpp b/src/test/fuzz/autofile.cpp index 6038c04673..d6ac59786d 100644 --- a/src/test/fuzz/autofile.cpp +++ b/src/test/fuzz/autofile.cpp @@ -4,8 +4,8 @@ #include #include -#include #include +#include #include #include diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp index f4f907acbc..1bc3b4ba8f 100644 --- a/src/test/fuzz/buffered_file.cpp +++ b/src/test/fuzz/buffered_file.cpp @@ -4,8 +4,8 @@ #include #include -#include #include +#include #include #include diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 870666b41e..20f5714947 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -114,11 +114,11 @@ BOOST_AUTO_TEST_CASE(xor_file) auto raw_file{[&](const auto& mode) { return fsbridge::fopen(xor_path, mode); }}; const std::vector test1{1, 2, 3}; const std::vector test2{4, 5}; - const Obfuscation obfuscation{"ff00ff00ff00ff00"_hex}; + const Obfuscation xor_pat{"ff00ff00ff00ff00"_hex}; { // Check errors for missing file - AutoFile xor_file{raw_file("rb"), obfuscation}; + AutoFile xor_file{raw_file("rb"), xor_pat}; BOOST_CHECK_EXCEPTION(xor_file << std::byte{}, std::ios_base::failure, HasReason{"AutoFile::write: file handle is nullptr"}); BOOST_CHECK_EXCEPTION(xor_file >> std::byte{}, std::ios_base::failure, HasReason{"AutoFile::read: file handle is nullptr"}); BOOST_CHECK_EXCEPTION(xor_file.ignore(1), std::ios_base::failure, HasReason{"AutoFile::ignore: file handle is nullptr"}); @@ -130,7 +130,7 @@ BOOST_AUTO_TEST_CASE(xor_file) #else const char* mode = "wbx"; #endif - AutoFile xor_file{raw_file(mode), obfuscation}; + AutoFile xor_file{raw_file(mode), xor_pat}; xor_file << test1 << test2; BOOST_REQUIRE_EQUAL(xor_file.fclose(), 0); } @@ -144,7 +144,7 @@ BOOST_AUTO_TEST_CASE(xor_file) BOOST_CHECK_EXCEPTION(non_xor_file.ignore(1), std::ios_base::failure, HasReason{"AutoFile::ignore: end of file"}); } { - AutoFile xor_file{raw_file("rb"), obfuscation}; + AutoFile 
xor_file{raw_file("rb"), xor_pat}; std::vector read1, read2; xor_file >> read1 >> read2; BOOST_CHECK_EQUAL(HexStr(read1), HexStr(test1)); @@ -153,7 +153,7 @@ BOOST_AUTO_TEST_CASE(xor_file) BOOST_CHECK_EXCEPTION(xor_file >> std::byte{}, std::ios_base::failure, HasReason{"AutoFile::read: end of file"}); } { - AutoFile xor_file{raw_file("rb"), obfuscation}; + AutoFile xor_file{raw_file("rb"), xor_pat}; std::vector read2; // Check that ignore works xor_file.ignore(4); @@ -169,7 +169,7 @@ BOOST_AUTO_TEST_CASE(streams_vector_writer) { unsigned char a(1); unsigned char b(2); - unsigned char bytes[] = {3, 4, 5, 6}; + unsigned char bytes[] = { 3, 4, 5, 6 }; std::vector vch; // Each test runs twice. Serializing a second time at the same starting @@ -316,25 +316,37 @@ BOOST_AUTO_TEST_CASE(bitstream_reader_writer) BOOST_AUTO_TEST_CASE(streams_serializedata_xor) { + std::vector in; + // Degenerate case { - DataStream ds{}; + DataStream ds{in}; Obfuscation{}(ds); BOOST_CHECK_EQUAL(""s, ds.str()); } + in.push_back(std::byte{0x0f}); + in.push_back(std::byte{0xf0}); + + // Single character key { const Obfuscation obfuscation{"ffffffffffffffff"_hex}; - DataStream ds{"0ff0"_hex}; + DataStream ds{in}; obfuscation(ds); BOOST_CHECK_EQUAL("\xf0\x0f"s, ds.str()); } + // Multi character key + + in.clear(); + in.push_back(std::byte{0xf0}); + in.push_back(std::byte{0x0f}); + { const Obfuscation obfuscation{"ff0fff0fff0fff0f"_hex}; - DataStream ds{"f00f"_hex}; + DataStream ds{in}; obfuscation(ds); BOOST_CHECK_EQUAL("\x0f\x00"s, ds.str()); } From 6b3c1dbc5c0df4357ee7f57ac238bcdff55526af Mon Sep 17 00:00:00 2001 From: amisha Date: Wed, 10 Sep 2025 21:04:57 +0530 Subject: [PATCH 105/356] contrib: fix using macdploy script without translations. QT translations are optional, but the script would error when 'translations_dir' falls back to its default value NULL. This PR fixes it by moving the set-up of QT translations under the check for 'translations_dir' presence. 
Github-Pull: #33482 Rebased-From: 7b5261f7ef3d88361204c40eb10c0d9dc44f5ed7 --- contrib/macdeploy/macdeployqtplus | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus index eaa7b896be..5120537626 100755 --- a/contrib/macdeploy/macdeployqtplus +++ b/contrib/macdeploy/macdeployqtplus @@ -465,18 +465,18 @@ if config.translations_dir: sys.stderr.write(f"Error: Could not find translation dir \"{config.translations_dir[0]}\"\n") sys.exit(1) -print("+ Adding Qt translations +") + print("+ Adding Qt translations +") -translations = Path(config.translations_dir[0]) + translations = Path(config.translations_dir[0]) -regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)') + regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)') -lang_files = [x for x in translations.iterdir() if regex.match(x.name)] + lang_files = [x for x in translations.iterdir() if regex.match(x.name)] -for file in lang_files: - if verbose: - print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name)) - shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name)) + for file in lang_files: + if verbose: + print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name)) + shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name)) # ------------------------------------------------ From eea16f7de7c4382e4491f3b018ecd0c36678affb Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 3 Oct 2025 14:27:43 +0100 Subject: [PATCH 106/356] build: bump version to v29.2rc2 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 05a86a1d97..681926a1e4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,7 +30,7 @@ set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) set(CLIENT_VERSION_MINOR 2) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 1) 
+set(CLIENT_VERSION_RC 2) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From 513cef75ee06bc5d310a22d366a5f3c815aa1499 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 3 Oct 2025 14:39:17 +0100 Subject: [PATCH 107/356] doc: update manual pages for v29.2rc2 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index a8dc092a6c..707ccfc322 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-CLI "1" "September 2025" "bitcoin-cli v29.2.0rc1" "User Commands" +.TH BITCOIN-CLI "1" "October 2025" "bitcoin-cli v29.2.0rc2" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc1 +bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc2 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc1 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.2.0rc1 +Bitcoin Core RPC client version v29.2.0rc2 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. .PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index 7821b8fb44..a02b52ea2e 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-QT "1" "September 2025" "bitcoin-qt v29.2.0rc1" "User Commands" +.TH BITCOIN-QT "1" "October 2025" "bitcoin-qt v29.2.0rc2" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.2.0rc1 +bitcoin-qt \- manual page for bitcoin-qt v29.2.0rc2 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.2.0rc1 +Bitcoin Core version v29.2.0rc2 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index a14a6be602..a7df27545c 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-TX "1" "September 2025" "bitcoin-tx v29.2.0rc1" "User Commands" +.TH BITCOIN-TX "1" "October 2025" "bitcoin-tx v29.2.0rc2" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc1 +bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc2 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc1 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.2.0rc1 +Bitcoin Core bitcoin\-tx utility version v29.2.0rc2 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index e0cc27e2d7..d127ddf4cb 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-UTIL "1" "September 2025" "bitcoin-util v29.2.0rc1" "User Commands" +.TH BITCOIN-UTIL "1" "October 2025" "bitcoin-util v29.2.0rc2" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.2.0rc1 +bitcoin-util \- manual page for bitcoin-util v29.2.0rc2 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.2.0rc1 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.2.0rc1 +Bitcoin Core bitcoin\-util utility version v29.2.0rc2 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index 58bbf2715b..b471ac1a24 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-WALLET "1" "September 2025" "bitcoin-wallet v29.2.0rc1" "User Commands" +.TH BITCOIN-WALLET "1" "October 2025" "bitcoin-wallet v29.2.0rc2" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0rc1 +bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0rc2 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.2.0rc1 +Bitcoin Core bitcoin\-wallet utility version v29.2.0rc2 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index 0846f3e061..3e7e394b1a 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIND "1" "September 2025" "bitcoind v29.2.0rc1" "User Commands" +.TH BITCOIND "1" "October 2025" "bitcoind v29.2.0rc2" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.2.0rc1 +bitcoind \- manual page for bitcoind v29.2.0rc2 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.2.0rc1 +Bitcoin Core daemon version v29.2.0rc2 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. .PP From d82fc69829cd8cabbaf2c3a969597b40c32edc86 Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 3 Oct 2025 18:04:23 +0100 Subject: [PATCH 108/356] doc: update release notes for 29.2rc2 --- doc/release-notes.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/release-notes.md b/doc/release-notes.md index 8a79e99ad2..30cb763a0c 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -53,6 +53,7 @@ Notable changes ### CI +- #32989 ci: Migrate CI to hosted Cirrus Runners - #32999 ci: Use APT_LLVM_V in msan task - #33099 ci: allow for any libc++ intrumentation & use it for TSAN - #33258 ci: use LLVM 21 @@ -66,12 +67,14 @@ Notable changes - #33310 trace: Workaround GCC bug compiling with old systemtap - #33340 Fix benchmark CSV output +- #33482 contrib: fix macOS deployment with no translations Credits ======= Thanks to everyone who directly contributed to this release: +- Amisha Chhajed - Eugene Siegel - fanquake - Greg Sanders @@ -82,6 +85,7 @@ Thanks to everyone who directly contributed to this release: - Sebastian Falbesoner - Sjors Provoost - Vasil Dimov +- Will Clark As well as to everyone that helped with translations on [Transifex](https://explore.transifex.com/bitcoin/bitcoin/). 
From abf4a6eeaee116917dafd56eb9caee03e13048d2 Mon Sep 17 00:00:00 2001 From: fanquake Date: Tue, 7 Oct 2025 13:31:04 +0100 Subject: [PATCH 109/356] build: fix depends Qt download link --- depends/packages/qt.mk | 2 +- doc/dependencies.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index d41ac4e784..abd8a6fa8d 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -1,6 +1,6 @@ package=qt $(package)_version=5.15.16 -$(package)_download_path=https://download.qt.io/official_releases/qt/5.15/$($(package)_version)/submodules +$(package)_download_path=https://download.qt.io/archive/qt/5.15/$($(package)_version)/submodules $(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz $(package)_file_name=qtbase-$($(package)_suffix) $(package)_sha256_hash=b04815058c18058b6ba837206756a2c87d1391f07a0dcb0dd314f970fd041592 diff --git a/doc/dependencies.md b/doc/dependencies.md index 7c866a433d..d3f6b74367 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -30,7 +30,7 @@ Bitcoin Core requires one of the following compilers. 
| [Fontconfig](../depends/packages/fontconfig.mk) (gui) | [link](https://www.freedesktop.org/wiki/Software/fontconfig/) | [2.12.6](https://github.com/bitcoin/bitcoin/pull/23495) | 2.6 | Yes | | [FreeType](../depends/packages/freetype.mk) (gui) | [link](https://freetype.org) | [2.11.0](https://github.com/bitcoin/bitcoin/commit/01544dd78ccc0b0474571da854e27adef97137fb) | 2.3.0 | Yes | | [qrencode](../depends/packages/qrencode.mk) (gui) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | N/A | No | -| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/official_releases/qt/) | [5.15.16](https://github.com/bitcoin/bitcoin/pull/30774) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | +| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/archive/qt/) | [5.15.16](https://github.com/bitcoin/bitcoin/pull/30774) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | | [ZeroMQ](../depends/packages/zeromq.mk) (notifications) | [link](https://github.com/zeromq/libzmq/releases) | [4.3.4](https://github.com/bitcoin/bitcoin/pull/23956) | 4.0.0 | No | | [Berkeley DB](../depends/packages/bdb.mk) (legacy wallet) | [link](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.30 | 4.8.x | No | | [SQLite](../depends/packages/sqlite.mk) (wallet) | [link](https://sqlite.org) | [3.38.5](https://github.com/bitcoin/bitcoin/pull/25378) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No | From 3226616493289b111997bb107e569fef54386743 Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 6 Oct 2025 16:35:18 +0100 Subject: [PATCH 110/356] doc: update release notes for 29.2 --- doc/release-notes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 30cb763a0c..b981b8a7f5 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin 
Core version 29.2rc2 is now available from: +Bitcoin Core version 29.2 is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. From b2026fa290f0aef9a0dcfe45750121f113e2ce7d Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 6 Oct 2025 16:35:38 +0100 Subject: [PATCH 111/356] build: bump version to v29.2 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 681926a1e4..70f672132b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,7 +30,7 @@ set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) set(CLIENT_VERSION_MINOR 2) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 2) +set(CLIENT_VERSION_RC 0) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From 46d9b9091baa096da30da5e14329a32f1264229a Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 6 Oct 2025 16:39:57 +0100 Subject: [PATCH 112/356] doc: update manual pages for v29.2 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index 707ccfc322..ce6f35c198 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-CLI "1" "October 2025" "bitcoin-cli v29.2.0rc2" "User Commands" +.TH BITCOIN-CLI "1" "October 2025" "bitcoin-cli v29.2.0" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc2 +bitcoin-cli \- manual page for bitcoin-cli v29.2.0 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.2.0rc2 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.2.0rc2 +Bitcoin Core RPC client version v29.2.0 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. .PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index a02b52ea2e..5efc9e9617 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-QT "1" "October 2025" "bitcoin-qt v29.2.0rc2" "User Commands" +.TH BITCOIN-QT "1" "October 2025" "bitcoin-qt v29.2.0" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.2.0rc2 +bitcoin-qt \- manual page for bitcoin-qt v29.2.0 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.2.0rc2 +Bitcoin Core version v29.2.0 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index a7df27545c..90a233619f 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-TX "1" "October 2025" "bitcoin-tx v29.2.0rc2" "User Commands" +.TH BITCOIN-TX "1" "October 2025" "bitcoin-tx v29.2.0" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc2 +bitcoin-tx \- manual page for bitcoin-tx v29.2.0 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.2.0rc2 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.2.0rc2 +Bitcoin Core bitcoin\-tx utility version v29.2.0 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index d127ddf4cb..4186bd3f5a 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-UTIL "1" "October 2025" "bitcoin-util v29.2.0rc2" "User Commands" +.TH BITCOIN-UTIL "1" "October 2025" "bitcoin-util v29.2.0" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.2.0rc2 +bitcoin-util \- manual page for bitcoin-util v29.2.0 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.2.0rc2 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.2.0rc2 +Bitcoin Core bitcoin\-util utility version v29.2.0 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index b471ac1a24..97c6144f81 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-WALLET "1" "October 2025" "bitcoin-wallet v29.2.0rc2" "User Commands" +.TH BITCOIN-WALLET "1" "October 2025" "bitcoin-wallet v29.2.0" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0rc2 +bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.2.0rc2 +Bitcoin Core bitcoin\-wallet utility version v29.2.0 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index 3e7e394b1a..82804a50c8 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIND "1" "October 2025" "bitcoind v29.2.0rc2" "User Commands" +.TH BITCOIND "1" "October 2025" "bitcoind v29.2.0" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.2.0rc2 +bitcoind \- manual page for bitcoind v29.2.0 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.2.0rc2 +Bitcoin Core daemon version v29.2.0 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. 
.PP From 85a13e943a0491d4b72ba618209eef70c55fa97e Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 10 Oct 2025 13:12:02 +0000 Subject: [PATCH 113/356] Bugfix: torcontrol: Map bind-any to loopback address Technically connecting to 0.0.0.0 works on *nix, but it is undefined behaviour and does not work on Windows --- src/init.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/init.cpp b/src/init.cpp index 9afd76d62d..cf72891b94 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1742,6 +1742,16 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) "for the automatically created Tor onion service."), onion_service_target.ToStringIPPort())); } + if (onion_service_target.IsBindAny()) { + CNetAddr loopback_addr = onion_service_target; + // NOTE: GetNetwork is not_publicly_routable here + if (onion_service_target.ToStringAddr() == "0.0.0.0") { + loopback_addr = LookupHost("127.0.0.1", /*fAllowLookup=*/false).value(); + } else { + loopback_addr = LookupHost("[::1]", /*fAllowLookup=*/false).value(); + } + onion_service_target.SetIP(loopback_addr); + } StartTorControl(onion_service_target); } From 5f7f1cf1816b96e29d11db5c34e0e45622c6daab Mon Sep 17 00:00:00 2001 From: merge-script Date: Fri, 10 Oct 2025 13:44:36 +0000 Subject: [PATCH 114/356] Revert "Merge ci_gha_makejobs_8" This reverts commit 8cd076e06ab69b9f37ba0ff17ba8426f013976c2, reversing changes made to 8db24138af3db039c2a1c3d3cd75a11d60f7c237. 
--- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bfb820214a..2cac4eab0b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -19,7 +19,7 @@ concurrency: env: CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error - MAKEJOBS: '-j8' + MAKEJOBS: '-j10' jobs: test-each-commit: From e949bff636d4fb0b0c3ccdc66003f2b05c73eb76 Mon Sep 17 00:00:00 2001 From: Martin Zumsande Date: Mon, 22 Sep 2025 16:15:48 -0400 Subject: [PATCH 115/356] net: use generic network key for addrcache The generic key can also be used in other places where behavior between different network identities should be uncorrelated to avoid fingerprinting. This also changes RANDOMIZER_ID - since it is not being persisted to disk, there are no compatibility issues. Github-Pull: #33464 Rebased-From: 94db966a3bb52a3677eb5f762447202ed3889f0f --- src/net.cpp | 28 +++++++++++++++++--------- src/net.h | 5 +++++ src/test/denialofservice_tests.cpp | 18 +++++++++++------ src/test/fuzz/p2p_headers_presync.cpp | 2 +- src/test/fuzz/util/net.h | 4 ++++ src/test/net_peer_connection_tests.cpp | 3 ++- src/test/net_tests.cpp | 27 ++++++++++++++++--------- 7 files changed, 61 insertions(+), 26 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 735985a841..96b73e032e 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -115,7 +115,7 @@ const std::string NET_MESSAGE_TYPE_OTHER = "*other*"; static const uint64_t RANDOMIZER_ID_NETGROUP = 0x6c0edd8036ef4036ULL; // SHA256("netgroup")[0:8] static const uint64_t RANDOMIZER_ID_LOCALHOSTNONCE = 0xd93e69e2bbfa5735ULL; // SHA256("localhostnonce")[0:8] -static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL; // SHA256("addrcache")[0:8] +static const uint64_t RANDOMIZER_ID_NETWORKKEY = 0x0e8a2b136c592a7dULL; // SHA256("networkkey")[0:8] // // Global state 
variables // @@ -531,6 +531,13 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo if (!addr_bind.IsValid()) { addr_bind = GetBindAddress(*sock); } + uint64_t network_id = GetDeterministicRandomizer(RANDOMIZER_ID_NETWORKKEY) + .Write(target_addr.GetNetClass()) + .Write(addr_bind.GetAddrBytes()) + // For outbound connections, the port of the bound address is randomly + // assigned by the OS and would therefore not be useful for seeding. + .Write(0) + .Finalize(); CNode* pnode = new CNode(id, std::move(sock), target_addr, @@ -540,6 +547,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo pszDest ? pszDest : "", conn_type, /*inbound_onion=*/false, + network_id, CNodeOptions{ .permission_flags = permission_flags, .i2p_sam_session = std::move(i2p_transient_session), @@ -1828,6 +1836,11 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr&& sock, ServiceFlags local_services = GetLocalServices(); const bool use_v2transport(local_services & NODE_P2P_V2); + uint64_t network_id = GetDeterministicRandomizer(RANDOMIZER_ID_NETWORKKEY) + .Write(inbound_onion ? 
NET_ONION : addr.GetNetClass()) + .Write(addr_bind.GetAddrBytes()) + .Write(addr_bind.GetPort()) // inbound connections use bind port + .Finalize(); CNode* pnode = new CNode(id, std::move(sock), CAddress{addr, NODE_NONE}, @@ -1837,6 +1850,7 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr&& sock, /*addrNameIn=*/"", ConnectionType::INBOUND, inbound_onion, + network_id, CNodeOptions{ .permission_flags = permission_flags, .prefer_evict = discouraged, @@ -3514,15 +3528,9 @@ std::vector CConnman::GetAddresses(size_t max_addresses, size_t max_pc std::vector CConnman::GetAddresses(CNode& requestor, size_t max_addresses, size_t max_pct) { auto local_socket_bytes = requestor.addrBind.GetAddrBytes(); - uint64_t cache_id = GetDeterministicRandomizer(RANDOMIZER_ID_ADDRCACHE) - .Write(requestor.ConnectedThroughNetwork()) - .Write(local_socket_bytes) - // For outbound connections, the port of the bound address is randomly - // assigned by the OS and would therefore not be useful for seeding. - .Write(requestor.IsInboundConn() ? requestor.addrBind.GetPort() : 0) - .Finalize(); + uint64_t network_id = requestor.m_network_key; const auto current_time = GetTime(); - auto r = m_addr_response_caches.emplace(cache_id, CachedAddrResponse{}); + auto r = m_addr_response_caches.emplace(network_id, CachedAddrResponse{}); CachedAddrResponse& cache_entry = r.first->second; if (cache_entry.m_cache_entry_expiration < current_time) { // If emplace() added new one it has expiration 0. 
cache_entry.m_addrs_response_cache = GetAddresses(max_addresses, max_pct, /*network=*/std::nullopt); @@ -3799,6 +3807,7 @@ CNode::CNode(NodeId idIn, const std::string& addrNameIn, ConnectionType conn_type_in, bool inbound_onion, + uint64_t network_key, CNodeOptions&& node_opts) : m_transport{MakeTransport(idIn, node_opts.use_v2transport, conn_type_in == ConnectionType::INBOUND)}, m_permission_flags{node_opts.permission_flags}, @@ -3811,6 +3820,7 @@ CNode::CNode(NodeId idIn, m_inbound_onion{inbound_onion}, m_prefer_evict{node_opts.prefer_evict}, nKeyedNetGroup{nKeyedNetGroupIn}, + m_network_key{network_key}, m_conn_type{conn_type_in}, id{idIn}, nLocalHostNonce{nLocalHostNonceIn}, diff --git a/src/net.h b/src/net.h index e64d9a67f4..c174dc0dc6 100644 --- a/src/net.h +++ b/src/net.h @@ -736,6 +736,10 @@ class CNode std::atomic_bool fPauseRecv{false}; std::atomic_bool fPauseSend{false}; + /** Network key used to prevent fingerprinting our node across networks. + * Influenced by the network and the bind address (+ bind port for inbounds) */ + const uint64_t m_network_key; + const ConnectionType m_conn_type; /** Move all messages from the received queue to the processing queue. 
*/ @@ -887,6 +891,7 @@ class CNode const std::string& addrNameIn, ConnectionType conn_type_in, bool inbound_onion, + uint64_t network_key, CNodeOptions&& node_opts = {}); CNode(const CNode&) = delete; CNode& operator=(const CNode&) = delete; diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index 9ee7e9c9fe..3bb164eac9 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -62,7 +62,8 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) CAddress(), /*addrNameIn=*/"", ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/0}; connman.Handshake( /*node=*/dummyNode1, @@ -128,7 +129,8 @@ void AddRandomOutboundPeer(NodeId& id, std::vector& vNodes, PeerManager& CAddress(), /*addrNameIn=*/"", connType, - /*inbound_onion=*/false}); + /*inbound_onion=*/false, + /*network_key=*/0}); CNode &node = *vNodes.back(); node.SetCommonVersion(PROTOCOL_VERSION); @@ -327,7 +329,8 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) CAddress(), /*addrNameIn=*/"", ConnectionType::INBOUND, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/1}; nodes[0]->SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(*nodes[0], NODE_NETWORK); nodes[0]->fSuccessfullyConnected = true; @@ -347,7 +350,8 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) CAddress(), /*addrNameIn=*/"", ConnectionType::INBOUND, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/1}; nodes[1]->SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(*nodes[1], NODE_NETWORK); nodes[1]->fSuccessfullyConnected = true; @@ -377,7 +381,8 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) CAddress(), /*addrNameIn=*/"", ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/2}; nodes[2]->SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(*nodes[2], NODE_NETWORK); nodes[2]->fSuccessfullyConnected = true; @@ 
-419,7 +424,8 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) CAddress(), /*addrNameIn=*/"", ConnectionType::INBOUND, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/1}; dummyNode.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(dummyNode, NODE_NETWORK); dummyNode.fSuccessfullyConnected = true; diff --git a/src/test/fuzz/p2p_headers_presync.cpp b/src/test/fuzz/p2p_headers_presync.cpp index ed7041ad1f..d4cdc384bb 100644 --- a/src/test/fuzz/p2p_headers_presync.cpp +++ b/src/test/fuzz/p2p_headers_presync.cpp @@ -60,7 +60,7 @@ void HeadersSyncSetup::ResetAndInitialize() for (auto conn_type : conn_types) { CAddress addr{}; - m_connections.push_back(new CNode(id++, nullptr, addr, 0, 0, addr, "", conn_type, false)); + m_connections.push_back(new CNode(id++, nullptr, addr, 0, 0, addr, "", conn_type, false, 0)); CNode& p2p_node = *m_connections.back(); connman.Handshake( diff --git a/src/test/fuzz/util/net.h b/src/test/fuzz/util/net.h index 698001a7f1..381103aa8b 100644 --- a/src/test/fuzz/util/net.h +++ b/src/test/fuzz/util/net.h @@ -239,6 +239,8 @@ auto ConsumeNode(FuzzedDataProvider& fuzzed_data_provider, const std::optional(); + NetPermissionFlags permission_flags = ConsumeWeakEnum(fuzzed_data_provider, ALL_NET_PERMISSION_FLAGS); if constexpr (ReturnUniquePtr) { return std::make_unique(node_id, @@ -250,6 +252,7 @@ auto ConsumeNode(FuzzedDataProvider& fuzzed_data_provider, const std::optional& nodes, PeerManager& peerman, Connm CAddress{}, /*addrNameIn=*/"", conn_type, - /*inbound_onion=*/inbound_onion}); + /*inbound_onion=*/inbound_onion, + /*network_key=*/0}); CNode& node = *nodes.back(); node.SetCommonVersion(PROTOCOL_VERSION); diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 62e541b5b3..08d952410f 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -67,7 +67,8 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) CAddress(), pszDest, ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false); + 
/*inbound_onion=*/false, + /*network_key=*/0); BOOST_CHECK(pnode1->IsFullOutboundConn() == true); BOOST_CHECK(pnode1->IsManualConn() == false); BOOST_CHECK(pnode1->IsBlockOnlyConn() == false); @@ -85,7 +86,8 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) CAddress(), pszDest, ConnectionType::INBOUND, - /*inbound_onion=*/false); + /*inbound_onion=*/false, + /*network_key=*/1); BOOST_CHECK(pnode2->IsFullOutboundConn() == false); BOOST_CHECK(pnode2->IsManualConn() == false); BOOST_CHECK(pnode2->IsBlockOnlyConn() == false); @@ -103,7 +105,8 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) CAddress(), pszDest, ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false); + /*inbound_onion=*/false, + /*network_key=*/2); BOOST_CHECK(pnode3->IsFullOutboundConn() == true); BOOST_CHECK(pnode3->IsManualConn() == false); BOOST_CHECK(pnode3->IsBlockOnlyConn() == false); @@ -121,7 +124,8 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) CAddress(), pszDest, ConnectionType::INBOUND, - /*inbound_onion=*/true); + /*inbound_onion=*/true, + /*network_key=*/3); BOOST_CHECK(pnode4->IsFullOutboundConn() == false); BOOST_CHECK(pnode4->IsManualConn() == false); BOOST_CHECK(pnode4->IsBlockOnlyConn() == false); @@ -613,7 +617,8 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) CAddress{}, /*pszDest=*/std::string{}, ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false); + /*inbound_onion=*/false, + /*network_key=*/0); pnode->fSuccessfullyConnected.store(true); // the peer claims to be reaching us via IPv6 @@ -667,7 +672,8 @@ BOOST_AUTO_TEST_CASE(get_local_addr_for_peer_port) /*addrBindIn=*/CService{}, /*addrNameIn=*/std::string{}, /*conn_type_in=*/ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/0}; peer_out.fSuccessfullyConnected = true; peer_out.SetAddrLocal(peer_us); @@ -688,7 +694,8 @@ BOOST_AUTO_TEST_CASE(get_local_addr_for_peer_port) /*addrBindIn=*/CService{}, /*addrNameIn=*/std::string{}, 
/*conn_type_in=*/ConnectionType::INBOUND, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/1}; peer_in.fSuccessfullyConnected = true; peer_in.SetAddrLocal(peer_us); @@ -825,7 +832,8 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) /*addrBindIn=*/CService{}, /*addrNameIn=*/std::string{}, /*conn_type_in=*/ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false}; + /*inbound_onion=*/false, + /*network_key=*/2}; const uint64_t services{NODE_NETWORK | NODE_WITNESS}; const int64_t time{0}; @@ -900,7 +908,8 @@ BOOST_AUTO_TEST_CASE(advertise_local_address) CAddress{}, /*pszDest=*/std::string{}, ConnectionType::OUTBOUND_FULL_RELAY, - /*inbound_onion=*/false); + /*inbound_onion=*/false, + /*network_key=*/0); }; g_reachable_nets.Add(NET_CJDNS); From 81e5717c9ed2928bf54226290f024c0b1c5a268f Mon Sep 17 00:00:00 2001 From: Martin Zumsande Date: Wed, 17 Sep 2025 23:58:09 -0400 Subject: [PATCH 116/356] p2p: Use different inbound inv timer per network Currently nodes schedule their invs to all inbound peers at the same time. It is trivial to make use this timing pattern for fingerprinting identities on different networks. Using a separate timers for each network will make the fingerprinting harder. Github-Pull: #33464 Rebased-From: 0f7d4ee4e8281ed141a6ebb7e0edee7b864e4dcf --- src/net_processing.cpp | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 1da3ec9d21..69a7726e78 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -783,7 +783,7 @@ class PeerManagerImpl final : public PeerManager uint32_t GetFetchFlags(const Peer& peer) const; - std::atomic m_next_inv_to_inbounds{0us}; + std::map m_next_inv_to_inbounds_per_network_key GUARDED_BY(g_msgproc_mutex); /** Number of nodes with fSyncStarted. 
*/ int nSyncStarted GUARDED_BY(cs_main) = 0; @@ -813,12 +813,14 @@ class PeerManagerImpl final : public PeerManager /** * For sending `inv`s to inbound peers, we use a single (exponentially - * distributed) timer for all peers. If we used a separate timer for each + * distributed) timer for all peers with the same network key. If we used a separate timer for each * peer, a spy node could make multiple inbound connections to us to - * accurately determine when we received the transaction (and potentially - * determine the transaction's origin). */ + * accurately determine when we received a transaction (and potentially + * determine the transaction's origin). Each network key has its own timer + * to make fingerprinting harder. */ std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, - std::chrono::seconds average_interval) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); + std::chrono::seconds average_interval, + uint64_t network_key) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex); // All of the following cache a recent block, and are protected by m_most_recent_block_mutex @@ -1117,15 +1119,15 @@ static bool CanServeWitnesses(const Peer& peer) } std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now, - std::chrono::seconds average_interval) + std::chrono::seconds average_interval, + uint64_t network_key) { - if (m_next_inv_to_inbounds.load() < now) { - // If this function were called from multiple threads simultaneously - // it would possible that both update the next send variable, and return a different result to their caller. - // This is not possible in practice as only the net processing thread invokes this function. 
- m_next_inv_to_inbounds = now + m_rng.rand_exp_duration(average_interval); + auto [it, inserted] = m_next_inv_to_inbounds_per_network_key.try_emplace(network_key, 0us); + auto& timer{it->second}; + if (timer < now) { + timer = now + m_rng.rand_exp_duration(average_interval); } - return m_next_inv_to_inbounds; + return timer; } bool PeerManagerImpl::IsBlockRequested(const uint256& hash) @@ -5679,7 +5681,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (tx_relay->m_next_inv_send_time < current_time) { fSendTrickle = true; if (pto->IsInboundConn()) { - tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); + tx_relay->m_next_inv_send_time = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL, pto->m_network_key); } else { tx_relay->m_next_inv_send_time = current_time + m_rng.rand_exp_duration(OUTBOUND_INVENTORY_BROADCAST_INTERVAL); } From d25f97ac3708ffcd25176baea05923af855226aa Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Wed, 8 Oct 2025 13:10:59 -0700 Subject: [PATCH 117/356] depends: Use $(package)_file_name when downloading from the fallback Github-Pull: #33580 Rebased-From: 671b774d1b58c491b53f2b2f6ee42fb6b65a0e71 --- depends/funcs.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/depends/funcs.mk b/depends/funcs.mk index 15e404e42d..585de62821 100644 --- a/depends/funcs.mk +++ b/depends/funcs.mk @@ -31,7 +31,7 @@ endef define fetch_file ( test -f $$($(1)_source_dir)/$(4) || \ ( $(call fetch_file_inner,$(1),$(2),$(3),$(4),$(5)) || \ - $(call fetch_file_inner,$(1),$(FALLBACK_DOWNLOAD_PATH),$(3),$(4),$(5)))) + $(call fetch_file_inner,$(1),$(FALLBACK_DOWNLOAD_PATH),$(4),$(4),$(5)))) endef define int_get_build_recipe_hash From 8546bc08b1b4b863a7c399265d3ae08f08d014cb Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 10 Oct 2025 12:47:05 +0000 Subject: [PATCH 118/356] Bugfix: torcontrol: Use ephemeral config file rather than stdin cpp-subprocess does not have a way to 
close stdin, so executing tor currently hangs --- src/torcontrol.cpp | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index eb97131448..0e7f39cca3 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -665,6 +665,17 @@ std::string TorController::LaunchTor() const fs::path controlport_env_filepath = tor_datadir / "controlport.env"; fs::remove(controlport_env_filepath); // may throw exceptions + fs::create_directories(tor_datadir); + const fs::path tor_config_filepath = tor_datadir / "generated_config"; + std::ofstream tor_config_file(tor_config_filepath); + tor_config_file << "# This config file is autogenerated at startup, DO NOT MODIFY!\n"; + tor_config_file << "SOCKSPort 0\n"; + tor_config_file << std::string{"DataDirectory "} + fs::PathToString(tor_datadir) + "\n"; + tor_config_file << "ControlPort auto\n"; + tor_config_file << std::string{"ControlPortWriteToFile "} + fs::PathToString(controlport_env_filepath) + "\n"; + tor_config_file << "CookieAuthentication 1\n"; + tor_config_file.close(); + if (m_process) { try { m_process->kill(); @@ -676,17 +687,13 @@ std::string TorController::LaunchTor() } try { - m_process = new subprocess::Popen(m_execute + " -f -", subprocess::input{subprocess::PIPE}, subprocess::close_fds{true}); + m_process = new subprocess::Popen(m_execute + " -f " + fs::PathToString(tor_config_filepath), subprocess::input{subprocess::PIPE}, subprocess::close_fds{true}); } catch (...) { LogDebug(BCLog::TOR, "tor: Failed to execute Tor process\n"); throw; } - m_process->send(std::string{"SOCKSPort 0\n"}); - m_process->send(std::string{"DataDirectory "} + fs::PathToString(tor_datadir) + "\n"); - m_process->send(std::string{"ControlPort auto\n"}); - m_process->send(std::string{"ControlPortWriteToFile "} + fs::PathToString(controlport_env_filepath) + "\n"); - m_process->send(std::string{"CookieAuthentication 1\n"}); + // FIXME: Timeout eventually? 
while (!fs::exists(controlport_env_filepath)) { if (m_process->poll() != -1) { LogDebug(BCLog::TOR, "tor: Tor process died before making control port file\n"); From 23071773f6c49f73c2f0d3088fe265134cd276c1 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 10 Oct 2025 13:41:51 +0000 Subject: [PATCH 119/356] Bugfix: net: Treat connections to the first normal bind as Tor when appropriate --- src/init.cpp | 16 +++++++++++++++- src/net.cpp | 21 ++++++++++++++++++++- src/net.h | 6 ++++++ test/functional/feature_proxy.py | 2 +- 4 files changed, 42 insertions(+), 3 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 70615a191b..cc66e3c741 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1985,17 +1985,31 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } } + connOptions.listenonion = args.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION); + CService onion_service_target; if (!connOptions.onion_binds.empty()) { onion_service_target = connOptions.onion_binds.front(); } else if (!connOptions.vBinds.empty()) { onion_service_target = connOptions.vBinds.front(); + if (connOptions.listenonion) { + std::string alternate_connections{"clearnet"}, only_from_localhost; + if (onion_service_target.IsBindAny()) { + only_from_localhost = " from localhost"; + } else if (onion_service_target.IsLocal()) { + alternate_connections = "local"; + } + InitWarning(strprintf(_("You are using a common listening port (%s) for both Tor and %s connections. All connections to this port%s will be assumed to be Tor connections, and will be denied any whitelist permissions. 
If this is not your intent, setup a separate -bind=[:]=onion configuration, or set -listenonion=0."), + onion_service_target.ToStringAddrPort(), + alternate_connections, + only_from_localhost)); + } } else { onion_service_target = DefaultOnionServiceTarget(default_bind_port_onion); connOptions.onion_binds.push_back(onion_service_target); } - if (args.GetBoolArg("-listenonion", DEFAULT_LISTEN_ONION)) { + if (connOptions.listenonion) { if (connOptions.onion_binds.size() > 1) { InitWarning(strprintf(_("More than one onion bind address is provided. Using %s " "for the automatically created Tor onion service."), diff --git a/src/net.cpp b/src/net.cpp index 7684877ec3..38788c61ea 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1767,7 +1767,26 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr&& sock, { int nInbound = 0; - const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); + const bool inbound_onion = [this, &addr, &addr_bind]{ + if (m_onion_binds.empty()) { + if (!m_listenonion) { + // If -listenonion=0, assume we do not have inbound Tor connections on non-onion listeners + return false; + } + // Tor connections are coming in on the first -bind + if ((!m_normal_binds.empty()) && addr_bind == m_normal_binds.front()) { + if (addr_bind.IsBindAny()) { + // Tor connections should have a source IP that is local + return addr.IsLocal(); + } + // Otherwise, the source IP is unpredictable, so assume anything could be onion + return true; + } + return false; + } else { + return std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end(); + } + }(); // Tor inbound connections do not reveal the peer's actual network address. // Therefore do not apply address-based whitelist permissions to them. 
diff --git a/src/net.h b/src/net.h index e025b20bcd..13ac5b890c 100644 --- a/src/net.h +++ b/src/net.h @@ -1069,6 +1069,7 @@ class CConnman std::vector vWhiteBinds; std::vector vBinds; std::vector onion_binds; + bool listenonion{false}; /// True if the user did not specify -bind= or -whitebind= and thus /// we should bind on `0.0.0.0` (IPv4) and `::` (IPv6). bool bind_on_any; @@ -1112,7 +1113,9 @@ class CConnman m_added_node_params.push_back({added_node, use_v2transport}); } } + m_normal_binds = connOptions.vBinds; m_onion_binds = connOptions.onion_binds; + m_listenonion = connOptions.listenonion; whitelist_forcerelay = connOptions.whitelist_forcerelay; whitelist_relay = connOptions.whitelist_relay; } @@ -1573,11 +1576,14 @@ class CConnman */ std::atomic_bool m_start_extra_block_relay_peers{false}; + std::vector m_normal_binds; + /** * A vector of -bind=
:=onion arguments each of which is * an address and port that are designated for incoming Tor connections. */ std::vector m_onion_binds; + bool m_listenonion; /** * flag for adding 'forcerelay' permission to whitelisted inbound diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index 644ee5cc7f..8d96640fe1 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -437,7 +437,7 @@ def networks_dict(d): self.log.info("Test passing -onlynet=onion without -proxy or -onion but with -listenonion=1 is ok") self.start_node(1, extra_args=["-onlynet=onion", "-listenonion=1"]) - self.stop_node(1) + self.stop_node(1, expected_stderr=f'Warning: You are using a common listening port (127.0.0.1:{p2p_port(1)}) for both Tor and local connections. All connections to this port will be assumed to be Tor connections, and will be denied any whitelist permissions. If this is not your intent, setup a separate -bind=[:]=onion configuration, or set -listenonion=0.') self.log.info("Test passing unknown network to -onlynet raises expected init error") self.nodes[1].extra_args = ["-onlynet=abc"] From c7979f429a86a2971a4ff024bd0e9cd7a6b7222f Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Wed, 8 Oct 2025 14:47:00 -0700 Subject: [PATCH 120/356] ci: Properly include $FILE_ENV in DEPENDS_HASH $FILE_ENV has a full relative path already, prepending with ci/test/ results in a non-existent path which means that DEPENDS_HASH was not actually committing to the test's environment file. 
Github-Pull: #33581 Rebased-From: ceeb53adcd0a6a87a65c8ebbb20472c15c502dfd --- .github/actions/configure-environment/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/configure-environment/action.yml b/.github/actions/configure-environment/action.yml index aae5016bdc..e2a26b7184 100644 --- a/.github/actions/configure-environment/action.yml +++ b/.github/actions/configure-environment/action.yml @@ -17,7 +17,7 @@ runs: - name: Set cache hashes shell: bash run: | - echo "DEPENDS_HASH=$(git ls-tree HEAD depends "ci/test/$FILE_ENV" | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV + echo "DEPENDS_HASH=$(git ls-tree HEAD depends "$FILE_ENV" | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV echo "PREVIOUS_RELEASES_HASH=$(git ls-tree HEAD test/get_previous_releases.py | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV - name: Get container name From 16e10f928cc65b2096046c8c5e0fe715fc5b9d72 Mon Sep 17 00:00:00 2001 From: willcl-ark Date: Tue, 30 Sep 2025 11:00:26 +0100 Subject: [PATCH 121/356] ci: expose all ACTIONS_* vars When using `docker buildx build` in conjunction with the `gha` backend cache type, it's important to specify the URL and TOKEN needed to authenticate. On Cirrus runners this is working with only `ACTIONS_CACHE_URL` and `ACTIONS_RUNTIME_TOKEN`, but this is not enough for the GitHub backend. Fix this by exporting all `ACTIONS_*` variables. This fixes cache restore/save on forks or where GH-hosted runners are being used. 
Github-Pull: #33508 Rebased-From: bc706955d740f8a59bec78e44d33e80d1cca373b --- .github/actions/configure-docker/action.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/actions/configure-docker/action.yml b/.github/actions/configure-docker/action.yml index c78df86b6c..131fdb1ccc 100644 --- a/.github/actions/configure-docker/action.yml +++ b/.github/actions/configure-docker/action.yml @@ -19,8 +19,12 @@ runs: uses: actions/github-script@v6 with: script: | - core.exportVariable('ACTIONS_CACHE_URL', process.env['ACTIONS_CACHE_URL']) - core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env['ACTIONS_RUNTIME_TOKEN']) + Object.keys(process.env).forEach(function (key) { + if (key.startsWith('ACTIONS_')) { + core.info(`Exporting ${key}`); + core.exportVariable(key, process.env[key]); + } + }); - name: Construct docker build cache args shell: bash From 554ff3f7f33651db3a7071d6b8dc5438a303ac03 Mon Sep 17 00:00:00 2001 From: Eugene Siegel Date: Mon, 13 Oct 2025 11:29:19 -0400 Subject: [PATCH 122/356] test: change log rate limit version gate from 299900 to 290100 Github-Pull: #33612 Rebased-From: 7b544341c0021dd713f05bc439ee190de911930c --- test/functional/test_framework/test_node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 47ae2cc22d..919d48b37a 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -137,7 +137,7 @@ def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, self.args.append("-logsourcelocations") if self.version_is_at_least(239000): self.args.append("-loglevel=trace") - if self.version_is_at_least(299900): + if self.version_is_at_least(290100): self.args.append("-nologratelimit") # Default behavior from global -v2transport flag is added to args to persist it over restarts. 
From 4917d0c0de50da204b002bd4ae0c53cafd268f0c Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 13 Oct 2025 16:21:50 +0100 Subject: [PATCH 123/356] doc: update release notes for 29.x --- doc/release-notes.md | 48 ++++++++------------------------------------ 1 file changed, 8 insertions(+), 40 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index b981b8a7f5..4e2071dfa6 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.2 is now available from: +Bitcoin Core version 29.x is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. @@ -37,55 +37,23 @@ unsupported systems. Notable changes =============== -### P2P +### Test -- #32646 p2p: Add witness mutation check inside FillBlock -- #33296 net: check for empty header before calling FillBlock -- #33395 net: do not apply whitelist permissions to onion inbounds - -### Mempool - -- #33504 mempool: Do not enforce TRUC checks on reorg - -### RPC - -- #33446 rpc: fix getblock(header) returns target for tip - -### CI - -- #32989 ci: Migrate CI to hosted Cirrus Runners -- #32999 ci: Use APT_LLVM_V in msan task -- #33099 ci: allow for any libc++ intrumentation & use it for TSAN -- #33258 ci: use LLVM 21 -- #33364 ci: always use tag for LLVM checkout - -### Doc - -- #33484 doc: rpc: fix case typo in `finalizepsbt` help +- #33612 test: change log rate limit version gate ### Misc -- #33310 trace: Workaround GCC bug compiling with old systemtap -- #33340 Fix benchmark CSV output -- #33482 contrib: fix macOS deployment with no translations +- #33508 ci: fix buildx gha cache authentication on forks +- #33581 ci: Properly include $FILE_ENV in DEPENDS_HASH Credits ======= Thanks to everyone who directly contributed to this release: -- Amisha Chhajed +- Ava Chow - Eugene Siegel -- fanquake -- Greg Sanders -- Hennadii Stepanov -- Luke Dashjr -- MarcoFalke -- Martin Zumsande -- Sebastian Falbesoner 
-- Sjors Provoost -- Vasil Dimov -- Will Clark +- willcl-ark As well as to everyone that helped with translations on [Transifex](https://explore.transifex.com/bitcoin/bitcoin/). From 5a0506eea03e423121dd2112c2ba5fb4320022e3 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 25 Apr 2025 16:13:25 -0400 Subject: [PATCH 124/356] tests: add sighash caching tests to feature_taproot Github-Pull: #32473 Rebased-From: 9014d4016ad9351cb59b587541895e55f5d589cc --- test/functional/feature_taproot.py | 98 ++++++++++++++++++++++++++++-- 1 file changed, 93 insertions(+), 5 deletions(-) diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py index 4acb7524fb..198bec7df5 100755 --- a/test/functional/feature_taproot.py +++ b/test/functional/feature_taproot.py @@ -71,6 +71,7 @@ OP_PUSHDATA1, OP_RETURN, OP_SWAP, + OP_TUCK, OP_VERIFY, SIGHASH_DEFAULT, SIGHASH_ALL, @@ -171,9 +172,9 @@ def get(ctx, name): ctx[name] = expr return expr.value -def getter(name): +def getter(name, **kwargs): """Return a callable that evaluates name in its passed context.""" - return lambda ctx: get(ctx, name) + return lambda ctx: get({**ctx, **kwargs}, name) def override(expr, **kwargs): """Return a callable that evaluates expr in a modified context.""" @@ -217,6 +218,20 @@ def default_controlblock(ctx): """Default expression for "controlblock": combine leafversion, negflag, pubkey_internal, merklebranch.""" return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_internal") + get(ctx, "merklebranch") +def default_scriptcode_suffix(ctx): + """Default expression for "scriptcode_suffix", the actually used portion of the scriptcode.""" + scriptcode = get(ctx, "scriptcode") + codesepnum = get(ctx, "codesepnum") + if codesepnum == -1: + return scriptcode + codeseps = 0 + for (opcode, data, sop_idx) in scriptcode.raw_iter(): + if opcode == OP_CODESEPARATOR: + if codeseps == codesepnum: + return CScript(scriptcode[sop_idx+1:]) + codeseps += 1 + assert False + 
def default_sigmsg(ctx): """Default expression for "sigmsg": depending on mode, compute BIP341, BIP143, or legacy sigmsg.""" tx = get(ctx, "tx") @@ -236,12 +251,12 @@ def default_sigmsg(ctx): return TaprootSignatureMsg(tx, utxos, hashtype, idx, scriptpath=False, annex=annex) elif mode == "witv0": # BIP143 signature hash - scriptcode = get(ctx, "scriptcode") + scriptcode = get(ctx, "scriptcode_suffix") utxos = get(ctx, "utxos") return SegwitV0SignatureMsg(scriptcode, tx, idx, hashtype, utxos[idx].nValue) else: # Pre-segwit signature hash - scriptcode = get(ctx, "scriptcode") + scriptcode = get(ctx, "scriptcode_suffix") return LegacySignatureMsg(scriptcode, tx, idx, hashtype)[0] def default_sighash(ctx): @@ -301,7 +316,12 @@ def default_hashtype_actual(ctx): def default_bytes_hashtype(ctx): """Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise.""" - return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0]) + mode = get(ctx, "mode") + hashtype_actual = get(ctx, "hashtype_actual") + if mode != "taproot" or hashtype_actual != 0: + return bytes([hashtype_actual]) + else: + return bytes() def default_sign(ctx): """Default expression for "sign": concatenation of signature and bytes_hashtype.""" @@ -379,6 +399,8 @@ def default_scriptsig(ctx): "key_tweaked": default_key_tweaked, # The tweak to use (None for script path spends, the actual tweak for key path spends). "tweak": default_tweak, + # The part of the scriptcode after the last executed OP_CODESEPARATOR. + "scriptcode_suffix": default_scriptcode_suffix, # The sigmsg value (preimage of sighash) "sigmsg": default_sigmsg, # The sighash value (32 bytes) @@ -409,6 +431,8 @@ def default_scriptsig(ctx): "annex": None, # The codeseparator position (only when mode=="taproot"). "codeseppos": -1, + # Which OP_CODESEPARATOR is the last executed one in the script (in legacy/P2SH/P2WSH). + "codesepnum": -1, # The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH). 
"script_p2sh": None, # The script to add to the witness in (if P2WSH; None implies P2WPKH) @@ -1210,6 +1234,70 @@ def predict_sigops_ratio(n, dummy_size): standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0) add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE) + # == sighash caching tests == + + # Sighash caching in legacy. + for p2sh in [False, True]: + for witv0 in [False, True]: + eckey1, pubkey1 = generate_keypair(compressed=compressed) + for _ in range(10): + # Construct a script with 20 checksig operations (10 sighash types, each 2 times), + # randomly ordered and interleaved with 4 OP_CODESEPARATORS. + ops = [1, 2, 3, 0x21, 0x42, 0x63, 0x81, 0x83, 0xe1, 0xc2, -1, -1] * 2 + # Make sure no OP_CODESEPARATOR appears last. + while True: + random.shuffle(ops) + if ops[-1] != -1: + break + script = [pubkey1] + inputs = [] + codeseps = -1 + for pos, op in enumerate(ops): + if op == -1: + codeseps += 1 + script.append(OP_CODESEPARATOR) + elif pos + 1 != len(ops): + script += [OP_TUCK, OP_CHECKSIGVERIFY] + inputs.append(getter("sign", codesepnum=codeseps, hashtype=op)) + else: + script += [OP_CHECKSIG] + inputs.append(getter("sign", codesepnum=codeseps, hashtype=op)) + inputs.reverse() + script = CScript(script) + add_spender(spenders, "sighashcache/legacy", p2sh=p2sh, witv0=witv0, standard=False, script=script, inputs=inputs, key=eckey1, sigops_weight=12*8*(4-3*witv0), no_fail=True) + + # Sighash caching in tapscript. + for _ in range(10): + # Construct a script with 700 checksig operations (7 sighash types, each 100 times), + # randomly ordered and interleaved with 100 OP_CODESEPARATORS. 
+ ops = [0, 1, 2, 3, 0x81, 0x82, 0x83, -1] * 100 + # Make sure no OP_CODESEPARATOR appears last. + while True: + random.shuffle(ops) + if ops[-1] != -1: + break + script = [pubs[1]] + inputs = [] + opcount = 1 + codeseppos = -1 + for pos, op in enumerate(ops): + if op == -1: + codeseppos = opcount + opcount += 1 + script.append(OP_CODESEPARATOR) + elif pos + 1 != len(ops): + opcount += 2 + script += [OP_TUCK, OP_CHECKSIGVERIFY] + inputs.append(getter("sign", codeseppos=codeseppos, hashtype=op)) + else: + opcount += 1 + script += [OP_CHECKSIG] + inputs.append(getter("sign", codeseppos=codeseppos, hashtype=op)) + inputs.reverse() + script = CScript(script) + tap = taproot_construct(pubs[0], [("leaf", script)]) + add_spender(spenders, "sighashcache/taproot", tap=tap, leaf="leaf", inputs=inputs, standard=True, key=secs[1], no_fail=True) + return spenders From 354d46bc10c61c45140be7a425c5c29fed934d32 Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 25 Apr 2025 13:11:30 -0400 Subject: [PATCH 125/356] script: (refactor) prepare for introducing sighash midstate cache Github-Pull: #32473 Rebased-From: 8f3ddb0bccebc930836b4a6745a7cf29b41eb302 --- src/script/interpreter.cpp | 44 +++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index a35306b693..0e304973a9 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -1569,6 +1569,18 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn { assert(nIn < txTo.vin.size()); + if (sigversion != SigVersion::WITNESS_V0) { + // Check for invalid use of SIGHASH_SINGLE + if ((nHashType & 0x1f) == SIGHASH_SINGLE) { + if (nIn >= txTo.vout.size()) { + // nOut out of range + return uint256::ONE; + } + } + } + + HashWriter ss{}; + if (sigversion == SigVersion::WITNESS_V0) { uint256 hashPrevouts; uint256 hashSequence; @@ -1583,16 +1595,14 @@ uint256 SignatureHash(const CScript& 
scriptCode, const T& txTo, unsigned int nIn hashSequence = cacheready ? cache->hashSequence : SHA256Uint256(GetSequencesSHA256(txTo)); } - if ((nHashType & 0x1f) != SIGHASH_SINGLE && (nHashType & 0x1f) != SIGHASH_NONE) { hashOutputs = cacheready ? cache->hashOutputs : SHA256Uint256(GetOutputsSHA256(txTo)); } else if ((nHashType & 0x1f) == SIGHASH_SINGLE && nIn < txTo.vout.size()) { - HashWriter ss{}; - ss << txTo.vout[nIn]; - hashOutputs = ss.GetHash(); + HashWriter inner_ss{}; + inner_ss << txTo.vout[nIn]; + hashOutputs = inner_ss.GetHash(); } - HashWriter ss{}; // Version ss << txTo.version; // Input prevouts/nSequence (none/all, depending on flags) @@ -1609,26 +1619,16 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn ss << hashOutputs; // Locktime ss << txTo.nLockTime; - // Sighash type - ss << nHashType; - - return ss.GetHash(); - } + } else { + // Wrapper to serialize only the necessary parts of the transaction being signed + CTransactionSignatureSerializer txTmp(txTo, scriptCode, nIn, nHashType); - // Check for invalid use of SIGHASH_SINGLE - if ((nHashType & 0x1f) == SIGHASH_SINGLE) { - if (nIn >= txTo.vout.size()) { - // nOut out of range - return uint256::ONE; - } + // Serialize + ss << txTmp; } - // Wrapper to serialize only the necessary parts of the transaction being signed - CTransactionSignatureSerializer txTmp(txTo, scriptCode, nIn, nHashType); - - // Serialize and hash - HashWriter ss{}; - ss << txTmp << nHashType; + // Add sighash type and hash. + ss << nHashType; return ss.GetHash(); } From 97088fa75aa0af5355587ce3522320f459e35204 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Mon, 4 Aug 2025 14:06:27 -0400 Subject: [PATCH 126/356] qa: test witness stripping in p2p_segwit A stripped witness is detected as a special case in mempool acceptance to make sure we do not add the wtxid (which is =txid since witness is stripped) to the reject filter. 
This is because it may interfere with 1p1c parent relay which currently uses orphan reconciliation (and originally it was until wtxid-relay was widely adopted on the network. This commit adds a test for this special case in the p2p_segwit function test, both when spending a native segwit output and when spending a p2sh-wrapped segwit output. Thanks to Eugene Siegel for pointing out the p2sh-wrapped detection did not have test coverage by finding a bug in a related patch of mine. Github-Pull: #33105 Rebased-From: eb073209db9efdbc2c94bc1f535a27ec6b20d954 --- test/functional/p2p_segwit.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 9caf5a19aa..e8f7f7e0f4 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -707,6 +707,12 @@ def test_p2sh_witness(self): expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']): test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) + # The transaction was detected as witness stripped above and not added to the reject + # filter. Trying again will check it again and result in the same error. + with self.nodes[0].assert_debug_log( + expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']): + test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) + # Try to put the witness script in the scriptSig, should also fail. 
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a']) spend_tx.rehash() @@ -1282,6 +1288,13 @@ def test_tx_relay_after_segwit_activation(self): test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True) test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False) + # Now do the opposite: strip the witness entirely. This will be detected as witness stripping and + # the (w)txid won't be added to the reject filter: we can try again and get the same error. + tx3.wit.vtxinwit[0].scriptWitness.stack = [] + reason = "was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)" + test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=False, accepted=False, reason=reason) + test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=False, accepted=False, reason=reason) + # Get rid of the extra witness, and verify acceptance. tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script] # Also check that old_node gets a tx announcement, even though this is From ddfb9150b80c0c692c06b91cefa988c7773b15ff Mon Sep 17 00:00:00 2001 From: Pieter Wuille Date: Fri, 25 Apr 2025 13:31:18 -0400 Subject: [PATCH 127/356] script: (optimization) introduce sighash midstate caching Github-Pull: #32473 Rebased-From: 92af9f74d74e76681f7d98f293eab226972137b4 --- src/script/interpreter.cpp | 43 ++++++++++++++++++++++++++++++++++++-- src/script/interpreter.h | 22 ++++++++++++++++++- 2 files changed, 62 insertions(+), 3 deletions(-) diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 0e304973a9..4b7bfcedc6 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -1564,8 +1564,35 @@ bool SignatureHashSchnorr(uint256& hash_out, ScriptExecutionData& execdata, cons return true; } +int SigHashCache::CacheIndex(int32_t hash_type) const noexcept +{ + // Note that we do not distinguish between BASE and 
WITNESS_V0 to determine the cache index, + // because no input can simultaneously use both. + return 3 * !!(hash_type & SIGHASH_ANYONECANPAY) + + 2 * ((hash_type & 0x1f) == SIGHASH_SINGLE) + + 1 * ((hash_type & 0x1f) == SIGHASH_NONE); +} + +bool SigHashCache::Load(int32_t hash_type, const CScript& script_code, HashWriter& writer) const noexcept +{ + auto& entry = m_cache_entries[CacheIndex(hash_type)]; + if (entry.has_value()) { + if (script_code == entry->first) { + writer = HashWriter(entry->second); + return true; + } + } + return false; +} + +void SigHashCache::Store(int32_t hash_type, const CScript& script_code, const HashWriter& writer) noexcept +{ + auto& entry = m_cache_entries[CacheIndex(hash_type)]; + entry.emplace(script_code, writer); +} + template -uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache) +uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache, SigHashCache* sighash_cache) { assert(nIn < txTo.vin.size()); @@ -1581,6 +1608,13 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn HashWriter ss{}; + // Try to compute using cached SHA256 midstate. + if (sighash_cache && sighash_cache->Load(nHashType, scriptCode, ss)) { + // Add sighash type and hash. + ss << nHashType; + return ss.GetHash(); + } + if (sigversion == SigVersion::WITNESS_V0) { uint256 hashPrevouts; uint256 hashSequence; @@ -1627,6 +1661,11 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn ss << txTmp; } + // If a cache object was provided, store the midstate there. + if (sighash_cache != nullptr) { + sighash_cache->Store(nHashType, scriptCode, ss); + } + // Add sighash type and hash. 
ss << nHashType; return ss.GetHash(); @@ -1661,7 +1700,7 @@ bool GenericTransactionSignatureChecker::CheckECDSASignature(const std::vecto // Witness sighashes need the amount. if (sigversion == SigVersion::WITNESS_V0 && amount < 0) return HandleMissingData(m_mdb); - uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata); + uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata, &m_sighash_cache); if (!VerifyECDSASignature(vchSig, pubkey, sighash)) return false; diff --git a/src/script/interpreter.h b/src/script/interpreter.h index e2fb1998f0..d613becb8f 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -239,8 +239,27 @@ extern const HashWriter HASHER_TAPSIGHASH; //!< Hasher with tag "TapSighash" pre extern const HashWriter HASHER_TAPLEAF; //!< Hasher with tag "TapLeaf" pre-fed to it. extern const HashWriter HASHER_TAPBRANCH; //!< Hasher with tag "TapBranch" pre-fed to it. +/** Data structure to cache SHA256 midstates for the ECDSA sighash calculations + * (bare, P2SH, P2WPKH, P2WSH). */ +class SigHashCache +{ + /** For each sighash mode (ALL, SINGLE, NONE, ALL|ANYONE, SINGLE|ANYONE, NONE|ANYONE), + * optionally store a scriptCode which the hash is for, plus a midstate for the SHA256 + * computation just before adding the hash_type itself. */ + std::optional> m_cache_entries[6]; + + /** Given a hash_type, find which of the 6 cache entries is to be used. */ + int CacheIndex(int32_t hash_type) const noexcept; + +public: + /** Load into writer the SHA256 midstate if found in this cache. */ + [[nodiscard]] bool Load(int32_t hash_type, const CScript& script_code, HashWriter& writer) const noexcept; + /** Store into this cache object the provided SHA256 midstate. 
*/ + void Store(int32_t hash_type, const CScript& script_code, const HashWriter& writer) noexcept; +}; + template -uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr); +uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr, SigHashCache* sighash_cache = nullptr); class BaseSignatureChecker { @@ -289,6 +308,7 @@ class GenericTransactionSignatureChecker : public BaseSignatureChecker unsigned int nIn; const CAmount amount; const PrecomputedTransactionData* txdata; + mutable SigHashCache m_sighash_cache; protected: virtual bool VerifyECDSASignature(const std::vector& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const; From 65bcbbc538234957b1f7f76b2f21ad7c138efb87 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Wed, 23 Jul 2025 10:50:33 +1000 Subject: [PATCH 128/356] net_processing: drop MaybePunishNodeForTx Do not discourage nodes even when they send us consensus invalid transactions. Because we do not discourage nodes for transactions we consider non-standard, we don't get any DoS protection from this check in adversarial scenarios, so remove the check entirely both to simplify the code and reduce the risk of splitting the network due to changes in tx relay policy. 
NOTE: Backport required additional adjustment in test/functional/p2p_invalid_tx Github-Pull: #33050 Rebased-From: 266dd0e10d08c0bfde63205db15d6c210a021b90 --- src/net_processing.cpp | 34 ----------------------- test/functional/data/invalid_txs.py | 20 ++++++------- test/functional/p2p_invalid_tx.py | 5 ++-- test/functional/p2p_opportunistic_1p1c.py | 6 ++-- 4 files changed, 16 insertions(+), 49 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 1da3ec9d21..b25819c821 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -553,12 +553,6 @@ class PeerManagerImpl final : public PeerManager bool via_compact_block, const std::string& message = "") EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - /** - * Potentially disconnect and discourage a node based on the contents of a TxValidationState object - */ - void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state) - EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex); - /** Maybe disconnect a peer and discourage future connections from its address. * * @param[in] pnode The node to check. 
@@ -1805,32 +1799,6 @@ void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati } } -void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state) -{ - PeerRef peer{GetPeerRef(nodeid)}; - switch (state.GetResult()) { - case TxValidationResult::TX_RESULT_UNSET: - break; - // The node is providing invalid data: - case TxValidationResult::TX_CONSENSUS: - if (peer) Misbehaving(*peer, ""); - return; - // Conflicting (but not necessarily invalid) data or different policy: - case TxValidationResult::TX_INPUTS_NOT_STANDARD: - case TxValidationResult::TX_NOT_STANDARD: - case TxValidationResult::TX_MISSING_INPUTS: - case TxValidationResult::TX_PREMATURE_SPEND: - case TxValidationResult::TX_WITNESS_MUTATED: - case TxValidationResult::TX_WITNESS_STRIPPED: - case TxValidationResult::TX_CONFLICT: - case TxValidationResult::TX_MEMPOOL_POLICY: - case TxValidationResult::TX_NO_MEMPOOL: - case TxValidationResult::TX_RECONSIDERABLE: - case TxValidationResult::TX_UNKNOWN: - break; - } -} - bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex) { AssertLockHeld(cs_main); @@ -2987,8 +2955,6 @@ std::optional PeerManagerImpl::ProcessInvalidTx(NodeId if (peer) AddKnownTx(*peer, parent_txid); } - MaybePunishNodeForTx(nodeid, state); - return package_to_validate; } diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py index d2d7202d86..48ec88fde0 100644 --- a/test/functional/data/invalid_txs.py +++ b/test/functional/data/invalid_txs.py @@ -89,7 +89,7 @@ def get_tx(self, *args, **kwargs): class OutputMissing(BadTxTemplate): reject_reason = "bad-txns-vout-empty" - expect_disconnect = True + expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -100,7 +100,7 @@ def get_tx(self): class InputMissing(BadTxTemplate): reject_reason = "bad-txns-vin-empty" - expect_disconnect = True + expect_disconnect = False # We use a blank transaction here to make sure # it is interpreted as a 
non-witness transaction. @@ -149,7 +149,7 @@ def get_tx(self): class DuplicateInput(BadTxTemplate): reject_reason = 'bad-txns-inputs-duplicate' - expect_disconnect = True + expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -162,7 +162,7 @@ def get_tx(self): class PrevoutNullInput(BadTxTemplate): reject_reason = 'bad-txns-prevout-null' - expect_disconnect = True + expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -188,7 +188,7 @@ def get_tx(self): class SpendTooMuch(BadTxTemplate): reject_reason = 'bad-txns-in-belowout' - expect_disconnect = True + expect_disconnect = False def get_tx(self): return create_tx_with_script( @@ -197,7 +197,7 @@ def get_tx(self): class CreateNegative(BadTxTemplate): reject_reason = 'bad-txns-vout-negative' - expect_disconnect = True + expect_disconnect = False def get_tx(self): return create_tx_with_script(self.spend_tx, 0, amount=-1) @@ -205,7 +205,7 @@ def get_tx(self): class CreateTooLarge(BadTxTemplate): reject_reason = 'bad-txns-vout-toolarge' - expect_disconnect = True + expect_disconnect = False def get_tx(self): return create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY + 1) @@ -213,7 +213,7 @@ def get_tx(self): class CreateSumTooLarge(BadTxTemplate): reject_reason = 'bad-txns-txouttotal-toolarge' - expect_disconnect = True + expect_disconnect = False def get_tx(self): tx = create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY) @@ -224,7 +224,7 @@ def get_tx(self): class InvalidOPIFConstruction(BadTxTemplate): reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)" - expect_disconnect = True + expect_disconnect = False valid_in_block = True def get_tx(self): @@ -266,7 +266,7 @@ def get_tx(self): class NonStandardAndInvalid(BadTxTemplate): """A non-standard transaction which is also consensus-invalid should return the consensus error.""" reject_reason = "mandatory-script-verify-flag-failed (OP_RETURN was encountered)" - expect_disconnect = True + 
expect_disconnect = False valid_in_block = False def get_tx(self): diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index ee8c6c16ca..3785f725fe 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -73,7 +73,7 @@ def run_test(self): tx = template.get_tx() node.p2ps[0].send_txs_and_test( [tx], node, success=False, - expect_disconnect=template.expect_disconnect, + expect_disconnect=False, reject_reason=template.reject_reason, ) @@ -144,7 +144,6 @@ def run_test(self): # tx_orphan_2_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx) # tx_orphan_2_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx) - self.wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected assert_equal(expected_mempool, set(node.getrawmempool())) self.log.info('Test orphan pool overflow') @@ -165,7 +164,7 @@ def run_test(self): node.p2ps[0].send_txs_and_test([rejected_parent], node, success=False) self.log.info('Test that a peer disconnection causes erase its transactions from the orphan pool') - with node.assert_debug_log(['Erased 100 orphan transaction(s) from peer=26']): + with node.assert_debug_log(['Erased 100 orphan transaction(s) from peer=']): self.reconnect_p2p(num_connections=1) self.log.info('Test that a transaction in the orphan pool is included in a new tip block causes erase this transaction from the orphan pool') diff --git a/test/functional/p2p_opportunistic_1p1c.py b/test/functional/p2p_opportunistic_1p1c.py index 5fdbf74a57..def70b733a 100755 --- a/test/functional/p2p_opportunistic_1p1c.py +++ b/test/functional/p2p_opportunistic_1p1c.py @@ -251,8 +251,10 @@ def test_orphan_consensus_failure(self): assert tx_orphan_bad_wit.rehash() not in node_mempool # 5. Have the other peer send the tx too, so that tx_orphan_bad_wit package is attempted. 
- bad_orphan_sender.send_message(msg_tx(low_fee_parent["tx"])) - bad_orphan_sender.wait_for_disconnect() + bad_orphan_sender.send_and_ping(msg_tx(low_fee_parent["tx"])) + + # The bad orphan sender should not be disconnected. + bad_orphan_sender.sync_with_ping() # The peer that didn't provide the orphan should not be disconnected. parent_sender.sync_with_ping() From 56626300b80dced9e111a39d5c560b0b81276cb8 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Mon, 4 Aug 2025 13:11:33 -0400 Subject: [PATCH 129/356] policy: introduce a helper to detect whether a transaction spends Segwit outputs We will use this helper in later commits to detect witness stripping without having to execute every input Script three times in a row. Github-Pull: #33105 Rebased-From: 2907b58834ab011f7dd0c42d323e440abd227c25 --- src/policy/policy.cpp | 36 ++++++++ src/policy/policy.h | 5 ++ src/test/transaction_tests.cpp | 155 +++++++++++++++++++++++++++++++++ 3 files changed, 196 insertions(+) diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index 545387d150..5942747d60 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -344,6 +344,42 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) return true; } +bool SpendsNonAnchorWitnessProg(const CTransaction& tx, const CCoinsViewCache& prevouts) +{ + if (tx.IsCoinBase()) { + return false; + } + + int version; + std::vector program; + for (const auto& txin: tx.vin) { + const auto& prev_spk{prevouts.AccessCoin(txin.prevout).out.scriptPubKey}; + + // Note this includes not-yet-defined witness programs. + if (prev_spk.IsWitnessProgram(version, program) && !prev_spk.IsPayToAnchor(version, program)) { + return true; + } + + // For P2SH extract the redeem script and check if it spends a non-Taproot witness program. 
Note + // this is fine to call EvalScript (as done in AreInputsStandard/IsWitnessStandard) because this + // function is only ever called after IsStandardTx, which checks the scriptsig is pushonly. + if (prev_spk.IsPayToScriptHash()) { + // If EvalScript fails or results in an empty stack, the transaction is invalid by consensus. + std::vector > stack; + if (!EvalScript(stack, txin.scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker{}, SigVersion::BASE) + || stack.empty()) { + continue; + } + const CScript redeem_script{stack.back().begin(), stack.back().end()}; + if (redeem_script.IsWitnessProgram(version, program)) { + return true; + } + } + } + + return false; +} + int64_t GetVirtualTransactionSize(int64_t nWeight, int64_t nSigOpCost, unsigned int bytes_per_sigop) { return (std::max(nWeight, nSigOpCost * bytes_per_sigop) + WITNESS_SCALE_FACTOR - 1) / WITNESS_SCALE_FACTOR; diff --git a/src/policy/policy.h b/src/policy/policy.h index bf6224af3d..a6ce608bcf 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -167,6 +167,11 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) * Also enforce a maximum stack item size limit and no annexes for tapscript spends. */ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs); +/** + * Check whether this transaction spends any witness program but P2A, including not-yet-defined ones. + * May return `false` early for consensus-invalid transactions. + */ +bool SpendsNonAnchorWitnessProg(const CTransaction& tx, const CCoinsViewCache& prevouts); /** Compute the virtual transaction size (weight reinterpreted as bytes). 
*/ int64_t GetVirtualTransactionSize(int64_t nWeight, int64_t nSigOpCost, unsigned int bytes_per_sigop); diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 2db30e2033..5844ab23bc 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -1144,4 +1144,159 @@ BOOST_AUTO_TEST_CASE(max_standard_legacy_sigops) BOOST_CHECK(!::AreInputsStandard(CTransaction(tx_max_sigops), coins)); } +/** Sanity check the return value of SpendsNonAnchorWitnessProg for various output types. */ +BOOST_AUTO_TEST_CASE(spends_witness_prog) +{ + CCoinsView coins_dummy; + CCoinsViewCache coins(&coins_dummy); + CKey key; + key.MakeNewKey(true); + const CPubKey pubkey{key.GetPubKey()}; + CMutableTransaction tx_create{}, tx_spend{}; + tx_create.vout.emplace_back(0, CScript{}); + tx_spend.vin.emplace_back(Txid{}, 0); + std::vector> sol_dummy; + + // CNoDestination, PubKeyDestination, PKHash, ScriptHash, WitnessV0ScriptHash, WitnessV0KeyHash, + // WitnessV1Taproot, PayToAnchor, WitnessUnknown. + static_assert(std::variant_size_v == 9); + + // Go through all defined output types and sanity check SpendsNonAnchorWitnessProg. 
+ + // P2PK + tx_create.vout[0].scriptPubKey = GetScriptForDestination(PubKeyDestination{pubkey}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::PUBKEY); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2PKH + tx_create.vout[0].scriptPubKey = GetScriptForDestination(PKHash{pubkey}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::PUBKEYHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH + auto redeem_script{CScript{} << OP_1 << OP_CHECKSIG}; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash{redeem_script}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << OP_0 << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + + // native P2WSH + const auto witness_script{CScript{} << OP_12 << OP_HASH160 << OP_DUP << OP_EQUAL}; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash{witness_script}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V0_SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2WSH + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + 
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // native P2WPKH + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV0KeyHash{pubkey}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V0_KEYHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2WPKH + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2TR + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV1Taproot{XOnlyPubKey{pubkey}}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V1_TAPROOT); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2TR (undefined, non-standard) + redeem_script = tx_create.vout[0].scriptPubKey; + 
tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2A + tx_create.vout[0].scriptPubKey = GetScriptForDestination(PayToAnchor{}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::ANCHOR); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped P2A (undefined, non-standard) + redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + + // Undefined version 1 witness program + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessUnknown{1, {0x42, 0x42}}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_UNKNOWN); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // P2SH-wrapped undefined version 1 witness program + redeem_script = 
tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // Various undefined version >1 32-byte witness programs. + const auto program{ToByteVector(XOnlyPubKey{pubkey})}; + for (int i{2}; i <= 16; ++i) { + tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessUnknown{i, program}); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_UNKNOWN); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + + // It's also detected within P2SH. 
+ redeem_script = tx_create.vout[0].scriptPubKey; + tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script)); + BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH); + tx_spend.vin[0].prevout.hash = tx_create.GetHash(); + tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script); + AddCoins(coins, CTransaction{tx_create}, 0, false); + BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + tx_spend.vin[0].scriptSig.clear(); + BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins)); + } +} + BOOST_AUTO_TEST_SUITE_END() From 73d3ab8fc93119f14f72a6c5f3cdd9eedcb36a20 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Tue, 22 Jul 2025 18:40:23 -0400 Subject: [PATCH 130/356] qa: simple differential fuzzing for sighash with/without caching Github-Pull: #32473 Rebased-From: b221aa80a081579b8d3b460e3403f7ac0daa7139 --- src/test/fuzz/script_interpreter.cpp | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/test/fuzz/script_interpreter.cpp b/src/test/fuzz/script_interpreter.cpp index 9e3ad02b2e..2c2ce855d4 100644 --- a/src/test/fuzz/script_interpreter.cpp +++ b/src/test/fuzz/script_interpreter.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -45,3 +46,27 @@ FUZZ_TARGET(script_interpreter) (void)CastToBool(ConsumeRandomLengthByteVector(fuzzed_data_provider)); } } + +/** Differential fuzzing for SignatureHash with and without cache. */ +FUZZ_TARGET(sighash_cache) +{ + FuzzedDataProvider provider(buffer.data(), buffer.size()); + + // Get inputs to the sighash function that won't change across types. 
+ const auto scriptcode{ConsumeScript(provider)}; + const auto tx{ConsumeTransaction(provider, std::nullopt)}; + if (tx.vin.empty()) return; + const auto in_index{provider.ConsumeIntegralInRange(0, tx.vin.size() - 1)}; + const auto amount{ConsumeMoney(provider)}; + const auto sigversion{(SigVersion)provider.ConsumeIntegralInRange(0, 1)}; + + // Check the sighash function will give the same result for 100 fuzzer-generated hash types whether or not a cache is + // provided. The cache is conserved across types to exercise cache hits. + SigHashCache sighash_cache{}; + for (int i{0}; i < 100; ++i) { + const auto hash_type{((i & 2) == 0) ? provider.ConsumeIntegral() : provider.ConsumeIntegral()}; + const auto nocache_res{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion)}; + const auto cache_res{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &sighash_cache)}; + Assert(nocache_res == cache_res); + } +} From be0857745a5a0154d89a2aa9ddaa2a84e912598a Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Wed, 23 Jul 2025 10:51:06 +1000 Subject: [PATCH 131/356] validation: only check input scripts once Previously, we would check failing input scripts twice when considering a transaction for the mempool, in order to distinguish policy failures from consensus failures. This allowed us both to provide a different error message and to discourage peers for consensus failures. Because we are no longer discouraging peers for consensus failures during tx relay, and because checking a script can be expensive, only do this once. Also renames non-mandatory-script-verify-flag error to mempool-script-verify-flag-failed. 
NOTE: Backport required additional adjustment in test/functional/feature_block Github-Pull: #33050 Rebased-From: b29ae9efdfeeff774e32ee433ce67d8ed8ecd49f --- src/validation.cpp | 35 +++++++--------------------- test/functional/data/invalid_txs.py | 7 +++--- test/functional/feature_block.py | 5 +++- test/functional/feature_cltv.py | 18 +++++++------- test/functional/feature_dersig.py | 4 ++-- test/functional/feature_nulldummy.py | 12 +++++----- test/functional/feature_segwit.py | 24 +++++++++---------- test/functional/mempool_accept.py | 2 +- test/functional/p2p_segwit.py | 14 +++++------ test/functional/rpc_packages.py | 4 ++-- 10 files changed, 57 insertions(+), 68 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 36734bc612..ebeb67ac78 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2207,34 +2207,17 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, if (pvChecks) { pvChecks->emplace_back(std::move(check)); } else if (auto result = check(); result.has_value()) { + // Tx failures never trigger disconnections/bans. + // This is so that network splits aren't triggered + // either due to non-consensus relay policies (such as + // non-standard DER encodings or non-null dummy + // arguments) or due to new consensus rules introduced in + // soft forks. if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) { - // Check whether the failure was caused by a - // non-mandatory script verification check, such as - // non-standard DER encodings or non-null dummy - // arguments; if so, ensure we return NOT_STANDARD - // instead of CONSENSUS to avoid downstream users - // splitting the network between upgraded and - // non-upgraded nodes by banning CONSENSUS-failing - // data providers. 
- CScriptCheck check2(txdata.m_spent_outputs[i], tx, validation_cache.m_signature_cache, i, - flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata); - auto mandatory_result = check2(); - if (!mandatory_result.has_value()) { - return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(result->first)), result->second); - } else { - // If the second check failed, it failed due to a mandatory script verification - // flag, but the first check might have failed on a non-mandatory script - // verification flag. - // - // Avoid reporting a mandatory script check failure with a non-mandatory error - // string by reporting the error from the second check. - result = mandatory_result; - } + return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("mempool-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); + } else { + return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); } - - // MANDATORY flag failures correspond to - // TxValidationResult::TX_CONSENSUS. 
- return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second); } } diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py index 48ec88fde0..bb1931be2d 100644 --- a/test/functional/data/invalid_txs.py +++ b/test/functional/data/invalid_txs.py @@ -223,7 +223,7 @@ def get_tx(self): class InvalidOPIFConstruction(BadTxTemplate): - reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)" + reject_reason = "mempool-script-verify-flag-failed (Invalid OP_IF construction)" expect_disconnect = False valid_in_block = True @@ -264,8 +264,9 @@ def get_tx(self): }) class NonStandardAndInvalid(BadTxTemplate): - """A non-standard transaction which is also consensus-invalid should return the consensus error.""" - reject_reason = "mandatory-script-verify-flag-failed (OP_RETURN was encountered)" + """A non-standard transaction which is also consensus-invalid should return the first error.""" + reject_reason = "mempool-script-verify-flag-failed (Using OP_CODESEPARATOR in non-witness script)" + block_reject_reason = "mandatory-script-verify-flag-failed (OP_RETURN was encountered)" expect_disconnect = False valid_in_block = False diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 2dfa568c5b..222b238785 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -164,9 +164,12 @@ def run_test(self): self.sign_tx(badtx, attempt_spend_tx) badtx.rehash() badblock = self.update_block(blockname, [badtx]) + reject_reason = (template.block_reject_reason or template.reject_reason) + if reject_reason and reject_reason.startswith("mempool-script-verify-flag-failed"): + reject_reason = "mandatory-script-verify-flag-failed" + reject_reason[33:] self.send_blocks( [badblock], success=False, - reject_reason=(template.block_reject_reason or template.reject_reason), + 
reject_reason=reject_reason, reconnect=True, timeout=2) self.move_tip(2) diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py index 60b3fb4e20..81cc10a5ad 100755 --- a/test/functional/feature_cltv.py +++ b/test/functional/feature_cltv.py @@ -154,12 +154,14 @@ def run_test(self): coin_vout = coin.prevout.n cltv_invalidate(spendtx, i) + blk_rej = "mandatory-script-verify-flag-failed" + tx_rej = "mempool-script-verify-flag-failed" expected_cltv_reject_reason = [ - "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", - "mandatory-script-verify-flag-failed (Negative locktime)", - "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)", - "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)", - "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)", + " (Operation not valid with the current stack size)", + " (Negative locktime)", + " (Locktime requirement not satisfied)", + " (Locktime requirement not satisfied)", + " (Locktime requirement not satisfied)", ][i] # First we show that this tx is valid except for CLTV by getting it # rejected from the mempool for exactly that reason. 
@@ -170,8 +172,8 @@ def run_test(self): 'txid': spendtx_txid, 'wtxid': spendtx_wtxid, 'allowed': False, - 'reject-reason': expected_cltv_reject_reason, - 'reject-details': expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}" + 'reject-reason': tx_rej + expected_cltv_reject_reason, + 'reject-details': tx_rej + expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}" }], self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0), ) @@ -181,7 +183,7 @@ def run_test(self): block.hashMerkleRoot = block.calc_merkle_root() block.solve() - with self.nodes[0].assert_debug_log(expected_msgs=[f'Block validation error: {expected_cltv_reject_reason}']): + with self.nodes[0].assert_debug_log(expected_msgs=[f'Block validation error: {blk_rej + expected_cltv_reject_reason}']): peer.send_and_ping(msg_block(block)) assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip) peer.sync_with_ping() diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py index 0c3b0f1224..2a7eb0d0f4 100755 --- a/test/functional/feature_dersig.py +++ b/test/functional/feature_dersig.py @@ -123,8 +123,8 @@ def run_test(self): 'txid': spendtx_txid, 'wtxid': spendtx_wtxid, 'allowed': False, - 'reject-reason': 'mandatory-script-verify-flag-failed (Non-canonical DER signature)', - 'reject-details': 'mandatory-script-verify-flag-failed (Non-canonical DER signature), ' + + 'reject-reason': 'mempool-script-verify-flag-failed (Non-canonical DER signature)', + 'reject-details': 'mempool-script-verify-flag-failed (Non-canonical DER signature), ' + f"input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:0" }], self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0), diff --git a/test/functional/feature_nulldummy.py b/test/functional/feature_nulldummy.py index 885bc4855b..e7fe7d65e4 100755 --- 
a/test/functional/feature_nulldummy.py +++ b/test/functional/feature_nulldummy.py @@ -37,8 +37,8 @@ from test_framework.wallet import getnewdestination from test_framework.wallet_util import generate_keypair -NULLDUMMY_ERROR = "mandatory-script-verify-flag-failed (Dummy CHECKMULTISIG argument must be zero)" - +NULLDUMMY_TX_ERROR = "mempool-script-verify-flag-failed (Dummy CHECKMULTISIG argument must be zero)" +NULLDUMMY_BLK_ERROR = "mandatory-script-verify-flag-failed (Dummy CHECKMULTISIG argument must be zero)" def invalidate_nulldummy_tx(tx): """Transform a NULLDUMMY compliant tx (i.e. scriptSig starts with OP_0) @@ -105,7 +105,7 @@ def run_test(self): addr=self.ms_address, amount=47, privkey=self.privkey) invalidate_nulldummy_tx(test2tx) - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0) + assert_raises_rpc_error(-26, NULLDUMMY_TX_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0) self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]") self.block_submit(self.nodes[0], [test2tx], accept=True) @@ -116,7 +116,7 @@ def run_test(self): privkey=self.privkey) test6txs = [CTransaction(test4tx)] invalidate_nulldummy_tx(test4tx) - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0) + assert_raises_rpc_error(-26, NULLDUMMY_TX_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0) self.block_submit(self.nodes[0], [test4tx], accept=False) self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation") @@ -126,7 +126,7 @@ def run_test(self): privkey=self.privkey) test6txs.append(CTransaction(test5tx)) test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01' - assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, 
test5tx.serialize_with_witness().hex(), 0) + assert_raises_rpc_error(-26, NULLDUMMY_TX_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0) self.block_submit(self.nodes[0], [test5tx], with_witness=True, accept=False) self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]") @@ -142,7 +142,7 @@ def block_submit(self, node, txs, *, with_witness=False, accept): if with_witness: add_witness_commitment(block) block.solve() - assert_equal(None if accept else NULLDUMMY_ERROR, node.submitblock(block.serialize().hex())) + assert_equal(None if accept else NULLDUMMY_BLK_ERROR, node.submitblock(block.serialize().hex())) if accept: assert_equal(node.getbestblockhash(), block.hash) self.lastblockhash = block.hash diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index f98f326e8f..cc664a83aa 100755 --- a/test/functional/feature_segwit.py +++ b/test/functional/feature_segwit.py @@ -193,8 +193,8 @@ def run_test(self): assert_equal(self.nodes[2].getbalance(), 20 * Decimal("49.999")) self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid") - self.fail_accept(self.nodes[2], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WPKH][1], sign=False) - self.fail_accept(self.nodes[2], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WSH][1], sign=False) + self.fail_accept(self.nodes[2], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WPKH][1], sign=False) + self.fail_accept(self.nodes[2], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WSH][1], sign=False) self.generate(self.nodes[0], 1) # block 164 @@ -213,13 +213,13 @@ def run_test(self): self.log.info("Verify default 
node can't accept txs with missing witness") # unsigned, no scriptsig - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program hash mismatch)", wit_ids[NODE_0][P2WPKH][0], sign=False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program was passed an empty witness)", wit_ids[NODE_0][P2WSH][0], sign=False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WPKH][0], sign=False) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WSH][0], sign=False) + self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Witness program hash mismatch)", wit_ids[NODE_0][P2WPKH][0], sign=False) + self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Witness program was passed an empty witness)", wit_ids[NODE_0][P2WSH][0], sign=False) + self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WPKH][0], sign=False) + self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WSH][0], sign=False) # unsigned with redeem script - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program hash mismatch)", p2sh_ids[NODE_0][P2WPKH][0], sign=False, redeem_script=witness_script(False, self.pubkey[0])) - self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program was passed an empty witness)", p2sh_ids[NODE_0][P2WSH][0], sign=False, redeem_script=witness_script(True, self.pubkey[0])) + self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Witness program hash mismatch)", p2sh_ids[NODE_0][P2WPKH][0], sign=False, redeem_script=witness_script(False, self.pubkey[0])) + self.fail_accept(self.nodes[0], 
"mempool-script-verify-flag-failed (Witness program was passed an empty witness)", p2sh_ids[NODE_0][P2WSH][0], sign=False, redeem_script=witness_script(True, self.pubkey[0])) # Coinbase contains the witness commitment nonce, check that RPC shows us coinbase_txid = self.nodes[2].getblock(blockhash)['tx'][0] @@ -230,10 +230,10 @@ def run_test(self): assert_equal(witnesses[0], '00' * 32) self.log.info("Verify witness txs without witness data are invalid after the fork") - self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program hash mismatch)', wit_ids[NODE_2][P2WPKH][2], sign=False) - self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program was passed an empty witness)', wit_ids[NODE_2][P2WSH][2], sign=False) - self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program hash mismatch)', p2sh_ids[NODE_2][P2WPKH][2], sign=False, redeem_script=witness_script(False, self.pubkey[2])) - self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program was passed an empty witness)', p2sh_ids[NODE_2][P2WSH][2], sign=False, redeem_script=witness_script(True, self.pubkey[2])) + self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program hash mismatch)', wit_ids[NODE_2][P2WPKH][2], sign=False) + self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program was passed an empty witness)', wit_ids[NODE_2][P2WSH][2], sign=False) + self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program hash mismatch)', p2sh_ids[NODE_2][P2WPKH][2], sign=False, redeem_script=witness_script(False, self.pubkey[2])) + self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program was passed an empty witness)', p2sh_ids[NODE_2][P2WSH][2], sign=False, redeem_script=witness_script(True, self.pubkey[2])) self.log.info("Verify default node can now use witness txs") self.success_mine(self.nodes[0], 
wit_ids[NODE_0][P2WPKH][0], True) diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index 2155b8de6b..32d8f7f6ea 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -441,7 +441,7 @@ def run_test(self): nested_anchor_spend.rehash() self.check_mempool_result( - result_expected=[{'txid': nested_anchor_spend.rehash(), 'allowed': False, 'reject-reason': 'non-mandatory-script-verify-flag (Witness version reserved for soft-fork upgrades)'}], + result_expected=[{'txid': nested_anchor_spend.rehash(), 'allowed': False, 'reject-reason': 'mempool-script-verify-flag-failed (Witness version reserved for soft-fork upgrades)'}], rawtxs=[nested_anchor_spend.serialize().hex()], maxfeerate=0, ) diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index e8f7f7e0f4..7815d6ea84 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -704,20 +704,20 @@ def test_p2sh_witness(self): # segwit activation. Note that older bitcoind's that are not # segwit-aware would also reject this for failing CLEANSTACK. with self.nodes[0].assert_debug_log( - expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']): + expected_msgs=[spend_tx.hash, 'was not accepted: mempool-script-verify-flag-failed (Witness program was passed an empty witness)']): test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) # The transaction was detected as witness stripped above and not added to the reject # filter. Trying again will check it again and result in the same error. 
with self.nodes[0].assert_debug_log( - expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']): + expected_msgs=[spend_tx.hash, 'was not accepted: mempool-script-verify-flag-failed (Witness program was passed an empty witness)']): test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) # Try to put the witness script in the scriptSig, should also fail. spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a']) spend_tx.rehash() with self.nodes[0].assert_debug_log( - expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)']): + expected_msgs=[spend_tx.hash, 'was not accepted: mempool-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)']): test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False) # Now put the witness script in the witness, should succeed after @@ -1291,7 +1291,7 @@ def test_tx_relay_after_segwit_activation(self): # Now do the opposite: strip the witness entirely. This will be detected as witness stripping and # the (w)txid won't be added to the reject filter: we can try again and get the same error. 
tx3.wit.vtxinwit[0].scriptWitness.stack = [] - reason = "was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)" + reason = "was not accepted: mempool-script-verify-flag-failed (Witness program was passed an empty witness)" test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=False, accepted=False, reason=reason) test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=False, accepted=False, reason=reason) @@ -1490,7 +1490,7 @@ def test_uncompressed_pubkey(self): sign_input_segwitv0(tx2, 0, script, tx.vout[0].nValue, key) # Should fail policy test. - test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') + test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'mempool-script-verify-flag-failed (Using non-compressed keys in segwit)') # But passes consensus. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx2]) @@ -1509,7 +1509,7 @@ def test_uncompressed_pubkey(self): sign_p2pk_witness_input(witness_script, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key) # Should fail policy test. - test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') + test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'mempool-script-verify-flag-failed (Using non-compressed keys in segwit)') # But passes consensus. block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx3]) @@ -1526,7 +1526,7 @@ def test_uncompressed_pubkey(self): sign_p2pk_witness_input(witness_script, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key) # Should fail policy test. 
- test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)') + test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'mempool-script-verify-flag-failed (Using non-compressed keys in segwit)') block = self.build_next_block() self.update_witness_block_with_transactions(block, [tx4]) test_witness_block(self.nodes[0], self.test_node, block, accepted=True) diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py index a2f9210f94..539e9d09ad 100755 --- a/test/functional/rpc_packages.py +++ b/test/functional/rpc_packages.py @@ -122,8 +122,8 @@ def test_independent(self, coin): assert_equal(testres_bad_sig, self.independent_txns_testres + [{ "txid": tx_bad_sig_txid, "wtxid": tx_bad_sig_wtxid, "allowed": False, - "reject-reason": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", - "reject-details": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size), " + + "reject-reason": "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", + "reject-details": "mempool-script-verify-flag-failed (Operation not valid with the current stack size), " + f"input 0 of {tx_bad_sig_txid} (wtxid {tx_bad_sig_wtxid}), spending {coin['txid']}:{coin['vout']}" }]) From 020ed613bed1148888692cb37e3522202bfca44e Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Wed, 30 Jul 2025 15:56:57 -0400 Subject: [PATCH 132/356] validation: detect witness stripping without re-running Script checks Since it was introduced in 4eb515574e1012bc8ea5dafc3042dcdf4c766f26 (#18044), the detection of a stripped witness relies on running the Script checks 3 times. In the worst case, this consists in running Script validation 3 times for every single input. 
Detection of a stripped witness is necessary because in this case wtxid==txid, and the transaction's wtxid must not be added to the reject filter or it could allow a malicious peer to interfere with txid-based orphan resolution as used in 1p1c package relay. However it is not necessary to run Script validation to detect a stripped witness (much less so doing it 3 times in a row). There are 3 types of witness program: defined program types (Taproot, P2WPKH, P2WSH), undefined types, and the Pay-to-anchor carve-out. For defined program types, Script validation with an empty witness will always fail (by consensus). For undefined program types, Script validation is always going to fail regardless of the witness (by standardness). For P2A, an empty witness is never going to lead to a failure. Therefore it holds that we can always detect a stripped witness without re-running Script validation. However this might lead to more "false positives" (cases where we return witness stripping for an otherwise invalid transaction) than the existing implementation. For instance a transaction with one P2PKH input with an invalid signature and one P2WPKH input with its witness stripped. The existing implementation would treat it as consensus invalid while the implementation in this commit would always consider it witness stripped. Github-Pull: #33105 Rebased-From: 27aefac42505e9c083fa131d3d7edbec7803f3c0 --- src/validation.cpp | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index fde064458d..36734bc612 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1236,13 +1236,8 @@ bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws) // Check input scripts and signatures. // This is done last to help prevent CPU exhaustion denial-of-service attacks. 
if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata, GetValidationCache())) { - // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we - // need to turn both off, and compare against just turning off CLEANSTACK - // to see if the failure is specifically due to witness validation. - TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts - if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, ws.m_precomputed_txdata, GetValidationCache()) && - !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, ws.m_precomputed_txdata, GetValidationCache())) { - // Only the witness is missing, so the transaction itself may be fine. + // Detect a failure due to a missing witness so that p2p code can handle rejection caching appropriately. + if (!tx.HasWitness() && SpendsNonAnchorWitnessProg(tx, m_view)) { state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED, state.GetRejectReason(), state.GetDebugMessage()); } From f24291bd96f92ecc0fc04317fd93747eeb2d557a Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Tue, 22 Jul 2025 11:23:16 -0400 Subject: [PATCH 133/356] qa: unit test sighash caching Github-Pull: #32473 Rebased-From: 83950275eddacac56c58a7a3648ed435a5593328 --- src/test/sighash_tests.cpp | 90 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index d3320878ec..6e2ec800e7 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -207,4 +207,94 @@ BOOST_AUTO_TEST_CASE(sighash_from_data) BOOST_CHECK_MESSAGE(sh.GetHex() == sigHashHex, strTest); } } + +BOOST_AUTO_TEST_CASE(sighash_caching) +{ + // Get a script, transaction and parameters as inputs to the sighash function. 
+ CScript scriptcode; + RandomScript(scriptcode); + CScript diff_scriptcode{scriptcode}; + diff_scriptcode << OP_1; + CMutableTransaction tx; + RandomTransaction(tx, /*fSingle=*/false); + const auto in_index{static_cast(m_rng.randrange(tx.vin.size()))}; + const auto amount{m_rng.rand()}; + + // Exercise the sighash function under both legacy and segwit v0. + for (const auto sigversion: {SigVersion::BASE, SigVersion::WITNESS_V0}) { + // For each, run it against all the 6 standard hash types and a few additional random ones. + std::vector hash_types{{SIGHASH_ALL, SIGHASH_SINGLE, SIGHASH_NONE, SIGHASH_ALL | SIGHASH_ANYONECANPAY, + SIGHASH_SINGLE | SIGHASH_ANYONECANPAY, SIGHASH_NONE | SIGHASH_ANYONECANPAY, + SIGHASH_ANYONECANPAY, 0, std::numeric_limits::max()}}; + for (int i{0}; i < 10; ++i) { + hash_types.push_back(i % 2 == 0 ? m_rng.rand() : m_rng.rand()); + } + + // Reuse the same cache across script types. This must not cause any issue as the cached value for one hash type must never + // be confused for another (instantiating the cache within the loop instead would prevent testing this). + SigHashCache cache; + for (const auto hash_type: hash_types) { + const bool expect_one{sigversion == SigVersion::BASE && ((hash_type & 0x1f) == SIGHASH_SINGLE) && in_index >= tx.vout.size()}; + + // The result of computing the sighash should be the same with or without cache. + const auto sighash_with_cache{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)}; + const auto sighash_no_cache{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, nullptr)}; + BOOST_CHECK_EQUAL(sighash_with_cache, sighash_no_cache); + + // Calling the cached version again should return the same value again. 
+ BOOST_CHECK_EQUAL(sighash_with_cache, SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)); + + // While here we might as well also check that the result for legacy is the same as for the old SignatureHash() function. + if (sigversion == SigVersion::BASE) { + BOOST_CHECK_EQUAL(sighash_with_cache, SignatureHashOld(scriptcode, CTransaction(tx), in_index, hash_type)); + } + + // Calling with a different scriptcode (for instance in case a CODESEP is encountered) will not return the cache value but + // overwrite it. The sighash will always be different except in case of legacy SIGHASH_SINGLE bug. + const auto sighash_with_cache2{SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)}; + const auto sighash_no_cache2{SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, nullptr)}; + BOOST_CHECK_EQUAL(sighash_with_cache2, sighash_no_cache2); + if (!expect_one) { + BOOST_CHECK_NE(sighash_with_cache, sighash_with_cache2); + } else { + BOOST_CHECK_EQUAL(sighash_with_cache, sighash_with_cache2); + BOOST_CHECK_EQUAL(sighash_with_cache, uint256::ONE); + } + + // Calling the cached version again should return the same value again. + BOOST_CHECK_EQUAL(sighash_with_cache2, SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)); + + // And if we store a different value for this scriptcode and hash type it will return that instead. + { + HashWriter h{}; + h << 42; + cache.Store(hash_type, scriptcode, h); + const auto stored_hash{h.GetHash()}; + BOOST_CHECK(cache.Load(hash_type, scriptcode, h)); + const auto loaded_hash{h.GetHash()}; + BOOST_CHECK_EQUAL(stored_hash, loaded_hash); + } + + // And using this mutated cache with the sighash function will return the new value (except in the legacy SIGHASH_SINGLE bug + // case in which it'll return 1). 
+ if (!expect_one) { + BOOST_CHECK_NE(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), sighash_with_cache); + HashWriter h{}; + BOOST_CHECK(cache.Load(hash_type, scriptcode, h)); + h << hash_type; + const auto new_hash{h.GetHash()}; + BOOST_CHECK_EQUAL(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), new_hash); + } else { + BOOST_CHECK_EQUAL(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), uint256::ONE); + } + + // Wipe the cache and restore the correct cached value for this scriptcode and hash_type before starting the next iteration. + HashWriter dummy{}; + cache.Store(hash_type, diff_scriptcode, dummy); + (void)SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache); + BOOST_CHECK(cache.Load(hash_type, scriptcode, dummy) || expect_one); + } + } +} + BOOST_AUTO_TEST_SUITE_END() From 6f136cd3914b001752cce02adde00fccaed0ad48 Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Fri, 8 Aug 2025 23:15:17 +1000 Subject: [PATCH 134/356] tests: drop expect_disconnect behaviour for tx relay Github-Pull: #33050 Rebased-From: 876dbdfb4702410dfd4037614dc9298a0c09c63e --- test/functional/data/invalid_txs.py | 18 ------------------ test/functional/p2p_invalid_tx.py | 5 ----- test/functional/test_framework/p2p.py | 8 ++------ 3 files changed, 2 insertions(+), 29 deletions(-) diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py index bb1931be2d..f96059d4ee 100644 --- a/test/functional/data/invalid_txs.py +++ b/test/functional/data/invalid_txs.py @@ -69,9 +69,6 @@ class BadTxTemplate: # Only specified if it differs from mempool acceptance error. block_reject_reason = "" - # Do we expect to be disconnected after submitting this tx? - expect_disconnect = False - # Is this tx considered valid when included in a block, but not for acceptance into # the mempool (i.e. 
does it violate policy but not consensus)? valid_in_block = False @@ -89,7 +86,6 @@ def get_tx(self, *args, **kwargs): class OutputMissing(BadTxTemplate): reject_reason = "bad-txns-vout-empty" - expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -100,7 +96,6 @@ def get_tx(self): class InputMissing(BadTxTemplate): reject_reason = "bad-txns-vin-empty" - expect_disconnect = False # We use a blank transaction here to make sure # it is interpreted as a non-witness transaction. @@ -117,7 +112,6 @@ def get_tx(self): # tree depth commitment (CVE-2017-12842) class SizeTooSmall(BadTxTemplate): reject_reason = "tx-size-small" - expect_disconnect = False valid_in_block = True def get_tx(self): @@ -134,7 +128,6 @@ class BadInputOutpointIndex(BadTxTemplate): # Won't be rejected - nonexistent outpoint index is treated as an orphan since the coins # database can't distinguish between spent outpoints and outpoints which never existed. reject_reason = None - expect_disconnect = False def get_tx(self): num_indices = len(self.spend_tx.vin) @@ -149,7 +142,6 @@ def get_tx(self): class DuplicateInput(BadTxTemplate): reject_reason = 'bad-txns-inputs-duplicate' - expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -162,7 +154,6 @@ def get_tx(self): class PrevoutNullInput(BadTxTemplate): reject_reason = 'bad-txns-prevout-null' - expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -175,7 +166,6 @@ def get_tx(self): class NonexistentInput(BadTxTemplate): reject_reason = None # Added as an orphan tx. 
- expect_disconnect = False def get_tx(self): tx = CTransaction() @@ -188,7 +178,6 @@ def get_tx(self): class SpendTooMuch(BadTxTemplate): reject_reason = 'bad-txns-in-belowout' - expect_disconnect = False def get_tx(self): return create_tx_with_script( @@ -197,7 +186,6 @@ def get_tx(self): class CreateNegative(BadTxTemplate): reject_reason = 'bad-txns-vout-negative' - expect_disconnect = False def get_tx(self): return create_tx_with_script(self.spend_tx, 0, amount=-1) @@ -205,7 +193,6 @@ def get_tx(self): class CreateTooLarge(BadTxTemplate): reject_reason = 'bad-txns-vout-toolarge' - expect_disconnect = False def get_tx(self): return create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY + 1) @@ -213,7 +200,6 @@ def get_tx(self): class CreateSumTooLarge(BadTxTemplate): reject_reason = 'bad-txns-txouttotal-toolarge' - expect_disconnect = False def get_tx(self): tx = create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY) @@ -224,7 +210,6 @@ def get_tx(self): class InvalidOPIFConstruction(BadTxTemplate): reject_reason = "mempool-script-verify-flag-failed (Invalid OP_IF construction)" - expect_disconnect = False valid_in_block = True def get_tx(self): @@ -236,7 +221,6 @@ def get_tx(self): class TooManySigops(BadTxTemplate): reject_reason = "bad-txns-too-many-sigops" block_reject_reason = "bad-blk-sigops, out-of-bounds SigOpCount" - expect_disconnect = False def get_tx(self): lotsa_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS)) @@ -258,7 +242,6 @@ def get_tx(self): return type('DisabledOpcode_' + str(opcode), (BadTxTemplate,), { 'reject_reason': "disabled opcode", - 'expect_disconnect': True, 'get_tx': get_tx, 'valid_in_block' : True }) @@ -267,7 +250,6 @@ class NonStandardAndInvalid(BadTxTemplate): """A non-standard transaction which is also consensus-invalid should return the first error.""" reject_reason = "mempool-script-verify-flag-failed (Using OP_CODESEPARATOR in non-witness script)" block_reject_reason = "mandatory-script-verify-flag-failed 
(OP_RETURN was encountered)" - expect_disconnect = False valid_in_block = False def get_tx(self): diff --git a/test/functional/p2p_invalid_tx.py b/test/functional/p2p_invalid_tx.py index 3785f725fe..439735d178 100755 --- a/test/functional/p2p_invalid_tx.py +++ b/test/functional/p2p_invalid_tx.py @@ -73,14 +73,9 @@ def run_test(self): tx = template.get_tx() node.p2ps[0].send_txs_and_test( [tx], node, success=False, - expect_disconnect=False, reject_reason=template.reject_reason, ) - if template.expect_disconnect: - self.log.info("Reconnecting to peer") - self.reconnect_p2p() - # Make two p2p connections to provide the node with orphans # * p2ps[0] will send valid orphan txs (one with low fee) # * p2ps[1] will send an invalid orphan tx (and is later disconnected for that) diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index 207d19137b..c5e518238c 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -893,13 +893,12 @@ def send_blocks_and_test(self, blocks, node, *, success=True, force_send=False, else: assert node.getbestblockhash() != blocks[-1].hash - def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None): + def send_txs_and_test(self, txs, node, *, success=True, reject_reason=None): """Send txs to test node and test whether they're accepted to the mempool. 
- add all txs to our tx_store - send tx messages for all txs - if success is True/False: assert that the txs are/are not accepted to the mempool - - if expect_disconnect is True: Skip the sync with ping - if reject_reason is set: assert that the correct reject message is logged.""" with p2p_lock: @@ -911,10 +910,7 @@ def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, for tx in txs: self.send_message(msg_tx(tx)) - if expect_disconnect: - self.wait_for_disconnect() - else: - self.sync_with_ping() + self.sync_with_ping() raw_mempool = node.getrawmempool() if success: From fa8f851a2a56ec3566816e7e056a1a2f9854ed29 Mon Sep 17 00:00:00 2001 From: scgbckbone Date: Tue, 17 Dec 2024 11:21:56 +0100 Subject: [PATCH 135/356] bugfix: disallow label for ranged descriptors & allow external non-ranged descriptors to have label * do not only check user provided range data to decide whether descriptor is ranged * properly handle std::optional when checking if descriptor is internal Github-Pull: #31514 Rebased-From: 664657ed134365588914c2cf6a3975ce368a4f49 --- src/wallet/rpc/backup.cpp | 8 +++++--- test/functional/wallet_importdescriptors.py | 13 +++++++++++++ test/functional/wallet_rescan_unconfirmed.py | 3 ++- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/src/wallet/rpc/backup.cpp b/src/wallet/rpc/backup.cpp index ac23b092d4..4040036962 100644 --- a/src/wallet/rpc/backup.cpp +++ b/src/wallet/rpc/backup.cpp @@ -1482,6 +1482,7 @@ static UniValue ProcessDescriptorImport(CWallet& wallet, const UniValue& data, c } // Range check + std::optional is_ranged; int64_t range_start = 0, range_end = 1, next_index = 0; if (!parsed_descs.at(0)->IsRange() && data.exists("range")) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Range should not be specified for an un-ranged descriptor"); @@ -1496,6 +1497,7 @@ static UniValue ProcessDescriptorImport(CWallet& wallet, const UniValue& data, c range_end = wallet.m_keypool_size; } next_index = range_start; + 
is_ranged = true; if (data.exists("next_index")) { next_index = data["next_index"].getInt(); @@ -1517,12 +1519,13 @@ static UniValue ProcessDescriptorImport(CWallet& wallet, const UniValue& data, c } // Ranged descriptors should not have a label - if (data.exists("range") && data.exists("label")) { + if (is_ranged.has_value() && is_ranged.value() && data.exists("label")) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Ranged descriptors should not have a label"); } + bool desc_internal = internal.has_value() && internal.value(); // Internal addresses should not have a label either - if (internal && data.exists("label")) { + if (desc_internal && data.exists("label")) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Internal addresses should not have a label"); } @@ -1538,7 +1541,6 @@ static UniValue ProcessDescriptorImport(CWallet& wallet, const UniValue& data, c for (size_t j = 0; j < parsed_descs.size(); ++j) { auto parsed_desc = std::move(parsed_descs[j]); - bool desc_internal = internal.has_value() && internal.value(); if (parsed_descs.size() == 2) { desc_internal = j == 1; } else if (parsed_descs.size() > 2) { diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py index 84c07b6a28..c5baf9ba5b 100755 --- a/test/functional/wallet_importdescriptors.py +++ b/test/functional/wallet_importdescriptors.py @@ -119,6 +119,9 @@ def run_test(self): error_code=-8, error_message="Internal addresses should not have a label") + self.log.info("External non-ranged addresses can have labels") + self.test_importdesc({**import_request, "internal": False}, success=True) + self.log.info("Internal addresses should be detected as such") key = get_generate_key() self.test_importdesc({"desc": descsum_create("pkh(" + key.pubkey + ")"), @@ -206,6 +209,16 @@ def run_test(self): error_code=-8, error_message='Ranged descriptors should not have a label') + self.log.info("Ranged descriptors cannot have labels - even if range not provided by user and only 
implied by asterisk (*)") + self.test_importdesc({"desc":descsum_create("wpkh(" + xpub + "/100/0/*)"), + "timestamp": "now", + "label": "test", + "active": True}, + success=False, + warnings=['Range not given, using default keypool range'], + error_code=-8, + error_message='Ranged descriptors should not have a label') + self.log.info("Private keys required for private keys enabled wallet") self.test_importdesc({"desc":descsum_create(desc), "timestamp": "now", diff --git a/test/functional/wallet_rescan_unconfirmed.py b/test/functional/wallet_rescan_unconfirmed.py index 69ad522b5d..9bdc932aa3 100755 --- a/test/functional/wallet_rescan_unconfirmed.py +++ b/test/functional/wallet_rescan_unconfirmed.py @@ -66,7 +66,8 @@ def run_test(self): assert tx_parent_to_reorg["txid"] in node.getrawmempool() self.log.info("Import descriptor wallet on another node") - descriptors_to_import = [{"desc": w0.getaddressinfo(parent_address)['parent_desc'], "timestamp": 0, "label": "w0 import"}] + # descriptor is ranged - label not allowed + descriptors_to_import = [{"desc": w0.getaddressinfo(parent_address)['parent_desc'], "timestamp": 0}] node.createwallet(wallet_name="w1", disable_private_keys=True) w1 = node.get_wallet_rpc("w1") From 18760fed9769ae1d2ccfe65065492b6867f63152 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Fri, 24 Oct 2025 16:40:35 +0200 Subject: [PATCH 136/356] test: Use same rpc timeout for authproxy and cli Github-Pull: #33698 Rebased-From: 66667d6512294fd5dd02161b7c68c19af0865865 --- test/functional/test_framework/test_node.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 3baa78fd79..d76f01efde 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -83,7 +83,8 @@ def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, 
self.stderr_dir = self.datadir_path / "stderr" self.chain = chain self.rpchost = rpchost - self.rpc_timeout = timewait + self.rpc_timeout = timewait # Already multiplied by timeout_factor + self.timeout_factor = timeout_factor self.binary = bitcoind self.coverage_dir = coverage_dir self.cwd = cwd @@ -140,7 +141,11 @@ def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, # v2transport requested but not supported for node assert not v2transport - self.cli = TestNodeCLI(bitcoin_cli, self.datadir_path) + self.cli = TestNodeCLI( + bitcoin_cli, + self.datadir_path, + self.rpc_timeout // 2, # timeout identical to the one used in self._rpc + ) self.use_cli = use_cli self.start_perf = start_perf @@ -155,7 +160,6 @@ def __init__(self, i, datadir_path, *, chain, rpchost, timewait, timeout_factor, self.perf_subprocesses = {} self.p2ps = [] - self.timeout_factor = timeout_factor self.mocktime = None @@ -839,16 +843,17 @@ def arg_to_cli(arg): class TestNodeCLI(): """Interface to bitcoin-cli for an individual node""" - def __init__(self, binary, datadir): + def __init__(self, binary, datadir, rpc_timeout): self.options = [] self.binary = binary self.datadir = datadir + self.rpc_timeout = rpc_timeout self.input = None self.log = logging.getLogger('TestFramework.bitcoincli') def __call__(self, *options, input=None): # TestNodeCLI is callable with bitcoin-cli command-line options - cli = TestNodeCLI(self.binary, self.datadir) + cli = TestNodeCLI(self.binary, self.datadir, self.rpc_timeout) cli.options = [str(o) for o in options] cli.input = input return cli @@ -869,7 +874,11 @@ def send_cli(self, clicommand=None, *args, **kwargs): """Run bitcoin-cli command. 
Deserializes returned string as python object.""" pos_args = [arg_to_cli(arg) for arg in args] named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()] - p_args = [self.binary, f"-datadir={self.datadir}"] + self.options + p_args = [ + self.binary, + f"-datadir={self.datadir}", + f"-rpcclienttimeout={int(self.rpc_timeout)}", + ] + self.options if named_args: p_args += ["-named"] if clicommand is not None: From 3152b0db5d1ed330e1dac7740f982adc4829b9e5 Mon Sep 17 00:00:00 2001 From: WakeTrainDev <175499942+waketraindev@users.noreply.github.com> Date: Fri, 10 Oct 2025 23:55:06 +0300 Subject: [PATCH 137/356] qt: add createwallet, createwalletdescriptor, and migratewallet to history filter Github-Pull: gui#901 Rebased-From: 4e352efa2ce756c668664486c99d003eef530e0c --- src/qt/rpcconsole.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index ae3f9aa686..c1e2c1343c 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -70,6 +70,9 @@ namespace { // don't add private key handling cmd's to the history const QStringList historyFilter = QStringList() + << "createwallet" + << "createwalletdescriptor" + << "migratewallet" << "importprivkey" << "importmulti" << "sethdseed" From 065af0f17c0d158e84251b1c9ace8ad0d1de275a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Fri, 31 Oct 2025 15:41:35 +0100 Subject: [PATCH 138/356] log,blocks: avoid `GetHash()` work when logging is disabled `PartiallyDownloadedBlock::FillBlock()` calculated the header hash that's only needed when debug logging is enabled. 
Github-Pull: #33738 Rebased-From: 10e0e96e703a40b298b87e9943f85d5189b9e3dc (partial) --- src/blockencodings.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/blockencodings.cpp b/src/blockencodings.cpp index 5f4061a71d..8622c87756 100644 --- a/src/blockencodings.cpp +++ b/src/blockencodings.cpp @@ -184,7 +184,6 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector< { if (header.IsNull()) return READ_STATUS_INVALID; - uint256 hash = header.GetHash(); block = header; block.vtx.resize(txn_available.size()); @@ -217,12 +216,15 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector< return READ_STATUS_CHECKBLOCK_FAILED; } + if (LogAcceptCategory(BCLog::CMPCTBLOCK, BCLog::Level::Debug)) { + const uint256 hash{block.GetHash()}; // avoid cleared header LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size()); if (vtx_missing.size() < 5) { for (const auto& tx : vtx_missing) { LogDebug(BCLog::CMPCTBLOCK, "Reconstructed block %s required tx %s\n", hash.ToString(), tx->GetHash().ToString()); } } + } return READ_STATUS_OK; } From 896e9c3d3a17583b04944c099516a7651d97f000 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 4 Nov 2025 13:47:46 +0000 Subject: [PATCH 139/356] mempressure: Disable by default for now - Newer Linux versions no longer return the info we need in sysinfo - Flushing has different methods of working now (sync vs dropping clean caches) - Need a way to ensure the OS actually releases memory when we free it --- src/util/mempressure.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/mempressure.cpp b/src/util/mempressure.cpp index 177d046377..e2127fd798 100644 --- a/src/util/mempressure.cpp +++ b/src/util/mempressure.cpp @@ -19,7 +19,7 @@ #include #include 
-size_t g_low_memory_threshold{64_MiB}; +size_t g_low_memory_threshold{0}; bool SystemNeedsMemoryReleased() { From 31c574fac9a485a8ca13663b81d56182855cc870 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 4 Nov 2025 14:15:41 +0000 Subject: [PATCH 140/356] GUI: MempoolStats: Use min relay fee when mempool has none Co-authored-by: /dev/fd0 Github-Pull: knots#226 --- src/validation.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 23cdd12b2c..62f4a2e618 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1418,8 +1418,6 @@ MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef AssertLockHeld(cs_main); LOCK(m_pool.cs); // mempool "read lock" (held through m_pool.m_opts.signals->TransactionAddedToMempool()) - const CFeeRate mempool_min_fee_rate = m_pool.GetMinFee(); - Workspace ws(ptx); const std::vector single_wtxid{ws.m_ptx->GetWitnessHash()}; @@ -1500,7 +1498,8 @@ MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef } // update mempool stats cache - CStats::DefaultStats()->addMempoolSample(m_pool.size(), m_pool.DynamicMemoryUsage(), mempool_min_fee_rate.GetFeePerK()); + const CFeeRate min_fee_rate = std::max(m_pool.GetMinFee(), m_pool.m_opts.min_relay_feerate); + CStats::DefaultStats()->addMempoolSample(m_pool.size(), m_pool.DynamicMemoryUsage(), min_fee_rate.GetFeePerK()); return MempoolAcceptResult::Success(std::move(m_subpackage.m_replaced_transactions), ws.m_vsize, ws.m_base_fees, effective_feerate, single_wtxid); @@ -3128,7 +3127,8 @@ bool Chainstate::DisconnectTip(BlockValidationState& state, DisconnectedBlockTra if (m_mempool) { // add mempool stats sample - CStats::DefaultStats()->addMempoolSample(m_mempool->size(), m_mempool->DynamicMemoryUsage(), m_mempool->GetMinFee().GetFeePerK()); + const CFeeRate min_fee_rate = std::max(m_mempool->GetMinFee(), m_mempool->m_opts.min_relay_feerate); + 
CStats::DefaultStats()->addMempoolSample(m_mempool->size(), m_mempool->DynamicMemoryUsage(), min_fee_rate.GetFeePerK()); } return true; @@ -3256,7 +3256,8 @@ bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, if (m_mempool) { // add mempool stats sample - CStats::DefaultStats()->addMempoolSample(m_mempool->size(), m_mempool->DynamicMemoryUsage(), m_mempool->GetMinFee().GetFeePerK()); + const CFeeRate min_fee_rate = std::max(m_mempool->GetMinFee(), m_mempool->m_opts.min_relay_feerate); + CStats::DefaultStats()->addMempoolSample(m_mempool->size(), m_mempool->DynamicMemoryUsage(), min_fee_rate.GetFeePerK()); } const auto time_6{SteadyClock::now()}; From 2e1d9cb3466451b4394c8b48d9078dbfc0e5d6ec Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 4 Nov 2025 18:09:08 +0000 Subject: [PATCH 141/356] Revert "add migratewallet rpc in historyFilter" This reverts commit d7bc5138e196684c5d615eec50b7a5c61080775d. --- src/qt/rpcconsole.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index 701ef1d9c2..5ebacbc500 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -80,8 +80,7 @@ const QStringList historyFilter = QStringList() << "signrawtransactionwithkey" << "walletpassphrase" << "walletpassphrasechange" - << "encryptwallet" - << "migratewallet"; + << "encryptwallet"; } From 6323b42e07f9be8939f0ae1f85165d5b492ad83c Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 4 Nov 2025 17:54:11 +0000 Subject: [PATCH 142/356] Default policy: Increase datacarriersize to 83 bytes --- src/init.cpp | 1 - src/policy/policy.h | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 44dc91a7e1..f88acd40ce 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -853,7 +853,6 @@ void InitParameterInteraction(ArgsManager& args) args.SoftSetArg("-rejectparasites", "0"); args.SoftSetArg("-datacarriercost", "0.25"); 
args.SoftSetArg("-datacarrierfullcount", "0"); - args.SoftSetArg("-datacarriersize", "83"); args.SoftSetArg("-maxtxlegacysigops", strprintf("%s", std::numeric_limits::max())); args.SoftSetArg("-maxscriptsize", strprintf("%s", std::numeric_limits::max())); args.SoftSetArg("-mempooltruc", "enforce"); diff --git a/src/policy/policy.h b/src/policy/policy.h index a9c557279e..d27221805c 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -115,12 +115,12 @@ static constexpr unsigned int DEFAULT_DESCENDANT_SIZE_LIMIT_KVB{101}; /** Default for -datacarrier */ static const bool DEFAULT_ACCEPT_DATACARRIER = true; /** - * Default setting for -datacarriersize. 40 bytes of data, +1 for OP_RETURN, - * +1 for the pushdata opcode. + * Default setting for -datacarriersize. 80 bytes of data, +1 for OP_RETURN, + * +2 for the pushdata opcodes. */ /** Default for -permitbaredatacarrier */ static const bool DEFAULT_PERMITBAREDATACARRIER{false}; -static constexpr unsigned int MAX_OP_RETURN_RELAY{42}; +static const unsigned int MAX_OP_RETURN_RELAY = 83; /** Default for -datacarrierfullcount */ static constexpr bool DEFAULT_DATACARRIER_FULLCOUNT{true}; /** From 647eca80eca9f0407213f6799faadeef874c1652 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Thu, 16 Oct 2025 11:39:24 +0200 Subject: [PATCH 143/356] ci: Only write docker build images to Cirrus cache Other cache providers offer too little space for this to be useful. 
Github-Pull: #33639 Rebased-From: fabe0e07de1ad2f26da62f3ebe0e9be3f939b1f8 (silent merge failure) --- .github/actions/configure-docker/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/configure-docker/action.yml b/.github/actions/configure-docker/action.yml index c78df86b6c..458f7e2157 100644 --- a/.github/actions/configure-docker/action.yml +++ b/.github/actions/configure-docker/action.yml @@ -41,8 +41,8 @@ runs: # Always optimistically --cache‑from in case a cache blob exists args=(--cache-from "type=gha${url_args:+,${url_args}},scope=${CONTAINER_NAME}") - # If this is a push to the default branch, also add --cache‑to to save the cache - if [[ ${{ github.event_name }} == "push" && ${{ github.ref_name }} == ${{ github.event.repository.default_branch }} ]]; then + # Only add --cache-to when using the Cirrus cache provider and pushing to the default branch. + if [[ ${{ inputs.use-cirrus }} == 'true' && ${{ github.event_name }} == "push" && ${{ github.ref_name }} == ${{ github.event.repository.default_branch }} ]]; then args+=(--cache-to "type=gha${url_args:+,${url_args}},mode=max,ignore-error=true,scope=${CONTAINER_NAME}") fi From 7498da0c2abf9e5d8a57cecb186d3c9beecd893d Mon Sep 17 00:00:00 2001 From: Ataraxia Date: Fri, 7 Nov 2025 11:29:49 +0530 Subject: [PATCH 144/356] Changing the rpcbind argument being ignored to a pop up warning, instead of a debug log Github-Pull: #33813 Rebased-From: 0cca5b772a9a76edc2f15e2f476ea138046c9cb8 --- src/httpserver.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/httpserver.cpp b/src/httpserver.cpp index bd2dec19b9..4c7e75fbf3 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -373,7 +373,7 @@ static bool HTTPBindAddresses(struct evhttp* http) LogPrintf("WARNING: option -rpcallowip was specified without -rpcbind; this doesn't usually make sense\n"); } if (!gArgs.GetArgs("-rpcbind").empty()) { - LogPrintf("WARNING: option -rpcbind was ignored because 
-rpcallowip was not specified, refusing to allow everyone to connect\n"); + InitWarning(_("option -rpcbind was ignored because -rpcallowip was not specified, refusing to allow everyone to connect\n")); } } else { // Specific bind addresses for (const std::string& strRPCBind : gArgs.GetArgs("-rpcbind")) { From 2cf352fd8e6a77003e38d954b6c879b20d4b960a Mon Sep 17 00:00:00 2001 From: will Date: Tue, 14 Oct 2025 13:57:20 +0100 Subject: [PATCH 145/356] doc: document capnproto and libmultiprocess deps These dependencies are both undocumented, and libmultiprocess has a relatively special requirement in that v6.0 and later are known to not work with v29.x of Bitcoin Core due to https://github.com/bitcoin-core/libmultiprocess/pull/160 --- depends/packages/native_libmultiprocess.mk | 4 ++-- doc/dependencies.md | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/depends/packages/native_libmultiprocess.mk b/depends/packages/native_libmultiprocess.mk index 4467dee76f..a76304f9f0 100644 --- a/depends/packages/native_libmultiprocess.mk +++ b/depends/packages/native_libmultiprocess.mk @@ -1,8 +1,8 @@ package=native_libmultiprocess -$(package)_version=1954f7f65661d49e700c344eae0fc8092decf975 +$(package)_version=v5.0 $(package)_download_path=https://github.com/bitcoin-core/libmultiprocess/archive $(package)_file_name=$($(package)_version).tar.gz -$(package)_sha256_hash=fc014bd74727c1d5d30b396813685012c965d079244dd07b53bc1c75c610a2cb +$(package)_sha256_hash=401984715b271a3446e1910f21adf048ba390d31cc93cc3073742e70d56fa3ea $(package)_dependencies=native_capnp define $(package)_config_cmds diff --git a/doc/dependencies.md b/doc/dependencies.md index d3f6b74367..a042f8f2ea 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -36,3 +36,7 @@ Bitcoin Core requires one of the following compilers. 
| [SQLite](../depends/packages/sqlite.mk) (wallet) | [link](https://sqlite.org) | [3.38.5](https://github.com/bitcoin/bitcoin/pull/25378) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No | | Python (scripts, tests) | [link](https://www.python.org) | N/A | [3.10](https://github.com/bitcoin/bitcoin/pull/30527) | No | | [systemtap](../depends/packages/systemtap.mk) ([tracing](tracing.md)) | [link](https://sourceware.org/systemtap/) | [4.8](https://github.com/bitcoin/bitcoin/pull/26945)| N/A | No | +| [capnproto](../depends/packages/capnp.mk) ([multiprocess](multiprocess.md)) | [link](https://capnproto.org/) | [1.2.0](https://github.com/bitcoin/bitcoin/pull/32760)| [0.7.0](https://github.com/bitcoin-core/libmultiprocess/pull/88) | No | +| [libmultiprocess](../depends/packages/libmultiprocess.mk) ([multiprocess](multiprocess.md)) | [link](https://github.com/bitcoin-core/libmultiprocess) | [5.0](https://github.com/bitcoin/bitcoin/pull/31945)| [v5.0-pre1](https://github.com/bitcoin/bitcoin/pull/31740)* | No | + +\* Libmultiprocess 5.x versions should be compatible, but 6.0 and later are not due to bitcoin-core/libmultiprocess#160. 
From abaf1e37a79bdf7481cef1cd5ae5e102fdce09be Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 11 Dec 2024 13:05:21 -0500 Subject: [PATCH 146/356] refactor: remove sqlite dir path back-and-forth conversion Github-Pull: bitcoin/bitcoin#31423 Rebased-From: d04f6a97ba9a55aa9455e1a805feeed4d630f59a --- src/wallet/sqlite.cpp | 6 +++--- src/wallet/sqlite.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp index a8c9f8a8ab..896a2fc0f3 100644 --- a/src/wallet/sqlite.cpp +++ b/src/wallet/sqlite.cpp @@ -112,12 +112,12 @@ Mutex SQLiteDatabase::g_sqlite_mutex; int SQLiteDatabase::g_sqlite_count = 0; SQLiteDatabase::SQLiteDatabase(const fs::path& dir_path, const fs::path& file_path, const DatabaseOptions& options, bool mock) - : WalletDatabase(), m_mock(mock), m_dir_path(fs::PathToString(dir_path)), m_file_path(fs::PathToString(file_path)), m_write_semaphore(1), m_use_unsafe_sync(options.use_unsafe_sync) + : WalletDatabase(), m_mock(mock), m_dir_path(dir_path), m_file_path(fs::PathToString(file_path)), m_write_semaphore(1), m_use_unsafe_sync(options.use_unsafe_sync) { { LOCK(g_sqlite_mutex); LogPrintf("Using SQLite Version %s\n", SQLiteDatabaseVersion()); - LogPrintf("Using wallet %s\n", m_dir_path); + LogPrintf("Using wallet %s\n", fs::PathToString(m_dir_path)); if (++g_sqlite_count == 1) { // Setup logging @@ -253,7 +253,7 @@ void SQLiteDatabase::Open() if (m_db == nullptr) { if (!m_mock) { - TryCreateDirectories(fs::PathFromString(m_dir_path)); + TryCreateDirectories(m_dir_path); } int ret = sqlite3_open_v2(m_file_path.c_str(), &m_db, flags, nullptr); if (ret != SQLITE_OK) { diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h index 78a3accf89..eb3c0217f5 100644 --- a/src/wallet/sqlite.h +++ b/src/wallet/sqlite.h @@ -105,7 +105,7 @@ class SQLiteDatabase : public WalletDatabase private: const bool m_mock{false}; - const std::string m_dir_path; + const fs::path m_dir_path; const std::string m_file_path; From 
01c04d32aa3e1c323b304b1c6a573dd933b8b598 Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 11 Dec 2024 13:10:01 -0500 Subject: [PATCH 147/356] wallet: introduce method to return all db created files Github-Pull: bitcoin/bitcoin#31423 Rebased-From: 1de423e0a08bbc63eed36c8772e9ef8b48e80fb8 --- src/wallet/bdb.h | 15 +++++++++++++++ src/wallet/db.h | 3 +++ src/wallet/migrate.h | 1 + src/wallet/salvage.cpp | 1 + src/wallet/sqlite.h | 8 ++++++++ src/wallet/test/util.h | 1 + 6 files changed, 29 insertions(+) diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index f3fe8a19c1..ec773fd177 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -132,6 +132,21 @@ class BerkeleyDatabase : public WalletDatabase /** Return path to main database filename */ std::string Filename() override { return fs::PathToString(env->Directory() / m_filename); } + std::vector Files() override + { + std::vector files; + files.emplace_back(env->Directory() / m_filename); + if (env->m_databases.size() == 1) { + files.emplace_back(env->Directory() / "db.log"); + files.emplace_back(env->Directory() / ".walletlock"); + files.emplace_back(env->Directory() / "database" / "log.0000000001"); + files.emplace_back(env->Directory() / "database"); + // Note that this list is not exhaustive as BDB may create more log files, and possibly other ones too + // However it should be good enough for the only calls to Files() + } + return files; + } + std::string Format() override { return "bdb"; } /** * Pointer to shared database environment. diff --git a/src/wallet/db.h b/src/wallet/db.h index e8790006a4..5f13ca29ff 100644 --- a/src/wallet/db.h +++ b/src/wallet/db.h @@ -170,6 +170,9 @@ class WalletDatabase /** Return path to main database file for logs and error messages. 
*/ virtual std::string Filename() = 0; + /** Return paths to all database created files */ + virtual std::vector Files() = 0; + virtual std::string Format() = 0; std::atomic nUpdateCounter; diff --git a/src/wallet/migrate.h b/src/wallet/migrate.h index 16eadeb019..82359f9d4b 100644 --- a/src/wallet/migrate.h +++ b/src/wallet/migrate.h @@ -65,6 +65,7 @@ class BerkeleyRODatabase : public WalletDatabase /** Return path to main database file for logs and error messages. */ std::string Filename() override { return fs::PathToString(m_filepath); } + std::vector Files() override { return {m_filepath}; } std::string Format() override { return "bdb_ro"; } diff --git a/src/wallet/salvage.cpp b/src/wallet/salvage.cpp index b924239073..443f80893f 100644 --- a/src/wallet/salvage.cpp +++ b/src/wallet/salvage.cpp @@ -63,6 +63,7 @@ class DummyDatabase : public WalletDatabase void IncrementUpdateCounter() override { ++nUpdateCounter; } void ReloadDbEnv() override {} std::string Filename() override { return "dummy"; } + std::vector Files() override { return {}; } std::string Format() override { return "dummy"; } std::unique_ptr MakeBatch(bool flush_on_close = true) override { return std::make_unique(); } }; diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h index eb3c0217f5..c78cd29afc 100644 --- a/src/wallet/sqlite.h +++ b/src/wallet/sqlite.h @@ -166,6 +166,14 @@ class SQLiteDatabase : public WalletDatabase void IncrementUpdateCounter() override { ++nUpdateCounter; } std::string Filename() override { return m_file_path; } + /** Return paths to all database created files */ + std::vector Files() override + { + std::vector files; + files.emplace_back(m_dir_path / fs::PathFromString(m_file_path)); + files.emplace_back(m_dir_path / fs::PathFromString(m_file_path + "-journal")); + return files; + } std::string Format() override { return "sqlite"; } /** Make a SQLiteBatch connected to this database */ diff --git a/src/wallet/test/util.h b/src/wallet/test/util.h index 
b055c6c693..59e3a9c75f 100644 --- a/src/wallet/test/util.h +++ b/src/wallet/test/util.h @@ -123,6 +123,7 @@ class MockableDatabase : public WalletDatabase void ReloadDbEnv() override {} std::string Filename() override { return "mockable"; } + std::vector Files() override { return {}; } std::string Format() override { return "mock"; } std::unique_ptr MakeBatch(bool flush_on_close = true) override { return std::make_unique(m_records, m_pass); } }; From cc324aa2bed30afa713625dfb9cf83c438dd15c1 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Tue, 6 Jan 2026 16:09:38 -0800 Subject: [PATCH 148/356] wallettool: do not use fs::remove_all in createfromdump cleanup Github-Pull: bitcoin/bitcoin#34215 Rebased-From: f78f6f1dc8e16d5a8a23749e77bc3bf17c91ae42 --- src/wallet/dump.cpp | 8 +++++++- test/functional/tool_wallet.py | 12 ++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/src/wallet/dump.cpp b/src/wallet/dump.cpp index db2756e0ca..20aa5d453e 100644 --- a/src/wallet/dump.cpp +++ b/src/wallet/dump.cpp @@ -288,11 +288,17 @@ bool CreateFromDump(const ArgsManager& args, const std::string& name, const fs:: dump_file.close(); } + // On failure, gather the paths to remove + std::vector paths_to_remove = wallet->GetDatabase().Files(); + if (!name.empty()) paths_to_remove.push_back(wallet_path); + wallet.reset(); // The pointer deleter will close the wallet for us. 
// Remove the wallet dir if we have a failure if (!ret) { - fs::remove_all(wallet_path); + for (const auto& p : paths_to_remove) { + fs::remove(p); + } } return ret; diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py index c7abc2da8d..979804a5fe 100755 --- a/test/functional/tool_wallet.py +++ b/test/functional/tool_wallet.py @@ -409,6 +409,18 @@ def test_dump_createfromdump(self): self.write_dump(dump_data, bad_sum_wallet_dump) self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') assert not (self.nodes[0].wallets_path / "badload").is_dir() + if not self.options.descriptors: + os.rename(self.nodes[0].wallets_path / "wallet.dat", self.nodes[0].wallets_path / "default.wallet.dat") + self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') + assert self.nodes[0].wallets_path.exists() + assert not (self.nodes[0].wallets_path / "wallet.dat").exists() + + self.log.info('Checking createfromdump with an unnamed wallet') + self.do_tool_createfromdump("", "wallet.dump") + assert (self.nodes[0].wallets_path / "wallet.dat").exists() + os.unlink(self.nodes[0].wallets_path / "wallet.dat") + if not self.options.descriptors: + os.rename(self.nodes[0].wallets_path / "default.wallet.dat", self.nodes[0].wallets_path / "wallet.dat") def test_chainless_conflicts(self): self.log.info("Test wallet tool when wallet contains conflicting transactions") From d91f56e1e3f1aee99b0c09c23db70622ad6ed1b4 Mon Sep 17 00:00:00 2001 From: furszy Date: Mon, 5 Jan 2026 18:12:40 -0500 Subject: [PATCH 149/356] wallet: RestoreWallet failure, erase only what was created Track what RestoreWallet creates so only those files and directories are removed during a failure and nothing else. Preexisting paths must be left untouched. 
Note: Using fs::remove_all() instead of fs::remove() in RestoreWallet does not cause any problems currently, but the change is necessary for the next commit which extends RestoreWallet to work with existing directories, which may contain files that must not be deleted. Github-Pull: bitcoin/bitcoin#34156 Rebased-From: 4ed0693a3f2a427ef9e7ad016930ec29fa244995 --- src/wallet/wallet.cpp | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 09eda0c28e..32c902b768 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -501,6 +501,8 @@ std::shared_ptr RestoreWallet(WalletContext& context, const fs::path& b const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), fs::u8path(wallet_name)); auto wallet_file = wallet_path / "wallet.dat"; std::shared_ptr wallet; + bool wallet_file_copied = false; + bool created_parent_dir = false; try { if (!fs::exists(backup_file)) { @@ -509,13 +511,22 @@ std::shared_ptr RestoreWallet(WalletContext& context, const fs::path& b return nullptr; } - if (fs::exists(wallet_path) || !TryCreateDirectories(wallet_path)) { + if (fs::exists(wallet_path)) { error = Untranslated(strprintf("Failed to create database path '%s'. 
Database already exists.", fs::PathToString(wallet_path))); status = DatabaseStatus::FAILED_ALREADY_EXISTS; return nullptr; + } else { + // The directory doesn't exist, create it + if (!TryCreateDirectories(wallet_path)) { + error = Untranslated(strprintf("Failed to restore database path '%s'.", fs::PathToString(wallet_path))); + status = DatabaseStatus::FAILED_ALREADY_EXISTS; + return nullptr; + } + created_parent_dir = true; } fs::copy_file(backup_file, wallet_file, fs::copy_options::none); + wallet_file_copied = true; if (load_after_restore) { wallet = LoadWallet(context, wallet_name, load_on_start, options, status, error, warnings); @@ -528,7 +539,13 @@ std::shared_ptr RestoreWallet(WalletContext& context, const fs::path& b // Remove created wallet path only when loading fails if (load_after_restore && !wallet) { - fs::remove_all(wallet_path); + if (wallet_file_copied) fs::remove(wallet_file); + // Clean up the parent directory if we created it during restoration. + // As we have created it, it must be empty after deleting the wallet file. + if (created_parent_dir) { + Assume(fs::is_empty(wallet_path)); + fs::remove(wallet_path); + } } return wallet; From a074d36254ab0c666f0438fe071cd213715f82de Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 26 Dec 2025 20:22:55 -0500 Subject: [PATCH 150/356] wallet: fix unnamed wallet migration failure When migrating any legacy unnamed wallet, a failed migration would cause the cleanup logic to remove its parent directory. Since this type of legacy wallet lives directly in the main '/wallets/' folder, this resulted in unintentionally erasing all wallets, including the backup file. To be fully safe, we will no longer call `fs::remove_all`. Instead, we only erase the individual db files we have created, leaving everything else intact. The created wallets parent directories are erased only if they are empty. 
As part of this last change, `RestoreWallet` was modified to allow an existing directory as the destination, since we no longer remove the original wallet directory (we only remove the files we created inside it). This also fixes the restore of top-level default wallets during failures, which were failing due to the directory existence check that always returns true for the /wallets/ directory. This bug started after: https://github.com/bitcoin/bitcoin/commit/f6ee59b6e2995a3916fb4f0d4cbe15ece2054494 Previously, the `fs::copy_file` call was failing for top-level wallets, which prevented the `fs::remove_all` call from being reached. Github-Pull: bitcoin/bitcoin#34156 Rebased-From: f4c7e28e80bf9af50b03a770b641fd309a801589 --- src/wallet/wallet.cpp | 72 +++++++++++++++++++++++--------- test/functional/wallet_backup.py | 2 +- 2 files changed, 54 insertions(+), 20 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 32c902b768..119a99d601 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -511,10 +511,22 @@ std::shared_ptr RestoreWallet(WalletContext& context, const fs::path& b return nullptr; } + // Wallet directories are allowed to exist, but must not contain a .dat file. + // Any existing wallet database is treated as a hard failure to prevent overwriting. if (fs::exists(wallet_path)) { - error = Untranslated(strprintf("Failed to create database path '%s'. Database already exists.", fs::PathToString(wallet_path))); - status = DatabaseStatus::FAILED_ALREADY_EXISTS; - return nullptr; + // If this is a file, it is the db and we don't want to overwrite it. + if (!fs::is_directory(wallet_path)) { + error = Untranslated(strprintf("Failed to restore wallet. 
Database file exists '%s'.", fs::PathToString(wallet_path))); + status = DatabaseStatus::FAILED_ALREADY_EXISTS; + return nullptr; + } + + // Check we are not going to overwrite an existing db file + if (fs::exists(wallet_file)) { + error = Untranslated(strprintf("Failed to restore wallet. Database file exists in '%s'.", fs::PathToString(wallet_file))); + status = DatabaseStatus::FAILED_ALREADY_EXISTS; + return nullptr; + } } else { // The directory doesn't exist, create it if (!TryCreateDirectories(wallet_path)) { @@ -4559,26 +4571,43 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr } } - // In case of reloading failure, we need to remember the wallet dirs to remove - // Set is used as it may be populated with the same wallet directory paths multiple times, - // both before and after reloading. This ensures the set is complete even if one of the wallets - // fails to reload. - std::set wallet_dirs; + // In case of loading failure, we need to remember the wallet files we have created to remove. + // A `set` is used as it may be populated with the same wallet directory paths multiple times, + // both before and after loading. This ensures the set is complete even if one of the wallets + // fails to load. + std::set wallet_files_to_remove; + std::set wallet_empty_dirs_to_remove; + + // Helper to track wallet files and directories for cleanup on failure. + // Only directories of wallets created during migration (not the main wallet) are tracked. + auto track_for_cleanup = [&](const CWallet& wallet) { + const auto files = wallet.GetDatabase().Files(); + wallet_files_to_remove.insert(files.begin(), files.end()); + if (wallet.GetName() != wallet_name) { + // If this isn’t the main wallet, mark its directory for removal. + // This applies to the watch-only and solvable wallets. + // Wallets stored directly as files in the top-level directory + // (e.g. default unnamed wallets) don’t have a removable parent directory. 
+ wallet_empty_dirs_to_remove.insert(fs::PathFromString(wallet.GetDatabase().Filename()).parent_path()); + } + }; + + if (success) { // Migration successful, unload all wallets locally, then reload them. // Reload the main wallet - wallet_dirs.insert(fs::PathFromString(local_wallet->GetDatabase().Filename()).parent_path()); + track_for_cleanup(*local_wallet); success = reload_wallet(local_wallet); res.wallet = local_wallet; res.wallet_name = wallet_name; if (success && res.watchonly_wallet) { // Reload watchonly - wallet_dirs.insert(fs::PathFromString(res.watchonly_wallet->GetDatabase().Filename()).parent_path()); + track_for_cleanup(*res.watchonly_wallet); success = reload_wallet(res.watchonly_wallet); } if (success && res.solvables_wallet) { // Reload solvables - wallet_dirs.insert(fs::PathFromString(res.solvables_wallet->GetDatabase().Filename()).parent_path()); + track_for_cleanup(*res.solvables_wallet); success = reload_wallet(res.solvables_wallet); } } @@ -4586,7 +4615,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // Migration failed, cleanup // Before deleting the wallet's directory, copy the backup file to the top-level wallets dir fs::path temp_backup_location = fsbridge::AbsPathJoin(GetWalletDir(), backup_filename); - fs::copy_file(backup_path, temp_backup_location, fs::copy_options::none); + fs::rename(backup_path, temp_backup_location); // Make list of wallets to cleanup std::vector> created_wallets; @@ -4595,8 +4624,8 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr if (res.solvables_wallet) created_wallets.push_back(std::move(res.solvables_wallet)); // Get the directories to remove after unloading - for (std::shared_ptr& w : created_wallets) { - wallet_dirs.emplace(fs::PathFromString(w->GetDatabase().Filename()).parent_path()); + for (std::shared_ptr& wallet : created_wallets) { + track_for_cleanup(*wallet); } // Unload the wallets @@ -4615,9 +4644,15 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr } } - // Delete the 
wallet directories - for (const fs::path& dir : wallet_dirs) { - fs::remove_all(dir); + // First, delete the db files we have created throughout this process and nothing else + for (const fs::path& file : wallet_files_to_remove) { + fs::remove(file); + } + + // Second, delete the created wallet directories and nothing else. They must be empty at this point. + for (const fs::path& dir : wallet_empty_dirs_to_remove) { + Assume(fs::is_empty(dir)); + fs::remove(dir); } // Restore the backup @@ -4631,8 +4666,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr } // The wallet directory has been restored, but just in case, copy the previously created backup to the wallet dir - fs::copy_file(temp_backup_location, backup_path, fs::copy_options::none); - fs::remove(temp_backup_location); + fs::rename(temp_backup_location, backup_path); // Verify that there is no dangling wallet: when the wallet wasn't loaded before, expect null. // This check is performed after restoration to avoid an early error before saving the backup. diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py index 7c88f64dcf..7ad83bdf87 100755 --- a/test/functional/wallet_backup.py +++ b/test/functional/wallet_backup.py @@ -136,7 +136,7 @@ def restore_wallet_existent_name(self): backup_file = self.nodes[0].datadir_path / 'wallet.bak' wallet_name = "res0" wallet_file = node.wallets_path / wallet_name - error_message = "Failed to create database path '{}'. Database already exists.".format(wallet_file) + error_message = "Failed to restore wallet. 
Database file exists in '{}'.".format(wallet_file / "wallet.dat") assert_raises_rpc_error(-36, error_message, node.restorewallet, wallet_name, backup_file) assert wallet_file.exists() From 833848e9b8eab430629da116f753f8d4433f51e2 Mon Sep 17 00:00:00 2001 From: furszy Date: Fri, 26 Dec 2025 20:23:02 -0500 Subject: [PATCH 151/356] test: add coverage for unnamed wallet migration failure Verifies that a failed migration of the unnamed (default) wallet does not erase the main /wallets/ directory, and also that the backup file exists. Github-Pull: bitcoin/bitcoin#34156 Rebased-From: 36093bde63286e19821a9e62cdff1712b6245dc7 --- test/functional/wallet_migration.py | 35 +++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index ce8dc19460..3ca053043b 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -4,6 +4,7 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test Migrating a wallet from legacy to descriptor.""" +import os import random import shutil import struct @@ -548,6 +549,39 @@ def test_default_wallet(self): self.master_node.setmocktime(0) + def test_default_wallet_failure(self): + self.log.info("Test failure during unnamed (default) wallet migration") + master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name) + wallet = self.create_legacy_wallet("", blank=True) + wallet.importaddress(master_wallet.getnewaddress(address_type="legacy")) + + # Create wallet directory with the watch-only name and a wallet file. + # Because the wallet dir exists, this will cause migration to fail. 
+ watch_only_dir = self.master_node.wallets_path / "_watchonly" + os.mkdir(watch_only_dir) + shutil.copyfile(self.old_node.wallets_path / "wallet.dat", watch_only_dir / "wallet.dat") + + mocked_time = int(time.time()) + self.master_node.setmocktime(mocked_time) + assert_raises_rpc_error(-4, "Failed to create database", self.migrate_and_get_rpc, "") + self.master_node.setmocktime(0) + + # Verify the /wallets/ path exists + assert self.master_node.wallets_path.exists() + # Check backup file exists. Because the wallet has no name, the backup is prefixed with 'default_wallet' + backup_path = self.master_node.wallets_path / f"default_wallet_{mocked_time}.legacy.bak" + assert backup_path.exists() + # Verify the original unnamed wallet was restored + assert (self.master_node.wallets_path / "wallet.dat").exists() + # And verify it is still a BDB wallet + with open(self.master_node.wallets_path / "wallet.dat", "rb") as f: + data = f.read(16) + _, _, magic = struct.unpack("QII", data) + assert_equal(magic, BTREE_MAGIC) + + # Test cleanup: clear default wallet for next test + os.remove(self.old_node.wallets_path / "wallet.dat") + def test_direct_file(self): self.log.info("Test migration of a wallet that is not in a wallet directory") wallet = self.create_legacy_wallet("plainfile") @@ -1372,6 +1406,7 @@ def run_test(self): self.test_encrypted() self.test_nonexistent() self.test_unloaded_by_path() + self.test_default_wallet_failure() self.test_default_wallet() self.test_direct_file() self.test_addressbook() From 9ea84c08d7e24ffefa6f18d6bd2af28ec38cfd98 Mon Sep 17 00:00:00 2001 From: furszy Date: Mon, 5 Jan 2026 16:08:13 -0500 Subject: [PATCH 152/356] test: restorewallet, coverage for existing dirs, unnamed wallet and prune failure The first test verifies that restoring into an existing empty directory or a directory with no .dat db files succeeds, while restoring into a dir with a .dat file fails. 
The second test covers restoring into the default unnamed wallet (wallet.dat), which also implicitly exercises the recovery path used after a failed migration. The third test covers failure during restore on a prune node. When the wallet last sync was beyond the pruning height. Github-Pull: bitcoin/bitcoin#34156 Rebased-From: f011e0f0680a8c39988ae57dae57eb86e92dd449 --- test/functional/wallet_backup.py | 77 ++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py index 7ad83bdf87..3ff798d3d3 100755 --- a/test/functional/wallet_backup.py +++ b/test/functional/wallet_backup.py @@ -40,6 +40,7 @@ from test_framework.util import ( assert_equal, assert_raises_rpc_error, + sha256sum_file, ) @@ -140,6 +141,67 @@ def restore_wallet_existent_name(self): assert_raises_rpc_error(-36, error_message, node.restorewallet, wallet_name, backup_file) assert wallet_file.exists() + def test_restore_existent_dir(self): + self.log.info("Test restore on an existent empty directory") + node = self.nodes[3] + backup_file = self.nodes[0].datadir_path / 'wallet.bak' + wallet_name = "restored_wallet" + wallet_dir = node.wallets_path / wallet_name + os.mkdir(wallet_dir) + res = node.restorewallet(wallet_name, backup_file) + assert_equal(res['name'], wallet_name) + node.unloadwallet(wallet_name) + + self.log.info("Test restore succeeds when the target directory contains non-wallet files") + wallet_file = node.wallets_path / wallet_name / "wallet.dat" + os.remove(wallet_file) + extra_file = node.wallets_path / wallet_name / "not_a_wallet.txt" + extra_file.touch() + res = node.restorewallet(wallet_name, backup_file) + assert_equal(res['name'], wallet_name) + assert extra_file.exists() # extra file was not removed by mistake + node.unloadwallet(wallet_name) + + self.log.info("Test restore failure due to existing db file in the destination directory") + original_shasum = sha256sum_file(wallet_file) + 
error_message = "Failed to restore wallet. Database file exists in '{}'.".format(wallet_dir / "wallet.dat") + assert_raises_rpc_error(-36, error_message, node.restorewallet, wallet_name, backup_file) + # Ensure the wallet file remains untouched + assert wallet_dir.exists() + assert_equal(original_shasum, sha256sum_file(wallet_file)) + + self.log.info("Test restore succeeds when the .dat file in the destination has a different name") + second_wallet = wallet_dir / "hidden_storage.dat" + os.rename(wallet_dir / "wallet.dat", second_wallet) + original_shasum = sha256sum_file(second_wallet) + res = node.restorewallet(wallet_name, backup_file) + assert_equal(res['name'], wallet_name) + assert (wallet_dir / "hidden_storage.dat").exists() + assert_equal(original_shasum, sha256sum_file(second_wallet)) + node.unloadwallet(wallet_name) + + # Clean for follow-up tests + os.remove(wallet_file) + + def test_restore_into_unnamed_wallet(self): + self.log.info("Test restore into a default unnamed wallet") + # This is also useful to test the migration recovery after failure logic + node = self.nodes[3] + if not self.options.descriptors: + node.unloadwallet("") + os.rename(node.wallets_path / "wallet.dat", node.wallets_path / "default.wallet.dat") + backup_file = self.nodes[0].datadir_path / 'wallet.bak' + wallet_name = "" + res = node.restorewallet(wallet_name, backup_file) + assert_equal(res['name'], "") + assert (node.wallets_path / "wallet.dat").exists() + # Clean for follow-up tests + node.unloadwallet("") + os.remove(node.wallets_path / "wallet.dat") + if not self.options.descriptors: + os.rename(node.wallets_path / "default.wallet.dat", node.wallets_path / "wallet.dat") + node.loadwallet("") + def test_pruned_wallet_backup(self): self.log.info("Test loading backup on a pruned node when the backup was created close to the prune height of the restoring node") node = self.nodes[3] @@ -159,6 +221,19 @@ def test_pruned_wallet_backup(self): # the backup to load successfully this 
close to the prune height node.restorewallet('pruned', node.datadir_path / 'wallet_pruned.bak') + self.log.info("Test restore on a pruned node when the backup was beyond the pruning point") + if not self.options.descriptors: + node.unloadwallet("") + os.rename(node.wallets_path / "wallet.dat", node.wallets_path / "default.wallet.dat") + backup_file = self.nodes[0].datadir_path / 'wallet.bak' + wallet_name = "" + error_message = "Wallet loading failed. Prune: last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)" + assert_raises_rpc_error(-4, error_message, node.restorewallet, wallet_name, backup_file) + assert node.wallets_path.exists() # ensure the wallets dir exists + if not self.options.descriptors: + os.rename(node.wallets_path / "default.wallet.dat", node.wallets_path / "wallet.dat") + node.loadwallet("") + def run_test(self): self.log.info("Generating initial blockchain") self.generate(self.nodes[0], 1) @@ -227,6 +302,8 @@ def run_test(self): assert_equal(res2_rpc.getbalance(), balance2) self.restore_wallet_existent_name() + self.test_restore_existent_dir() + self.test_restore_into_unnamed_wallet() if not self.options.descriptors: self.log.info("Restoring using dumped wallet") From a7e2d106db8f193259420bacbccec80ba3beebf1 Mon Sep 17 00:00:00 2001 From: furszy Date: Sat, 27 Dec 2025 13:54:59 -0500 Subject: [PATCH 153/356] wallet: improve post-migration logging Right now, after migration the last message users see is "migration completed", but the migration isn't actually finished yet. We still need to load the new wallets to ensure consistency, and if that fails, the migration will be rolled back. This can be confusing for users. This change logs the post-migration loading step and if a wallet fails to load and the migration will be rolled back. 
Github-Pull: bitcoin/bitcoin#34156 Rebased-From: d70b159c42008ac3b63d1c43d99d4f1316d2f1ef --- src/wallet/wallet.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 119a99d601..b54cf0be0f 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4515,7 +4515,12 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr std::string name = to_reload->GetName(); to_reload.reset(); to_reload = LoadWallet(context, name, /*load_on_start=*/std::nullopt, options, status, error, warnings); - return to_reload != nullptr; + if (!to_reload) { + LogError("Failed to load wallet '%s' after migration. Rolling back migration to preserve consistency. " + "Error cause: %s\n", wallet_name, error.original); + return false; + } + return true; }; // Before anything else, check if there is something to migrate. @@ -4596,6 +4601,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr if (success) { // Migration successful, unload all wallets locally, then reload them. // Reload the main wallet + LogInfo("Loading new wallets after migration...\n"); track_for_cleanup(*local_wallet); success = reload_wallet(local_wallet); res.wallet = local_wallet; From 5e8ad98163af9749e7a3c44a9107cc241c5bd7ab Mon Sep 17 00:00:00 2001 From: furszy Date: Sat, 27 Dec 2025 14:32:11 -0500 Subject: [PATCH 154/356] wallet: migration, fix watch-only and solvables wallets names Because the default wallet has no name, the watch-only and solvables wallets created during migration end up having no name either. This fixes it by applying the same prefix name we use for the backup file for an unnamed default wallet. 
Before: watch-only wallet named "_watchonly" After: watch-only wallet named "default_wallet_watchonly" Github-Pull: bitcoin/bitcoin#34156 Rebased-From: 82caa8193a3e36f248dcc949e0cd41def191efac --- src/wallet/wallet.cpp | 15 ++++++++-- test/functional/wallet_migration.py | 45 +++++++++++++++++++++++++++-- 2 files changed, 55 insertions(+), 5 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index b54cf0be0f..2397d84a6f 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4326,6 +4326,15 @@ bool CWallet::CanGrindR() const return !IsWalletFlagSet(WALLET_FLAG_EXTERNAL_SIGNER); } +// Returns wallet prefix for migration. +// Used to name the backup file and newly created wallets. +// E.g. a watch-only wallet is named "_watchonly". +static std::string MigrationPrefixName(CWallet& wallet) +{ + const std::string& name{wallet.GetName()}; + return name.empty() ? "default_wallet" : name; +} + bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error, MigrationResult& res) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet) { AssertLockHeld(wallet.cs_wallet); @@ -4357,7 +4366,7 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error, DatabaseStatus status; std::vector warnings; - std::string wallet_name = wallet.GetName() + "_watchonly"; + std::string wallet_name = MigrationPrefixName(wallet) + "_watchonly"; std::unique_ptr database = MakeWalletDatabase(wallet_name, options, status, error); if (!database) { error = strprintf(_("Wallet file creation failed: %s"), error); @@ -4394,7 +4403,7 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error, DatabaseStatus status; std::vector warnings; - std::string wallet_name = wallet.GetName() + "_solvables"; + std::string wallet_name = MigrationPrefixName(wallet) + "_solvables"; std::unique_ptr database = MakeWalletDatabase(wallet_name, options, status, error); if (!database) { error = strprintf(_("Wallet file creation failed: %s"), 
error); @@ -4533,7 +4542,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // Make a backup of the DB fs::path this_wallet_dir = fs::absolute(fs::PathFromString(local_wallet->GetDatabase().Filename())).parent_path(); - fs::path backup_filename = fs::PathFromString(strprintf("%s_%d.legacy.bak", (wallet_name.empty() ? "default_wallet" : wallet_name), GetTime())); + fs::path backup_filename = fs::PathFromString(strprintf("%s_%d.legacy.bak", MigrationPrefixName(*local_wallet), GetTime())); fs::path backup_path = this_wallet_dir / backup_filename; if (!local_wallet->BackupWallet(fs::PathToString(backup_path))) { if (was_loaded) { diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 3ca053043b..c11986226d 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -5,6 +5,7 @@ """Test Migrating a wallet from legacy to descriptor.""" import os +from pathlib import Path import random import shutil import struct @@ -25,6 +26,7 @@ from test_framework.script_util import key_to_p2pkh_script, key_to_p2pk_script, script_to_p2sh_script, script_to_p2wsh_script from test_framework.util import ( assert_equal, + assert_greater_than, assert_raises_rpc_error, find_vout_for_address, sha256sum_file, @@ -523,6 +525,14 @@ def test_unloaded_by_path(self): assert_equal(bals, wallet.getbalances()) + def clear_default_wallet(self, backup_file): + # Test cleanup: Clear unnamed default wallet for subsequent tests + (self.old_node.wallets_path / "wallet.dat").unlink() + (self.master_node.wallets_path / "wallet.dat").unlink(missing_ok=True) + shutil.rmtree(self.master_node.wallets_path / "default_wallet_watchonly", ignore_errors=True) + shutil.rmtree(self.master_node.wallets_path / "default_wallet_solvables", ignore_errors=True) + backup_file.unlink() + def test_default_wallet(self): self.log.info("Test migration of the wallet named as the empty string") wallet = self.create_legacy_wallet("") @@ -549,6 +559,36 @@ 
def test_default_wallet(self): self.master_node.setmocktime(0) + wallet.unloadwallet() + self.clear_default_wallet(backup_file=Path(res["backup_path"])) + + def test_default_wallet_watch_only(self): + self.log.info("Test unnamed (default) watch-only wallet migration") + master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name) + wallet = self.create_legacy_wallet("", blank=True) + wallet.importaddress(master_wallet.getnewaddress(address_type="legacy")) + + res, def_wallet = self.migrate_and_get_rpc("") + wallet = self.master_node.get_wallet_rpc("default_wallet_watchonly") + + info = wallet.getwalletinfo() + assert_equal(info["descriptors"], True) + assert_equal(info["format"], "sqlite") + assert_equal(info["private_keys_enabled"], False) + assert_equal(info["walletname"], "default_wallet_watchonly") + + # The default wallet will still exist and have newly generated descriptors + assert (self.master_node.wallets_path / "wallet.dat").exists() + def_wallet_info = def_wallet.getwalletinfo() + assert_equal(def_wallet_info["descriptors"], True) + assert_equal(def_wallet_info["format"], "sqlite") + assert_equal(def_wallet_info["private_keys_enabled"], True) + assert_equal(def_wallet_info["walletname"], "") + assert_greater_than(def_wallet_info["keypoolsize"], 0) + + wallet.unloadwallet() + self.clear_default_wallet(backup_file=Path(res["backup_path"])) + def test_default_wallet_failure(self): self.log.info("Test failure during unnamed (default) wallet migration") master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name) @@ -557,7 +597,7 @@ def test_default_wallet_failure(self): # Create wallet directory with the watch-only name and a wallet file. # Because the wallet dir exists, this will cause migration to fail. 
- watch_only_dir = self.master_node.wallets_path / "_watchonly" + watch_only_dir = self.master_node.wallets_path / "default_wallet_watchonly" os.mkdir(watch_only_dir) shutil.copyfile(self.old_node.wallets_path / "wallet.dat", watch_only_dir / "wallet.dat") @@ -580,7 +620,7 @@ def test_default_wallet_failure(self): assert_equal(magic, BTREE_MAGIC) # Test cleanup: clear default wallet for next test - os.remove(self.old_node.wallets_path / "wallet.dat") + self.clear_default_wallet(backup_path) def test_direct_file(self): self.log.info("Test migration of a wallet that is not in a wallet directory") @@ -1408,6 +1448,7 @@ def run_test(self): self.test_unloaded_by_path() self.test_default_wallet_failure() self.test_default_wallet() + self.test_default_wallet_watch_only() self.test_direct_file() self.test_addressbook() self.test_migrate_raw_p2sh() From 9405e915e79d86d262779ea38104624d37add2a3 Mon Sep 17 00:00:00 2001 From: furszy Date: Sun, 4 Jan 2026 12:25:21 -0500 Subject: [PATCH 155/356] test: coverage for migration failure when last sync is beyond prune height Github-Pull: bitcoin/bitcoin#34156 Rebased-From: b7c34d08dd9549a95cffc6ec1ffa4bb4f81e35eb --- test/functional/wallet_migration.py | 39 +++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index c11986226d..5424fda2ab 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -1431,6 +1431,42 @@ def test_solvable_no_privs(self): assert_equal(addr_info["solvable"], True) assert "hex" in addr_info + def unsynced_wallet_on_pruned_node_fails(self): + self.log.info("Test migration of an unsynced wallet on a pruned node fails gracefully") + wallet = self.create_legacy_wallet("", load_on_startup=False) + last_wallet_synced_block = wallet.getwalletinfo()['lastprocessedblock']['height'] + wallet.unloadwallet() + + shutil.copyfile(self.old_node.wallets_path / "wallet.dat", 
self.master_node.wallets_path / "wallet.dat") + + # Generate blocks just so the wallet best block is pruned + self.restart_node(0, ["-fastprune", "-prune=1", "-nowallet"]) + self.connect_nodes(0, 1) + self.generate(self.master_node, 450, sync_fun=self.no_op) + self.master_node.pruneblockchain(250) + # Ensure next block to sync is unavailable + assert_raises_rpc_error(-1, "Block not available (pruned data)", self.master_node.getblock, self.master_node.getblockhash(last_wallet_synced_block + 1)) + + # Check migration failure + mocked_time = int(time.time()) + self.master_node.setmocktime(mocked_time) + assert_raises_rpc_error(-4, "last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)", self.master_node.migratewallet, wallet_name="") + self.master_node.setmocktime(0) + + # Verify the /wallets/ path exists, the wallet is still BDB and the backup file is there. + assert self.master_node.wallets_path.exists() + + with open(self.master_node.wallets_path / "wallet.dat", "rb") as f: + data = f.read(16) + _, _, magic = struct.unpack("QII", data) + assert_equal(magic, BTREE_MAGIC) + + backup_path = self.master_node.wallets_path / f"default_wallet_{mocked_time}.legacy.bak" + assert backup_path.exists() + + self.clear_default_wallet(backup_path) + + def run_test(self): self.master_node = self.nodes[0] self.old_node = self.nodes[1] @@ -1466,5 +1502,8 @@ def run_test(self): self.test_taproot() self.test_solvable_no_privs() + # Note: After this test the first 250 blocks of 'master_node' are pruned + self.unsynced_wallet_on_pruned_node_fails() + if __name__ == '__main__': WalletMigrationTest(__file__).main() From 76cdeb7b06232050c7d20ffa1395697cc4e53295 Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Wed, 7 Jan 2026 16:02:58 -0800 Subject: [PATCH 156/356] wallet: test: Failed migration cleanup Refactor a common way to perform the failed migration test that exists for default wallets, and add 
relative-path wallets and absolute-path wallets. Github-Pull: 34226 Rebased-From: eeaf28dbe0e09819ab0e95bb7762b29536bdeef6 --- test/functional/wallet_migration.py | 79 ++++++++++++++++++++++------- 1 file changed, 61 insertions(+), 18 deletions(-) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 5424fda2ab..8129baf438 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -589,38 +589,72 @@ def test_default_wallet_watch_only(self): wallet.unloadwallet() self.clear_default_wallet(backup_file=Path(res["backup_path"])) - def test_default_wallet_failure(self): - self.log.info("Test failure during unnamed (default) wallet migration") + def test_migration_failure(self, wallet_name): + is_default = wallet_name == "" + wallet_pretty_name = "unnamed (default)" if is_default else f'"{wallet_name}"' + self.log.info(f"Test failure during migration of wallet named: {wallet_pretty_name}") + # Preface, set up legacy wallet and unload it master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name) - wallet = self.create_legacy_wallet("", blank=True) + wallet = self.create_legacy_wallet(wallet_name, blank=True) wallet.importaddress(master_wallet.getnewaddress(address_type="legacy")) + wallet.unloadwallet() - # Create wallet directory with the watch-only name and a wallet file. - # Because the wallet dir exists, this will cause migration to fail. - watch_only_dir = self.master_node.wallets_path / "default_wallet_watchonly" + if os.path.isabs(wallet_name): + old_path = master_path = Path(wallet_name) + else: + old_path = self.old_node.wallets_path / wallet_name + master_path = self.master_node.wallets_path / wallet_name + os.makedirs(master_path, exist_ok=True) + shutil.copyfile(old_path / "wallet.dat", master_path / "wallet.dat") + + # This will be the watch-only directory the migration tries to create, + # we make migration fail by placing a wallet.dat file there. 
+ wo_prefix = wallet_name or "default_wallet" + # wo_prefix might have path characters in it, this corresponds with + # DoMigration(). + wo_dirname = f"{wo_prefix}_watchonly" + watch_only_dir = self.master_node.wallets_path / wo_dirname os.mkdir(watch_only_dir) - shutil.copyfile(self.old_node.wallets_path / "wallet.dat", watch_only_dir / "wallet.dat") + shutil.copyfile(old_path / "wallet.dat", watch_only_dir / "wallet.dat") + + # Make a file in the wallets dir that must still exist after migration + survive_path = self.master_node.wallets_path / "survive" + open(survive_path, "wb").close() + assert survive_path.exists() mocked_time = int(time.time()) self.master_node.setmocktime(mocked_time) - assert_raises_rpc_error(-4, "Failed to create database", self.migrate_and_get_rpc, "") + assert_raises_rpc_error(-4, "Failed to create database", self.master_node.migratewallet, wallet_name) self.master_node.setmocktime(0) - # Verify the /wallets/ path exists + # Verify the /wallets/ path exists. assert self.master_node.wallets_path.exists() - # Check backup file exists. Because the wallet has no name, the backup is prefixed with 'default_wallet' - backup_path = self.master_node.wallets_path / f"default_wallet_{mocked_time}.legacy.bak" + + # Verify survive is still there + assert survive_path.exists() + # Verify both wallet paths exist. 
+ assert Path(old_path / "wallet.dat").exists() + assert Path(master_path / "wallet.dat").exists() + + backup_prefix = "default_wallet" if is_default else wallet_name + backup_path = master_path / f"{backup_prefix}_{mocked_time}.legacy.bak" assert backup_path.exists() - # Verify the original unnamed wallet was restored - assert (self.master_node.wallets_path / "wallet.dat").exists() - # And verify it is still a BDB wallet - with open(self.master_node.wallets_path / "wallet.dat", "rb") as f: + + with open(self.master_node.wallets_path / wallet_name / self.wallet_data_filename, "rb") as f: data = f.read(16) _, _, magic = struct.unpack("QII", data) assert_equal(magic, BTREE_MAGIC) - # Test cleanup: clear default wallet for next test - self.clear_default_wallet(backup_path) + + # Cleanup + if is_default: + self.clear_default_wallet(backup_path) + else: + backup_path.unlink() + Path(watch_only_dir / "wallet.dat").unlink() + Path(watch_only_dir).rmdir() + Path(master_path / "wallet.dat").unlink() + Path(old_path / "wallet.dat").unlink(missing_ok=True) def test_direct_file(self): self.log.info("Test migration of a wallet that is not in a wallet directory") @@ -1482,7 +1516,16 @@ def run_test(self): self.test_encrypted() self.test_nonexistent() self.test_unloaded_by_path() - self.test_default_wallet_failure() + + migration_failure_cases = [ + "", + "../", + os.path.abspath(self.master_node.datadir_path / "absolute_path"), + "normallynamedwallet" + ] + for wallet_name in migration_failure_cases: + self.test_migration_failure(wallet_name=wallet_name) + self.test_default_wallet() self.test_default_wallet_watch_only() self.test_direct_file() From e4031864225b37f59468900e53d1ad70cf0e561b Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Sat, 10 Jan 2026 06:48:42 +0000 Subject: [PATCH 157/356] Bugfix: GUI: Queue stylesheet changes within eventFilters Some Qt styles (Breeze in particular) add event handlers in setStyleSheet, which causes the eventFilter to run infinitely --- 
src/qt/bitcoinamountfield.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/qt/bitcoinamountfield.cpp b/src/qt/bitcoinamountfield.cpp index 19cd5655cc..f27b8724c1 100644 --- a/src/qt/bitcoinamountfield.cpp +++ b/src/qt/bitcoinamountfield.cpp @@ -73,10 +73,11 @@ bool BitcoinAmountField::validate() void BitcoinAmountField::setValid(bool valid) { - if (valid) - amount->setStyleSheet(""); - else - amount->setStyleSheet(STYLE_INVALID); + const QString style = valid ? QString() : QStringLiteral(STYLE_INVALID); + if (amount->styleSheet() != style) { + // CAUTION: Some Qt styles (Breeze in particular) add event handlers in setStyleSheet, which causes the eventFilter to run infinitely; use a QueuedConnection to change it outside of the eventFilter instead + QMetaObject::invokeMethod(amount, "setStyleSheet", Qt::QueuedConnection, Q_ARG(QString, style)); + } } QString BitcoinAmountField::text() const From 032391cb6e60cbdf956052e01bb9bdfb2a871bfd Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Sun, 14 Dec 2025 13:50:23 +0100 Subject: [PATCH 158/356] netif: fix compilation warning in QueryDefaultGatewayImpl() ``` src/common/netif.cpp:137:51: error: comparison of integers of different signs: 'int64_t' (aka 'long') and 'unsigned long' [-Werror,-Wsign-compare] 137 | for (nlmsghdr* hdr = (nlmsghdr*)response; NLMSG_OK(hdr, recv_result); hdr = NLMSG_NEXT(hdr, recv_result)) { | ^~~~~~~~~~~~~~~~~~~~~~~~~~ /usr/include/netlink/netlink.h:220:31: note: expanded from macro 'NLMSG_OK' 220 | #define NLMSG_OK(_hdr, _len) NL_ITEM_OK(_hdr, _len, NLMSG_HDRLEN, _NLMSG_LEN) | ^ ~~~~ ~~~~~~~~~~~~ /usr/include/netlink/netlink.h:203:10: note: expanded from macro 'NL_ITEM_OK' 203 | ((_len) >= _hlen && _LEN_M(_ptr) >= _hlen && _LEN_M(_ptr) <= (_len)) | ~~~~ ^ ~~~~~ 1 error generated. ``` Happens on FreeBSD 15.0, with the default compiler (Clang 19). 
On FreeBSD 14, `/usr/include/netlink/netlink.h` contains: ``` #define NLMSG_HDRLEN ((int)sizeof(struct nlmsghdr)) ``` On FreeBSD 15, `/usr/include/netlink/netlink.h` contains: ``` #define NLMSG_HDRLEN (sizeof(struct nlmsghdr)) ``` Github-Pull: #34093 Rebased-From: be2a6248fbc6ade0c0cd734245138f83f3202266 --- src/common/netif.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/common/netif.cpp b/src/common/netif.cpp index 7424f977c7..7c85db28cf 100644 --- a/src/common/netif.cpp +++ b/src/common/netif.cpp @@ -93,7 +93,12 @@ std::optional<CNetAddr> QueryDefaultGatewayImpl(sa_family_t family) return std::nullopt; } - for (nlmsghdr* hdr = (nlmsghdr*)response; NLMSG_OK(hdr, recv_result); hdr = NLMSG_NEXT(hdr, recv_result)) { +#if defined(__FreeBSD_version) && __FreeBSD_version >= 1500029 + using recv_result_t = size_t; +#else + using recv_result_t = int64_t; +#endif + for (nlmsghdr* hdr = (nlmsghdr*)response; NLMSG_OK(hdr, static_cast<recv_result_t>(recv_result)); hdr = NLMSG_NEXT(hdr, recv_result)) { rtmsg* r = (rtmsg*)NLMSG_DATA(hdr); int remaining_len = RTM_PAYLOAD(hdr); From 7d89cfc5e05ad1c7732543283a971bde9068e0f1 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Mon, 12 Jan 2026 12:06:34 +0000 Subject: [PATCH 159/356] Bugfix: txmempool: Fallback to CTxMemPoolEntry copying if Boost is too old for node extraction --- src/txmempool.cpp | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 3a5a3fb306..b430202d9b 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -453,11 +453,20 @@ void CTxMemPool::Apply(ChangeSet* changeset) ancestors = *Assume(changeset->CalculateMemPoolAncestors(tx_entry, Limits::NoLimits())); } // First splice this entry into mapTx.
+#if BOOST_VERSION >= 107400 auto node_handle = changeset->m_to_add.extract(tx_entry); auto result = mapTx.insert(std::move(node_handle)); Assume(result.inserted); txiter it = result.position; +#else + // Boost 1.73 didn't support node extraction, so we have to copy + auto result = mapTx.emplace(CTxMemPoolEntry::ExplicitCopy, *tx_entry); + changeset->m_to_add.erase(tx_entry); + + Assume(result.second); + txiter it = result.first; +#endif // Now update the entry for ancestors/descendants. if (ancestors.has_value()) { From ec022c693b6cf6ecd02befe22c0ccfdc2f4fb3d2 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Wed, 8 Oct 2025 18:22:42 +0100 Subject: [PATCH 160/356] qt: Modernize custom filtering In `QSortFilterProxyModel`, `invalidateFilter()` is scheduled for deprecation in Qt 6.13. `beginFilterChange()` was introduced in Qt 6.9. `endFilterChange()` was introduced in Qt 6.10. Github-Pull: gui#899 Rebased-From: e15e8cbadad5ce1de41ebb817b87054f8b5192f2 --- src/qt/transactionfilterproxy.cpp | 46 +++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/src/qt/transactionfilterproxy.cpp b/src/qt/transactionfilterproxy.cpp index 3be7e1a969..5771b68178 100644 --- a/src/qt/transactionfilterproxy.cpp +++ b/src/qt/transactionfilterproxy.cpp @@ -62,28 +62,65 @@ bool TransactionFilterProxy::filterAcceptsRow(int sourceRow, const QModelIndex & void TransactionFilterProxy::setDateRange(const std::optional& from, const std::optional& to) { +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + beginFilterChange(); +#endif + dateFrom = from; dateTo = to; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + endFilterChange(QSortFilterProxyModel::Direction::Rows); +#else invalidateFilter(); +#endif } void TransactionFilterProxy::setSearchString(const QString &search_string) { if (m_search_string == search_string) return; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + beginFilterChange(); +#endif + m_search_string = 
search_string; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + endFilterChange(QSortFilterProxyModel::Direction::Rows); +#else invalidateFilter(); +#endif } void TransactionFilterProxy::setTypeFilter(quint32 modes) { +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + beginFilterChange(); +#endif + this->typeFilter = modes; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + endFilterChange(QSortFilterProxyModel::Direction::Rows); +#else invalidateFilter(); +#endif } void TransactionFilterProxy::setMinAmount(const CAmount& minimum) { +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + beginFilterChange(); +#endif + this->minAmount = minimum; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + endFilterChange(QSortFilterProxyModel::Direction::Rows); +#else invalidateFilter(); +#endif } void TransactionFilterProxy::setWatchOnlyFilter(WatchOnlyFilter filter) @@ -99,8 +136,17 @@ void TransactionFilterProxy::setLimit(int limit) void TransactionFilterProxy::setShowInactive(bool _showInactive) { +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + beginFilterChange(); +#endif + this->showInactive = _showInactive; + +#if QT_VERSION >= QT_VERSION_CHECK(6, 10, 0) + endFilterChange(QSortFilterProxyModel::Direction::Rows); +#else invalidateFilter(); +#endif } int TransactionFilterProxy::rowCount(const QModelIndex &parent) const From 2e4688618ba6a68df0936df0cc86b657ec35b4ef Mon Sep 17 00:00:00 2001 From: ismaelsadeeq Date: Wed, 24 Sep 2025 16:31:38 +0200 Subject: [PATCH 161/356] miner: fix `addPackageTxs` unsigned integer overflow Github-Pull: #33475 Rebased-From: b807dfcdc5929c314d43b790c9e705d5bf0a86e8 --- src/node/miner.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 33eeaf91fb..b9ecd855f0 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -394,8 +394,8 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda ++nConsecutiveFailed; - if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES 
&& nBlockWeight > - m_options.nBlockMaxWeight - m_options.block_reserved_weight) { + if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight + + m_options.block_reserved_weight > m_options.nBlockMaxWeight) { // Give up if we're close to full and haven't succeeded in a while break; } From 7a71850a6d1d2eaf09e19d9d0af574a90487ec2b Mon Sep 17 00:00:00 2001 From: SatsAndSports Date: Tue, 28 Oct 2025 16:52:35 +0100 Subject: [PATCH 162/356] Remove unreliable seed from chainparams.cpp, and the associated README Github-Pull: #33723 Rebased-From: b0c706795ce6a3a00bf068a81ee99fef2ee9bf7e --- contrib/seeds/README.md | 3 +-- src/kernel/chainparams.cpp | 1 - 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md index a1a2e34b5d..58d7f41130 100644 --- a/contrib/seeds/README.md +++ b/contrib/seeds/README.md @@ -10,14 +10,13 @@ to addrman with). Update `MIN_BLOCKS` in `makeseeds.py` and the `-m`/`--minblocks` arguments below, as needed. -The seeds compiled into the release are created from sipa's, achow101's and luke-jr's +The seeds compiled into the release are created from sipa's and achow101's DNS seed, virtu's crawler, and asmap community AS map data. 
Run the following commands from the `/contrib/seeds` directory: ``` curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt curl https://21.ninja/seeds.txt.gz | gzip -dc >> seeds_main.txt -curl https://luke.dashjr.org/programs/bitcoin/files/charts/seeds.txt >> seeds_main.txt curl https://mainnet.achownodes.xyz/seeds.txt.gz | gzip -dc >> seeds_main.txt curl https://signet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_signet.txt curl https://testnet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_test.txt diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index ac3fc9eada..0f193eff74 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -146,7 +146,6 @@ class CMainParams : public CChainParams { // release ASAP to avoid it where possible. vSeeds.emplace_back("seed.bitcoin.sipa.be."); // Pieter Wuille, only supports x1, x5, x9, and xd vSeeds.emplace_back("dnsseed.bluematt.me."); // Matt Corallo, only supports x9 - vSeeds.emplace_back("dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us."); // Luke Dashjr vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch."); // Jonas Schnelli, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.btc.petertodd.net."); // Peter Todd, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors Provoost From daef5852f02513521654e15d62748648765acf92 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Thu, 8 Jan 2026 12:45:14 +0000 Subject: [PATCH 163/356] guix: Fix `osslsigncode` tests Github-Pull: #34227 Rebased-From: 194114daf385a5db50e1507fda79a1a93240d494 --- contrib/guix/manifest.scm | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 4e7e955218..176fcee046 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -2,6 +2,7 @@ ((gnu packages bash) #:select (bash-minimal)) (gnu packages bison) ((gnu 
packages certs) #:select (nss-certs)) + ((gnu packages check) #:select (libfaketime)) ((gnu packages cmake) #:select (cmake-minimal)) (gnu packages commencement) (gnu packages compression) @@ -209,7 +210,17 @@ and abstract ELF, PE and MachO formats.") (base32 "1j47vwq4caxfv0xw68kw5yh00qcpbd56d7rq6c483ma3y7s96yyz")))) (build-system cmake-build-system) - (inputs (list openssl)) + (arguments + (list + #:phases + #~(modify-phases %standard-phases + (replace 'check + (lambda* (#:key tests? #:allow-other-keys) + (if tests? + (invoke "faketime" "-f" "@2025-01-01 00:00:00" ;; Tests fail after 2025. + "ctest" "--output-on-failure" "--no-tests=error") + (format #t "test suite not run~%"))))))) + (inputs (list libfaketime openssl)) (home-page "https://github.com/mtrojnar/osslsigncode") (synopsis "Authenticode signing and timestamping tool") (description "osslsigncode is a small tool that implements part of the From bd19c474410123789bbe50b43c377e299ecc9629 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Tue, 2 Dec 2025 01:09:11 +0100 Subject: [PATCH 164/356] test: check that peer's announced starting height is remembered Note that the announced starting height is neither verified nor used in any other logic, so reporting a bogus value doesn't have any consequences -- it's only used for inspection via the `getpeerinfo` RPC and in some debug messages. Github-Pull: #33990 Rebased-From: 52f96cc235d309d9156eb742036c859984b9a467 --- test/functional/p2p_handshake.py | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py index 18307a2824..1686bce547 100755 --- a/test/functional/p2p_handshake.py +++ b/test/functional/p2p_handshake.py @@ -6,6 +6,7 @@ Test P2P behaviour during the handshake phase (VERSION, VERACK messages). 
""" import itertools +import random import time from test_framework.test_framework import BitcoinTestFramework @@ -15,9 +16,18 @@ NODE_NONE, NODE_P2P_V2, NODE_WITNESS, + msg_version, +) +from test_framework.p2p import ( + P2PInterface, + P2P_SERVICES, + P2P_SUBVERSION, + P2P_VERSION, +) +from test_framework.util import ( + assert_equal, + p2p_port, ) -from test_framework.p2p import P2PInterface -from test_framework.util import p2p_port # Desirable service flags for outbound non-pruned and pruned peers. Note that @@ -64,6 +74,20 @@ def test_desirable_service_flags(self, node, service_flag_tests, desirable_servi assert (services & desirable_service_flags) == desirable_service_flags self.add_outbound_connection(node, conn_type, services, wait_for_disconnect=False) + def test_startingheight(self, node): + for fake_startheight in [-2**31, -1, 0, 1000000, 2**31-1] + [random.randint(-2**31, 2**31) for _ in range(5)]: + peer = node.add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False) + version = msg_version() + version.nVersion = P2P_VERSION + version.strSubVer = P2P_SUBVERSION + version.nServices = P2P_SERVICES + version.nStartingHeight = fake_startheight + peer.send_message(version) + peer.wait_for_verack() + peer_info = node.getpeerinfo()[-1] + assert_equal(peer_info['startingheight'], fake_startheight) + peer.peer_disconnect() + def generate_at_mocktime(self, time): self.nodes[0].setmocktime(time) self.generate(self.nodes[0], 1) @@ -95,6 +119,9 @@ def run_test(self): node.addconnection(node_listen_addr, "outbound-full-relay", self.options.v2transport) self.wait_until(lambda: len(node.getpeerinfo()) == 0) + self.log.info("Check that peer's announced starting height is remembered") + self.test_startingheight(node) + if __name__ == '__main__': P2PHandshakeTest(__file__).main() From ff488b542c28f2d4f1a0f2068e05615fdf98fcce Mon Sep 17 00:00:00 2001 From: brunoerg Date: Wed, 31 Dec 2025 12:04:38 -0300 Subject: [PATCH 165/356] test: fix 
feature_pruning when built without wallet Github-Pull: #34185 Rebased-From: 9b57c8d2bd15a414e08a9e43367d8d3d82c25fe4 --- test/functional/feature_pruning.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index 519877ac5b..2b39172cb3 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -352,10 +352,6 @@ def wallet_test(self): # check that wallet loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. - self.log.info("Syncing node 5 to test wallet") - self.connect_nodes(0, 5) - nds = [self.nodes[0], self.nodes[5]] - self.sync_blocks(nds, wait=5, timeout=300) self.restart_node(5, extra_args=["-prune=550", "-blockfilterindex=1"]) # restart to trigger rescan self.log.info("Success") @@ -465,6 +461,10 @@ def run_test(self): self.log.info("Test manual pruning with timestamps") self.manual_test(4, use_timestamp=True) + self.log.info("Syncing node 5 to node 0") + self.connect_nodes(0, 5) + self.sync_blocks([self.nodes[0], self.nodes[5]], wait=5, timeout=300) + if self.is_wallet_compiled(): self.log.info("Test wallet re-scan") self.wallet_test() From eca0952bbee7131c3fa7d2290d295b191859dceb Mon Sep 17 00:00:00 2001 From: brunoerg Date: Wed, 31 Dec 2025 15:11:16 -0300 Subject: [PATCH 166/356] test: check wallet rescan properly in feature_pruning Github-Pull: #34185 Rebased-From: 8fb5e5f41ddf550a78b1253184d79a107097815a --- test/functional/feature_pruning.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index 2b39172cb3..a845037957 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -344,16 +344,22 @@ def has_block(index): self.log.info("Success") - def wallet_test(self): + def test_wallet_rescan(self): # check that the pruning node's wallet is still in 
good shape self.log.info("Stop and start pruning node to trigger wallet rescan") self.restart_node(2, extra_args=["-prune=550"]) - self.log.info("Success") + + wallet_info = self.nodes[2].getwalletinfo() + self.wait_until(lambda: wallet_info["scanning"] == False) + self.wait_until(lambda: wallet_info["lastprocessedblock"]["height"] == self.nodes[2].getblockcount()) # check that wallet loads successfully when restarting a pruned node after IBD. # this was reported to fail in #7494. self.restart_node(5, extra_args=["-prune=550", "-blockfilterindex=1"]) # restart to trigger rescan - self.log.info("Success") + + wallet_info = self.nodes[5].getwalletinfo() + self.wait_until(lambda: wallet_info["scanning"] == False) + self.wait_until(lambda: wallet_info["lastprocessedblock"]["height"] == self.nodes[0].getblockcount()) def run_test(self): self.log.info("Warning! This test requires 4GB of disk space") @@ -467,7 +473,7 @@ def run_test(self): if self.is_wallet_compiled(): self.log.info("Test wallet re-scan") - self.wallet_test() + self.test_wallet_rescan() self.log.info("Test invalid pruning command line options") self.test_invalid_command_line_options() From 7bd80ba46065c6e9f360699177e83485e16f02dd Mon Sep 17 00:00:00 2001 From: Ataraxia Date: Fri, 7 Nov 2025 11:29:49 +0530 Subject: [PATCH 167/356] Capitalise rpcbind-ignored warning message Github-Pull: #33813 Rebased-From: 335a05c69e182af01eb34ab24a0dff3866ac7c24 --- src/httpserver.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 4c7e75fbf3..f8aded81e0 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -373,7 +373,7 @@ static bool HTTPBindAddresses(struct evhttp* http) LogPrintf("WARNING: option -rpcallowip was specified without -rpcbind; this doesn't usually make sense\n"); } if (!gArgs.GetArgs("-rpcbind").empty()) { - InitWarning(_("option -rpcbind was ignored because -rpcallowip was not specified, refusing to allow everyone to connect\n")); + 
InitWarning(_("Option -rpcbind was ignored because -rpcallowip was not specified, refusing to allow everyone to connect\n")); } } else { // Specific bind addresses for (const std::string& strRPCBind : gArgs.GetArgs("-rpcbind")) { From f44b206a5e99d9d9933c3ba6cebe8845f5d6a9d7 Mon Sep 17 00:00:00 2001 From: Eugene Siegel Date: Wed, 26 Nov 2025 15:51:51 -0500 Subject: [PATCH 168/356] net: fix use-after-free with v2->v1 reconnection logic CConnman::Stop() resets semOutbound, yet m_reconnections is not cleared in Stop. Each ReconnectionInfo contains a grant member that points to the memory that semOutbound pointed to and ~CConnman will attempt to access the grant field (memory that was already freed) when destroying m_reconnections. Fix this by calling m_reconnections.clear() in CConnman::Stop() and add appropriate annotations. Github-Pull: bitcoin/bitcoin#33956 Rebased-From: 167df7a98c8514da6979d45e58fcdcbd0733b8fe --- src/net.cpp | 3 +++ src/net.h | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 6b2ef5f43d..ccb3929252 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -3368,6 +3368,8 @@ void CConnman::StopThreads() void CConnman::StopNodes() { + AssertLockNotHeld(m_reconnections_mutex); + if (fAddressesInitialized) { DumpAddresses(); fAddressesInitialized = false; @@ -3394,6 +3396,7 @@ void CConnman::StopNodes() DeleteNode(pnode); } m_nodes_disconnected.clear(); + WITH_LOCK(m_reconnections_mutex, m_reconnections.clear()); vhListenSocket.clear(); semOutbound.reset(); semAddnode.reset(); diff --git a/src/net.h b/src/net.h index 2f7b832fba..da402cdc41 100644 --- a/src/net.h +++ b/src/net.h @@ -1125,9 +1125,10 @@ class CConnman bool Start(CScheduler& scheduler, const Options& options) EXCLUSIVE_LOCKS_REQUIRED(!m_total_bytes_sent_mutex, !m_added_nodes_mutex, !m_addr_fetches_mutex, !mutexMsgProc); void StopThreads(); - void StopNodes(); - void Stop() + void StopNodes() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex); + void 
Stop() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex) { + AssertLockNotHeld(m_reconnections_mutex); StopThreads(); StopNodes(); }; From bbbd33e40b5ba2eab3de48b748535b216768b6f5 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Fri, 25 Jul 2025 13:48:42 +0200 Subject: [PATCH 169/356] doc: Fix typo in init log Github-Pull: bitcoin/bitcoin#33960 Rebased-From: 22229de7288fed6369bc70b2af674906e6777ce4 --- src/init.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/init.cpp b/src/init.cpp index 7fdbf75dc6..3dcb4bb9c4 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1773,7 +1773,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } else { // Prior to setting NODE_NETWORK, check if we can provide historical blocks. if (!WITH_LOCK(chainman.GetMutex(), return chainman.BackgroundSyncInProgress())) { - LogPrintf("Setting NODE_NETWORK on non-prune mode\n"); + LogInfo("Setting NODE_NETWORK in non-prune mode"); g_local_services = ServiceFlags(g_local_services | NODE_NETWORK); } else { LogPrintf("Running node in NODE_NETWORK_LIMITED mode until snapshot background sync completes\n"); From 2cde006420db5392f29fa16d359e474a0f779b32 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Thu, 27 Nov 2025 10:31:30 +0100 Subject: [PATCH 170/356] log: Use LogError for fatal errors Github-Pull: bitcoin/bitcoin#33960 Rebased-From: fa0018d01102ad1d358eee20d8bae1e438ceebf8 --- src/addrman.cpp | 2 +- src/dbwrapper.cpp | 8 ++++---- src/flatfile.cpp | 4 ++-- src/httpserver.cpp | 4 ++-- src/node/utxo_snapshot.cpp | 4 ++-- src/sync.cpp | 22 +++++++++++----------- src/txdb.cpp | 2 +- src/util/fs_helpers.cpp | 12 ++++++------ src/validation.cpp | 32 ++++++++++++++++---------------- src/wallet/scriptpubkeyman.cpp | 2 +- 10 files changed, 46 insertions(+), 46 deletions(-) diff --git a/src/addrman.cpp b/src/addrman.cpp index 9c3a24db90..5cd1d41fd9 100644 --- a/src/addrman.cpp +++ 
b/src/addrman.cpp @@ -1055,7 +1055,7 @@ void AddrManImpl::Check() const const int err{CheckAddrman()}; if (err) { - LogPrintf("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i\n", err); + LogError("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i", err); assert(false); } } diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp index 8fb366515a..d2217aadef 100644 --- a/src/dbwrapper.cpp +++ b/src/dbwrapper.cpp @@ -46,8 +46,8 @@ static void HandleError(const leveldb::Status& status) if (status.ok()) return; const std::string errmsg = "Fatal LevelDB error: " + status.ToString(); - LogPrintf("%s\n", errmsg); - LogPrintf("You can use -debug=leveldb to get more complete diagnostic messages\n"); + LogError("%s", errmsg); + LogInfo("You can use -debug=leveldb to get more complete diagnostic messages"); throw dbwrapper_error(errmsg); } @@ -344,7 +344,7 @@ std::optional CDBWrapper::ReadImpl(Span key) const if (!status.ok()) { if (status.IsNotFound()) return std::nullopt; - LogPrintf("LevelDB read failure: %s\n", status.ToString()); + LogError("LevelDB read failure: %s", status.ToString()); HandleError(status); } return strValue; @@ -359,7 +359,7 @@ bool CDBWrapper::ExistsImpl(Span key) const if (!status.ok()) { if (status.IsNotFound()) return false; - LogPrintf("LevelDB read failure: %s\n", status.ToString()); + LogError("LevelDB read failure: %s", status.ToString()); HandleError(status); } return true; diff --git a/src/flatfile.cpp b/src/flatfile.cpp index df6596e940..33d8baf44f 100644 --- a/src/flatfile.cpp +++ b/src/flatfile.cpp @@ -41,11 +41,11 @@ FILE* FlatFileSeq::Open(const FlatFilePos& pos, bool read_only) const if (!file && !read_only) file = fsbridge::fopen(path, "wb+"); if (!file) { - LogPrintf("Unable to open file %s\n", fs::PathToString(path)); + LogError("Unable to open file %s", fs::PathToString(path)); return nullptr; } if (pos.nPos && fseek(file, pos.nPos, SEEK_SET)) { - LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, fs::PathToString(path)); + LogError("Unable 
to seek to position %u of %s", pos.nPos, fs::PathToString(path)); if (fclose(file) != 0) { LogError("Unable to close file %s", fs::PathToString(path)); } diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 615c11f411..c89b48be6b 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -563,7 +563,7 @@ bool InitHTTPServer(const util::SignalInterrupt& interrupt) raii_evhttp http_ctr = obtain_evhttp(base_ctr.get()); struct evhttp* http = http_ctr.get(); if (!http) { - LogPrintf("couldn't create evhttp. Exiting.\n"); + LogError("Couldn't create evhttp. Exiting."); return false; } @@ -573,7 +573,7 @@ bool InitHTTPServer(const util::SignalInterrupt& interrupt) evhttp_set_gencb(http, http_request_cb, (void*)&interrupt); if (!HTTPBindAddresses(http)) { - LogPrintf("Unable to bind all endpoints for RPC server\n"); + LogError("Unable to bind all endpoints for RPC server"); return false; } diff --git a/src/node/utxo_snapshot.cpp b/src/node/utxo_snapshot.cpp index ca5491bdc2..af06d832db 100644 --- a/src/node/utxo_snapshot.cpp +++ b/src/node/utxo_snapshot.cpp @@ -32,14 +32,14 @@ bool WriteSnapshotBaseBlockhash(Chainstate& snapshot_chainstate) FILE* file{fsbridge::fopen(write_to, "wb")}; AutoFile afile{file}; if (afile.IsNull()) { - LogPrintf("[snapshot] failed to open base blockhash file for writing: %s\n", + LogError("[snapshot] failed to open base blockhash file for writing: %s", fs::PathToString(write_to)); return false; } afile << *snapshot_chainstate.m_from_snapshot_blockhash; if (afile.fclose() != 0) { - LogPrintf("[snapshot] failed to close base blockhash file %s after writing\n", + LogError("[snapshot] failed to close base blockhash file %s after writing", fs::PathToString(write_to)); return false; } diff --git a/src/sync.cpp b/src/sync.cpp index 93c9194541..212e4e36d2 100644 --- a/src/sync.cpp +++ b/src/sync.cpp @@ -90,8 +90,8 @@ LockData& GetLockData() { static void potential_deadlock_detected(const LockPair& mismatch, const LockStack& s1, const LockStack& 
s2) { - LogPrintf("POTENTIAL DEADLOCK DETECTED\n"); - LogPrintf("Previous lock order was:\n"); + LogError("POTENTIAL DEADLOCK DETECTED"); + LogError("Previous lock order was:"); for (const LockStackItem& i : s1) { std::string prefix{}; if (i.first == mismatch.first) { @@ -100,11 +100,11 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac if (i.first == mismatch.second) { prefix = " (2)"; } - LogPrintf("%s %s\n", prefix, i.second.ToString()); + LogError("%s %s", prefix, i.second.ToString()); } std::string mutex_a, mutex_b; - LogPrintf("Current lock order is:\n"); + LogError("Current lock order is:"); for (const LockStackItem& i : s2) { std::string prefix{}; if (i.first == mismatch.first) { @@ -115,7 +115,7 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac prefix = " (2)"; mutex_b = i.second.Name(); } - LogPrintf("%s %s\n", prefix, i.second.ToString()); + LogError("%s %s", prefix, i.second.ToString()); } if (g_debug_lockorder_abort) { tfm::format(std::cerr, "Assertion failed: detected inconsistent lock order for %s, details in debug log.\n", s2.back().second.ToString()); @@ -126,14 +126,14 @@ static void potential_deadlock_detected(const LockPair& mismatch, const LockStac static void double_lock_detected(const void* mutex, const LockStack& lock_stack) { - LogPrintf("DOUBLE LOCK DETECTED\n"); - LogPrintf("Lock order:\n"); + LogError("DOUBLE LOCK DETECTED"); + LogError("Lock order:"); for (const LockStackItem& i : lock_stack) { std::string prefix{}; if (i.first == mutex) { prefix = " (*)"; } - LogPrintf("%s %s\n", prefix, i.second.ToString()); + LogError("%s %s", prefix, i.second.ToString()); } if (g_debug_lockorder_abort) { tfm::format(std::cerr, @@ -225,10 +225,10 @@ void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, c } } - LogPrintf("INCONSISTENT LOCK ORDER DETECTED\n"); - LogPrintf("Current lock order (least recent first) is:\n"); + LogError("INCONSISTENT LOCK ORDER 
DETECTED"); + LogError("Current lock order (least recent first) is:"); for (const LockStackItem& i : lock_stack) { - LogPrintf(" %s\n", i.second.ToString()); + LogError(" %s", i.second.ToString()); } if (g_debug_lockorder_abort) { tfm::format(std::cerr, "%s:%s %s was not most recent critical section locked, details in debug log.\n", file, line, guardname); diff --git a/src/txdb.cpp b/src/txdb.cpp index 1622039d63..0b1a8a63a7 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -134,7 +134,7 @@ bool CCoinsViewDB::BatchWrite(CoinsViewCacheCursor& cursor, const uint256 &hashB if (m_options.simulate_crash_ratio) { static FastRandomContext rng; if (rng.randrange(m_options.simulate_crash_ratio) == 0) { - LogPrintf("Simulating a crash. Goodbye.\n"); + LogError("Simulating a crash. Goodbye."); _Exit(0); } } diff --git a/src/util/fs_helpers.cpp b/src/util/fs_helpers.cpp index e81fc07d06..4c525983e9 100644 --- a/src/util/fs_helpers.cpp +++ b/src/util/fs_helpers.cpp @@ -108,28 +108,28 @@ std::streampos GetFileSize(const char* path, std::streamsize max) bool FileCommit(FILE* file) { if (fflush(file) != 0) { // harmless if redundantly called - LogPrintf("fflush failed: %s\n", SysErrorString(errno)); + LogError("fflush failed: %s", SysErrorString(errno)); return false; } #ifdef WIN32 HANDLE hFile = (HANDLE)_get_osfhandle(_fileno(file)); if (FlushFileBuffers(hFile) == 0) { - LogPrintf("FlushFileBuffers failed: %s\n", Win32ErrorString(GetLastError())); + LogError("FlushFileBuffers failed: %s", Win32ErrorString(GetLastError())); return false; } #elif defined(__APPLE__) && defined(F_FULLFSYNC) if (fcntl(fileno(file), F_FULLFSYNC, 0) == -1) { // Manpage says "value other than -1" is returned on success - LogPrintf("fcntl F_FULLFSYNC failed: %s\n", SysErrorString(errno)); + LogError("fcntl F_FULLFSYNC failed: %s", SysErrorString(errno)); return false; } #elif HAVE_FDATASYNC if (fdatasync(fileno(file)) != 0 && errno != EINVAL) { // Ignore EINVAL for filesystems that don't support sync - 
LogPrintf("fdatasync failed: %s\n", SysErrorString(errno)); + LogError("fdatasync failed: %s", SysErrorString(errno)); return false; } #else if (fsync(fileno(file)) != 0 && errno != EINVAL) { - LogPrintf("fsync failed: %s\n", SysErrorString(errno)); + LogError("fsync failed: %s", SysErrorString(errno)); return false; } #endif @@ -286,7 +286,7 @@ fs::path GetSpecialFolderPath(int nFolder, bool fCreate) return fs::path(pszPath); } - LogPrintf("SHGetSpecialFolderPathW() failed, could not obtain requested path.\n"); + LogError("SHGetSpecialFolderPathW() failed, could not obtain requested path."); return fs::path(""); } #endif diff --git a/src/validation.cpp b/src/validation.cpp index a1ac4e1e14..f5c795f387 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1278,7 +1278,7 @@ bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws) unsigned int currentBlockScriptVerifyFlags{GetBlockScriptFlags(*m_active_chainstate.m_chain.Tip(), m_active_chainstate.m_chainman)}; if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, ws.m_precomputed_txdata, m_active_chainstate.CoinsTip(), GetValidationCache())) { - LogPrintf("BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s\n", hash.ToString(), state.ToString()); + LogError("BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s", hash.ToString(), state.ToString()); return Assume(false); } @@ -3488,8 +3488,8 @@ bool Chainstate::ActivateBestChain(BlockValidationState& state, std::shared_ptr< // Belt-and-suspenders check that we aren't attempting to advance the background // chainstate past the snapshot base block. if (WITH_LOCK(::cs_main, return m_disabled)) { - LogPrintf("m_disabled is set - this chainstate should not be in operation. " - "Please report this as a bug. %s\n", CLIENT_BUGREPORT); + LogError("m_disabled is set - this chainstate should not be in operation. 
" + "Please report this as a bug. %s", CLIENT_BUGREPORT); return false; } @@ -4798,12 +4798,12 @@ VerifyDBResult CVerifyDB::VerifyDB( CBlock block; // check level 0: read from disk if (!chainstate.m_blockman.ReadBlock(block, *pindex)) { - LogPrintf("Verification error: ReadBlock failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("Verification error: ReadBlock failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); return VerifyDBResult::CORRUPTED_BLOCK_DB; } // check level 1: verify block validity if (nCheckLevel >= 1 && !CheckBlock(block, state, consensus_params)) { - LogPrintf("Verification error: found bad block at %d, hash=%s (%s)\n", + LogError("Verification error: found bad block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); return VerifyDBResult::CORRUPTED_BLOCK_DB; } @@ -4812,7 +4812,7 @@ VerifyDBResult CVerifyDB::VerifyDB( CBlockUndo undo; if (!pindex->GetUndoPos().IsNull()) { if (!chainstate.m_blockman.ReadBlockUndo(undo, *pindex)) { - LogPrintf("Verification error: found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("Verification error: found bad undo data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); return VerifyDBResult::CORRUPTED_BLOCK_DB; } } @@ -4825,7 +4825,7 @@ VerifyDBResult CVerifyDB::VerifyDB( assert(coins.GetBestBlock() == pindex->GetBlockHash()); DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins); if (res == DISCONNECT_FAILED) { - LogPrintf("Verification error: irrecoverable inconsistency in block data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("Verification error: irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); return VerifyDBResult::CORRUPTED_BLOCK_DB; } if (res == DISCONNECT_UNCLEAN) { @@ -4841,7 +4841,7 @@ VerifyDBResult 
CVerifyDB::VerifyDB( if (chainstate.m_chainman.m_interrupt) return VerifyDBResult::INTERRUPTED; } if (pindexFailure) { - LogPrintf("Verification error: coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions); + LogError("Verification error: coin database inconsistencies found (last %i blocks, %i good transactions before that)", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions); return VerifyDBResult::CORRUPTED_BLOCK_DB; } if (skipped_l3_checks) { @@ -4864,11 +4864,11 @@ VerifyDBResult CVerifyDB::VerifyDB( pindex = chainstate.m_chain.Next(pindex); CBlock block; if (!chainstate.m_blockman.ReadBlock(block, *pindex)) { - LogPrintf("Verification error: ReadBlock failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + LogError("Verification error: ReadBlock failed at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString()); return VerifyDBResult::CORRUPTED_BLOCK_DB; } if (!chainstate.ConnectBlock(block, state, pindex, coins)) { - LogPrintf("Verification error: found unconnectable block at %d, hash=%s (%s)\n", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); + LogError("Verification error: found unconnectable block at %d, hash=%s (%s)", pindex->nHeight, pindex->GetBlockHash().ToString(), state.ToString()); return VerifyDBResult::CORRUPTED_BLOCK_DB; } if (chainstate.m_chainman.m_interrupt) return VerifyDBResult::INTERRUPTED; @@ -5701,7 +5701,7 @@ Chainstate& ChainstateManager::InitializeChainstate(CTxMemPool* mempool) const bool destroyed = DestroyDB(path_str); if (!destroyed) { - LogPrintf("error: leveldb DestroyDB call failed on %s\n", path_str); + LogError("leveldb DestroyDB call failed on %s", path_str); } // Datadir should be removed from filesystem; otherwise initialization may detect @@ -6400,8 +6400,8 @@ util::Result Chainstate::InvalidateCoinsDBOnDisk() auto src_str = 
fs::PathToString(snapshot_datadir); auto dest_str = fs::PathToString(invalid_path); - LogPrintf("%s: error renaming file '%s' -> '%s': %s\n", - __func__, src_str, dest_str, e.what()); + LogError("While invalidating the coins db: Error renaming file '%s' -> '%s': %s", + src_str, dest_str, e.what()); return util::Error{strprintf(_( "Rename of '%s' -> '%s' failed. " "You should resolve this by manually moving or deleting the invalid " @@ -6420,7 +6420,7 @@ bool ChainstateManager::DeleteSnapshotChainstate() fs::path snapshot_datadir = Assert(node::FindSnapshotChainstateDir(m_options.datadir)).value(); if (!DeleteCoinsDBFromDisk(snapshot_datadir, /*is_snapshot=*/ true)) { - LogPrintf("Deletion of %s failed. Please remove it manually to continue reindexing.\n", + LogError("Deletion of %s failed. Please remove it manually to continue reindexing.", fs::PathToString(snapshot_datadir)); return false; } @@ -6482,8 +6482,8 @@ bool ChainstateManager::ValidatedSnapshotCleanup() // is in-memory, in which case we can't do on-disk cleanup. You'd better be // in a unittest! if (!ibd_chainstate_path_maybe || !snapshot_chainstate_path_maybe) { - LogPrintf("[snapshot] snapshot chainstate cleanup cannot happen with " - "in-memory chainstates. You are testing, right?\n"); + LogError("[snapshot] snapshot chainstate cleanup cannot happen with " + "in-memory chainstates. 
You are testing, right?"); return false; } diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 04287581f5..40c95d5b4b 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -2000,7 +2000,7 @@ std::optional LegacyDataSPKM::MigrateToDescriptor() // Make sure that we have accounted for all scriptPubKeys if (!Assume(spks.empty())) { - LogPrintf("%s\n", STR_INTERNAL_BUG("Error: Some output scripts were not migrated.\n")); + LogError("%s", STR_INTERNAL_BUG("Error: Some output scripts were not migrated.")); return std::nullopt; } From c26fe1cfe89b7382654bb7254640d135fe9a729a Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 26 Nov 2025 17:13:09 +0100 Subject: [PATCH 171/356] log: Use LogWarning for non-critical logs As per doc/developer-notes#logging, LogWarning should be used for severe problems that do not warrant shutting down the node Github-Pull: bitcoin/bitcoin#33960 Rebased-From: fa45a1503eee603059166071857215ea9bd7242a --- src/common/args.cpp | 6 +-- src/common/config.cpp | 4 +- src/common/system.cpp | 5 ++- src/httprpc.cpp | 10 ++--- src/httpserver.cpp | 10 ++--- src/init.cpp | 14 +++---- src/ipc/process.cpp | 4 +- src/logging.cpp | 2 +- src/net_processing.cpp | 4 +- src/netbase.cpp | 2 +- src/node/blockstorage.cpp | 2 +- src/node/chainstate.cpp | 2 +- src/node/utxo_snapshot.cpp | 12 +++--- src/policy/fees.cpp | 8 ++-- src/torcontrol.cpp | 58 +++++++++++++------------- src/util/batchpriority.cpp | 2 +- src/util/exception.cpp | 2 +- src/util/sock.cpp | 2 +- src/validation.cpp | 28 ++++++------- src/wallet/migrate.cpp | 2 +- src/wallet/scriptpubkeyman.cpp | 8 ++-- src/wallet/sqlite.cpp | 38 ++++++++--------- test/functional/feature_config_args.py | 4 +- 23 files changed, 115 insertions(+), 114 deletions(-) diff --git a/src/common/args.cpp b/src/common/args.cpp index 833a0b28bd..0c90135b40 100644 --- a/src/common/args.cpp +++ b/src/common/args.cpp @@ -115,7 +115,7 @@ 
std::optional InterpretValue(const KeyInfo& key, const st } // Double negatives like -nofoo=0 are supported (but discouraged) if (value && !InterpretBool(*value)) { - LogPrintf("Warning: parsed potentially confusing double-negative -%s=%s\n", key.name, *value); + LogWarning("Parsed potentially confusing double-negative -%s=%s", key.name, *value); return true; } return false; @@ -394,7 +394,7 @@ static void SaveErrors(const std::vector errors, std::vectoremplace_back(error); } else { - LogPrintf("%s\n", error); + LogWarning("%s", error); } } } @@ -416,7 +416,7 @@ bool ArgsManager::ReadSettingsFile(std::vector* errors) for (const auto& setting : m_settings.rw_settings) { KeyInfo key = InterpretKey(setting.first); // Split setting key into section and argname if (!GetArgFlags('-' + key.name)) { - LogPrintf("Ignoring unknown rw_settings value %s\n", setting.first); + LogWarning("Ignoring unknown rw_settings value %s", setting.first); } } return true; diff --git a/src/common/config.cpp b/src/common/config.cpp index fac4aa314c..f6ff6ae788 100644 --- a/src/common/config.cpp +++ b/src/common/config.cpp @@ -84,7 +84,7 @@ bool IsConfSupported(KeyInfo& key, std::string& error) { if (key.name == "reindex") { // reindex can be set in a config file but it is strongly discouraged as this will cause the node to reindex on // every restart. Allow the config but throw a warning - LogPrintf("Warning: reindex=1 is set in the configuration file, which will significantly slow down startup. Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary\n"); + LogWarning("reindex=1 is set in the configuration file, which will significantly slow down startup. 
Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary"); return true; } return true; @@ -109,7 +109,7 @@ bool ArgsManager::ReadConfigStream(std::istream& stream, const std::string& file m_settings.ro_config[key.section][key.name].push_back(*value); } else { if (ignore_invalid_keys) { - LogPrintf("Ignoring unknown configuration value %s\n", option.first); + LogWarning("Ignoring unknown configuration value %s", option.first); } else { error = strprintf("Invalid configuration value %s", option.first); return false; diff --git a/src/common/system.cpp b/src/common/system.cpp index 7af792db44..a8b005670c 100644 --- a/src/common/system.cpp +++ b/src/common/system.cpp @@ -51,8 +51,9 @@ void runCommand(const std::string& strCommand) #else int nErr = ::_wsystem(std::wstring_convert,wchar_t>().from_bytes(strCommand).c_str()); #endif - if (nErr) - LogPrintf("runCommand error: system(%s) returned %d\n", strCommand, nErr); + if (nErr) { + LogWarning("runCommand error: system(%s) returned %d", strCommand, nErr); + } } #endif diff --git a/src/httprpc.cpp b/src/httprpc.cpp index 57893702b8..3cd0d4d6c5 100644 --- a/src/httprpc.cpp +++ b/src/httprpc.cpp @@ -172,7 +172,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) jreq.context = context; jreq.peerAddr = req->GetPeer().ToStringAddrPort(); if (!RPCAuthorized(authHeader.second, jreq.authUser)) { - LogPrintf("ThreadRPCServer incorrect password attempt from %s\n", jreq.peerAddr); + LogWarning("ThreadRPCServer incorrect password attempt from %s", jreq.peerAddr); /* Deter brute-forcing If this results in a DoS the user really @@ -196,7 +196,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) UniValue reply; bool user_has_whitelist = g_rpc_whitelist.count(jreq.authUser); if (!user_has_whitelist && g_rpc_whitelist_default) { - LogPrintf("RPC User %s not allowed to call any methods\n", 
jreq.authUser); + LogWarning("RPC User %s not allowed to call any methods", jreq.authUser); req->WriteReply(HTTP_FORBIDDEN); return false; @@ -204,7 +204,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) } else if (valRequest.isObject()) { jreq.parse(valRequest); if (user_has_whitelist && !g_rpc_whitelist[jreq.authUser].count(jreq.strMethod)) { - LogPrintf("RPC User %s not allowed to call method %s\n", jreq.authUser, jreq.strMethod); + LogWarning("RPC User %s not allowed to call method %s", jreq.authUser, jreq.strMethod); req->WriteReply(HTTP_FORBIDDEN); return false; } @@ -234,7 +234,7 @@ static bool HTTPReq_JSONRPC(const std::any& context, HTTPRequest* req) // Parse method std::string strMethod = request.find_value("method").get_str(); if (!g_rpc_whitelist[jreq.authUser].count(strMethod)) { - LogPrintf("RPC User %s not allowed to call method %s\n", jreq.authUser, strMethod); + LogWarning("RPC User %s not allowed to call method %s", jreq.authUser, strMethod); req->WriteReply(HTTP_FORBIDDEN); return false; } @@ -328,7 +328,7 @@ static bool InitRPCAuthentication() fields.insert(fields.end(), salt_hmac.begin(), salt_hmac.end()); g_rpcauth.push_back(fields); } else { - LogPrintf("Invalid -rpcauth argument.\n"); + LogWarning("Invalid -rpcauth argument."); return false; } } diff --git a/src/httpserver.cpp b/src/httpserver.cpp index c89b48be6b..16af88cdec 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -330,7 +330,7 @@ static void http_request_cb(struct evhttp_request* req, void* arg) if (g_work_queue->Enqueue(item.get())) { item.release(); /* if true, queue took ownership */ } else { - LogPrintf("WARNING: request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting\n"); + LogWarning("Request rejected because http work queue depth exceeded, it can be increased with the -rpcworkqueue= setting"); item->req->WriteReply(HTTP_SERVICE_UNAVAILABLE, "Work queue depth exceeded"); } } else { @@ 
-454,7 +454,7 @@ static bool HTTPBindAddresses(struct evhttp* http) endpoints.emplace_back("127.0.0.1", http_port); is_default = true; if (!gArgs.GetArgs("-rpcallowip").empty()) { - LogPrintf("WARNING: option -rpcallowip was specified without -rpcbind; this doesn't usually make sense\n"); + LogWarning("Option -rpcallowip was specified without -rpcbind; this doesn't usually make sense"); } if (!gArgs.GetArgs("-rpcbind").empty()) { LogPrintf("WARNING: option -rpcbind was ignored because -rpcallowip was not specified, refusing to allow everyone to connect\n"); @@ -480,7 +480,7 @@ static bool HTTPBindAddresses(struct evhttp* http) if (bind_handle) { const std::optional addr{LookupHost(i->first, false)}; if (i->first.empty() || (addr.has_value() && addr->IsBindAny())) { - LogPrintf("WARNING: the RPC server is not safe to expose to untrusted networks such as the public internet\n"); + LogWarning("The RPC server is not safe to expose to untrusted networks such as the public internet"); } // Set the no-delay option (disable Nagle's algorithm) on the TCP socket. 
evutil_socket_t fd = evhttp_bound_socket_get_fd(bind_handle); @@ -492,7 +492,7 @@ static bool HTTPBindAddresses(struct evhttp* http) } else { int err = EVUTIL_SOCKET_ERROR(); if (!is_default || (err != EADDRNOTAVAIL && err != ENOENT && err != EOPNOTSUPP && !ignorable_error)) { - LogPrintf("Binding RPC on address %s port %i failed (Error: %s).\n", i->first, i->second, NetworkErrorString(err)); + LogWarning("Binding RPC on address %s port %i failed (Error: %s).", i->first, i->second, NetworkErrorString(err)); num_fail += 1; } else { // Don't count failure if binding was not explicitly configured @@ -703,7 +703,7 @@ HTTPRequest::~HTTPRequest() { if (!replySent) { // Keep track of whether reply was sent to avoid request leaks - LogPrintf("%s: Unhandled request\n", __func__); + LogWarning("Unhandled HTTP request"); WriteReply(HTTP_INTERNAL_SERVER_ERROR, "Unhandled request"); } // evhttpd cleans up the request, as long as a reply was sent. diff --git a/src/init.cpp b/src/init.cpp index 3dcb4bb9c4..7f174a826e 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -198,7 +198,7 @@ static void RemovePidFile(const ArgsManager& args) const auto pid_path{GetPidFile(args)}; if (std::error_code error; !fs::remove(pid_path, error)) { std::string msg{error ? error.message() : "File does not exist"}; - LogPrintf("Unable to remove PID file (%s): %s\n", fs::PathToString(pid_path), msg); + LogWarning("Unable to remove PID file (%s): %s", fs::PathToString(pid_path), msg); } } @@ -1290,7 +1290,7 @@ static ChainstateLoadResult InitAndLoadChainstate( index->Interrupt(); index->Stop(); if (!(index->Init() && index->StartBackgroundSync())) { - LogPrintf("[snapshot] WARNING failed to restart index %s on snapshot chain\n", index->GetName()); + LogWarning("[snapshot] Failed to restart index %s on snapshot chain", index->GetName()); } } }; @@ -1354,11 +1354,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // Warn about relative -datadir path. 
if (args.IsArgSet("-datadir") && !args.GetPathArg("-datadir").is_absolute()) { - LogPrintf("Warning: relative datadir option '%s' specified, which will be interpreted relative to the " - "current working directory '%s'. This is fragile, because if bitcoin is started in the future " - "from a different location, it will be unable to locate the current data files. There could " - "also be data loss if bitcoin is started while in a temporary directory.\n", - args.GetArg("-datadir", ""), fs::PathToString(fs::current_path())); + LogWarning("Relative datadir option '%s' specified, which will be interpreted relative to the " + "current working directory '%s'. This is fragile, because if bitcoin is started in the future " + "from a different location, it will be unable to locate the current data files. There could " + "also be data loss if bitcoin is started while in a temporary directory.", + args.GetArg("-datadir", ""), fs::PathToString(fs::current_path())); } assert(!node.scheduler); diff --git a/src/ipc/process.cpp b/src/ipc/process.cpp index bdc541b654..07957d90ce 100644 --- a/src/ipc/process.cpp +++ b/src/ipc/process.cpp @@ -114,7 +114,7 @@ int ProcessImpl::connect(const fs::path& data_dir, } int connect_error = errno; if (::close(fd) != 0) { - LogPrintf("Error closing file descriptor %i '%s': %s\n", fd, address, SysErrorString(errno)); + LogWarning("Error closing file descriptor %i '%s': %s", fd, address, SysErrorString(errno)); } throw std::system_error(connect_error, std::system_category()); } @@ -145,7 +145,7 @@ int ProcessImpl::bind(const fs::path& data_dir, const std::string& exe_name, std } int bind_error = errno; if (::close(fd) != 0) { - LogPrintf("Error closing file descriptor %i: %s\n", fd, SysErrorString(errno)); + LogWarning("Error closing file descriptor %i: %s", fd, SysErrorString(errno)); } throw std::system_error(bind_error, std::system_category()); } diff --git a/src/logging.cpp b/src/logging.cpp index 5f055566ef..1dc2dd373b 100644 --- 
a/src/logging.cpp +++ b/src/logging.cpp @@ -474,7 +474,7 @@ void BCLog::Logger::ShrinkDebugFile() // Restart the file with some of the end std::vector vch(RECENT_DEBUG_HISTORY_SIZE, 0); if (fseek(file, -((long)vch.size()), SEEK_END)) { - LogPrintf("Failed to shrink debug log file: fseek(...) failed\n"); + LogWarning("Failed to shrink debug log file: fseek(...) failed"); fclose(file); return; } diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 1da3ec9d21..c199da3799 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -4902,13 +4902,13 @@ bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer) if (pnode.HasPermission(NetPermissionFlags::NoBan)) { // We never disconnect or discourage peers for bad behavior if they have NetPermissionFlags::NoBan permission - LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id); + LogWarning("Not punishing noban peer %d!", peer.m_id); return false; } if (pnode.IsManualConn()) { // We never disconnect or discourage manual peers for bad behavior - LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id); + LogWarning("Not punishing manually connected peer %d!", peer.m_id); return false; } diff --git a/src/netbase.cpp b/src/netbase.cpp index eaca5a16c1..716d990baf 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -103,7 +103,7 @@ enum Network ParseNetwork(const std::string& net_in) { if (net == "ipv6") return NET_IPV6; if (net == "onion") return NET_ONION; if (net == "tor") { - LogPrintf("Warning: net name 'tor' is deprecated and will be removed in the future. You should use 'onion' instead.\n"); + LogWarning("Net name 'tor' is deprecated and will be removed in the future. 
You should use 'onion' instead."); return NET_ONION; } if (net == "i2p") { diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 4179cc8c64..3863388e7c 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -1271,7 +1271,7 @@ void ImportBlocks(ChainstateManager& chainman, std::span import_ return; } } else { - LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path)); + LogWarning("Could not open blocks file %s", fs::PathToString(path)); } } diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp index c88bd5bad2..39f6943b5e 100644 --- a/src/node/chainstate.cpp +++ b/src/node/chainstate.cpp @@ -151,7 +151,7 @@ ChainstateLoadResult LoadChainstate(ChainstateManager& chainman, const CacheSize } LogPrintf("Setting nMinimumChainWork=%s\n", chainman.MinimumChainWork().GetHex()); if (chainman.MinimumChainWork() < UintToArith256(chainman.GetConsensus().nMinimumChainWork)) { - LogPrintf("Warning: nMinimumChainWork set below default value of %s\n", chainman.GetConsensus().nMinimumChainWork.GetHex()); + LogWarning("nMinimumChainWork set below default value of %s", chainman.GetConsensus().nMinimumChainWork.GetHex()); } if (chainman.m_blockman.GetPruneTarget() == BlockManager::PRUNE_TARGET_MANUAL) { LogPrintf("Block pruning enabled. 
Use RPC call pruneblockchain(height) to manually prune block and undo files.\n"); diff --git a/src/node/utxo_snapshot.cpp b/src/node/utxo_snapshot.cpp index af06d832db..d6e878ae7e 100644 --- a/src/node/utxo_snapshot.cpp +++ b/src/node/utxo_snapshot.cpp @@ -49,16 +49,16 @@ bool WriteSnapshotBaseBlockhash(Chainstate& snapshot_chainstate) std::optional ReadSnapshotBaseBlockhash(fs::path chaindir) { if (!fs::exists(chaindir)) { - LogPrintf("[snapshot] cannot read base blockhash: no chainstate dir " - "exists at path %s\n", fs::PathToString(chaindir)); + LogWarning("[snapshot] cannot read base blockhash: no chainstate dir " + "exists at path %s", fs::PathToString(chaindir)); return std::nullopt; } const fs::path read_from = chaindir / node::SNAPSHOT_BLOCKHASH_FILENAME; const std::string read_from_str = fs::PathToString(read_from); if (!fs::exists(read_from)) { - LogPrintf("[snapshot] snapshot chainstate dir is malformed! no base blockhash file " - "exists at path %s. Try deleting %s and calling loadtxoutset again?\n", + LogWarning("[snapshot] snapshot chainstate dir is malformed! no base blockhash file " + "exists at path %s. 
Try deleting %s and calling loadtxoutset again?", fs::PathToString(chaindir), read_from_str); return std::nullopt; } @@ -67,7 +67,7 @@ std::optional ReadSnapshotBaseBlockhash(fs::path chaindir) FILE* file{fsbridge::fopen(read_from, "rb")}; AutoFile afile{file}; if (afile.IsNull()) { - LogPrintf("[snapshot] failed to open base blockhash file for reading: %s\n", + LogWarning("[snapshot] failed to open base blockhash file for reading: %s", read_from_str); return std::nullopt; } @@ -76,7 +76,7 @@ std::optional ReadSnapshotBaseBlockhash(fs::path chaindir) int64_t position = afile.tell(); afile.seek(0, SEEK_END); if (position != afile.tell()) { - LogPrintf("[snapshot] warning: unexpected trailing data in %s\n", read_from_str); + LogWarning("[snapshot] unexpected trailing data in %s", read_from_str); } return base_blockhash; } diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index ef967bcca9..c4323a95b0 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -567,12 +567,12 @@ CBlockPolicyEstimator::CBlockPolicyEstimator(const fs::path& estimation_filepath std::chrono::hours file_age = GetFeeEstimatorFileAge(); if (file_age > MAX_FILE_AGE && !read_stale_estimates) { - LogPrintf("Fee estimation file %s too old (age=%lld > %lld hours) and will not be used to avoid serving stale estimates.\n", fs::PathToString(m_estimation_filepath), Ticks(file_age), Ticks(MAX_FILE_AGE)); + LogWarning("Fee estimation file %s too old (age=%lld > %lld hours) and will not be used to avoid serving stale estimates.", fs::PathToString(m_estimation_filepath), Ticks(file_age), Ticks(MAX_FILE_AGE)); return; } if (!Read(est_file)) { - LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath)); + LogWarning("Failed to read fee estimates from %s. 
Continue anyway.", fs::PathToString(m_estimation_filepath)); } } @@ -955,10 +955,10 @@ void CBlockPolicyEstimator::FlushFeeEstimates() { AutoFile est_file{fsbridge::fopen(m_estimation_filepath, "wb")}; if (est_file.IsNull() || !Write(est_file)) { - LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", fs::PathToString(m_estimation_filepath)); + LogWarning("Failed to write fee estimates to %s. Continue anyway.", fs::PathToString(m_estimation_filepath)); (void)est_file.fclose(); } else if (est_file.fclose() != 0) { - LogError("Failed to close fee estimates file %s: %s. Continuing anyway.", fs::PathToString(m_estimation_filepath), SysErrorString(errno)); + LogWarning("Failed to close fee estimates file %s: %s. Continuing anyway.", fs::PathToString(m_estimation_filepath), SysErrorString(errno)); } else { LogPrintf("Flushed fee estimates to %s.\n", fs::PathToString(m_estimation_filepath.filename())); } diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index 17c51cd8e0..a578a3eee4 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -120,7 +120,7 @@ void TorControlConnection::readcb(struct bufferevent *bev, void *ctx) // Do this after evbuffer_readln to make sure all full lines have been // removed from the buffer. Everything left is an incomplete line. 
if (evbuffer_get_length(input) > MAX_LINE_LENGTH) { - LogPrintf("tor: Disconnecting because MAX_LINE_LENGTH exceeded\n"); + LogWarning("tor: Disconnecting because MAX_LINE_LENGTH exceeded"); self->Disconnect(); } } @@ -150,14 +150,14 @@ bool TorControlConnection::Connect(const std::string& tor_control_center, const const std::optional control_service{Lookup(tor_control_center, DEFAULT_TOR_CONTROL_PORT, fNameLookup)}; if (!control_service.has_value()) { - LogPrintf("tor: Failed to look up control center %s\n", tor_control_center); + LogWarning("tor: Failed to look up control center %s", tor_control_center); return false; } struct sockaddr_storage control_address; socklen_t control_address_len = sizeof(control_address); if (!control_service.value().GetSockAddr(reinterpret_cast(&control_address), &control_address_len)) { - LogPrintf("tor: Error parsing socket address %s\n", tor_control_center); + LogWarning("tor: Error parsing socket address %s", tor_control_center); return false; } @@ -173,7 +173,7 @@ bool TorControlConnection::Connect(const std::string& tor_control_center, const // Finally, connect to tor_control_center if (bufferevent_socket_connect(b_conn, reinterpret_cast(&control_address), control_address_len) < 0) { - LogPrintf("tor: Error connecting to address %s\n", tor_control_center); + LogWarning("tor: Error connecting to address %s", tor_control_center); return false; } return true; @@ -325,11 +325,11 @@ TorController::TorController(struct event_base* _base, const std::string& tor_co { reconnect_ev = event_new(base, -1, 0, reconnect_cb, this); if (!reconnect_ev) - LogPrintf("tor: Failed to create event for reconnection: out of memory?\n"); + LogWarning("tor: Failed to create event for reconnection: out of memory?"); // Start connection attempts immediately if (!conn.Connect(m_tor_control_center, std::bind(&TorController::connected_cb, this, std::placeholders::_1), std::bind(&TorController::disconnected_cb, this, std::placeholders::_1) )) { - 
LogPrintf("tor: Initiating connection to Tor control port %s failed\n", m_tor_control_center); + LogWarning("tor: Initiating connection to Tor control port %s failed", m_tor_control_center); } // Read service private key if cached std::pair pkf = ReadBinaryFile(GetPrivateKeyFile()); @@ -377,12 +377,12 @@ void TorController::get_socks_cb(TorControlConnection& _conn, const TorControlRe if (!socks_location.empty()) { LogDebug(BCLog::TOR, "Get SOCKS port command yielded %s\n", socks_location); } else { - LogPrintf("tor: Get SOCKS port command returned nothing\n"); + LogWarning("tor: Get SOCKS port command returned nothing"); } } else if (reply.code == 510) { // 510 Unrecognized command - LogPrintf("tor: Get SOCKS port command failed with unrecognized command (You probably should upgrade Tor)\n"); + LogWarning("tor: Get SOCKS port command failed with unrecognized command (You probably should upgrade Tor)"); } else { - LogPrintf("tor: Get SOCKS port command failed; error code %d\n", reply.code); + LogWarning("tor: Get SOCKS port command failed; error code %d", reply.code); } CService resolved; @@ -430,9 +430,9 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe private_key = i->second; } if (service_id.empty()) { - LogPrintf("tor: Error parsing ADD_ONION parameters:\n"); + LogWarning("tor: Error parsing ADD_ONION parameters:"); for (const std::string &s : reply.lines) { - LogPrintf(" %s\n", SanitizeString(s)); + LogWarning(" %s", SanitizeString(s)); } return; } @@ -441,14 +441,14 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) { LogDebug(BCLog::TOR, "Cached service private key to %s\n", fs::PathToString(GetPrivateKeyFile())); } else { - LogPrintf("tor: Error writing service private key to %s\n", fs::PathToString(GetPrivateKeyFile())); + LogWarning("tor: Error writing service private key to %s", fs::PathToString(GetPrivateKeyFile())); } 
AddLocal(service, LOCAL_MANUAL); // ... onion requested - keep connection open } else if (reply.code == 510) { // 510 Unrecognized command - LogPrintf("tor: Add onion failed with unrecognized command (You probably need to upgrade Tor)\n"); + LogWarning("tor: Add onion failed with unrecognized command (You probably need to upgrade Tor)"); } else { - LogPrintf("tor: Add onion failed; error code %d\n", reply.code); + LogWarning("tor: Add onion failed; error code %d", reply.code); } } @@ -472,7 +472,7 @@ void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply& _conn.Command(strprintf("ADD_ONION %s Port=%i,%s", private_key, Params().GetDefaultPort(), m_target.ToStringAddrPort()), std::bind(&TorController::add_onion_cb, this, std::placeholders::_1, std::placeholders::_2)); } else { - LogPrintf("tor: Authentication failed\n"); + LogWarning("tor: Authentication failed"); } } @@ -511,30 +511,30 @@ void TorController::authchallenge_cb(TorControlConnection& _conn, const TorContr if (l.first == "AUTHCHALLENGE") { std::map m = ParseTorReplyMapping(l.second); if (m.empty()) { - LogPrintf("tor: Error parsing AUTHCHALLENGE parameters: %s\n", SanitizeString(l.second)); + LogWarning("tor: Error parsing AUTHCHALLENGE parameters: %s", SanitizeString(l.second)); return; } std::vector serverHash = ParseHex(m["SERVERHASH"]); std::vector serverNonce = ParseHex(m["SERVERNONCE"]); LogDebug(BCLog::TOR, "AUTHCHALLENGE ServerHash %s ServerNonce %s\n", HexStr(serverHash), HexStr(serverNonce)); if (serverNonce.size() != 32) { - LogPrintf("tor: ServerNonce is not 32 bytes, as required by spec\n"); + LogWarning("tor: ServerNonce is not 32 bytes, as required by spec"); return; } std::vector computedServerHash = ComputeResponse(TOR_SAFE_SERVERKEY, cookie, clientNonce, serverNonce); if (computedServerHash != serverHash) { - LogPrintf("tor: ServerHash %s does not match expected ServerHash %s\n", HexStr(serverHash), HexStr(computedServerHash)); + LogWarning("tor: ServerHash %s 
does not match expected ServerHash %s", HexStr(serverHash), HexStr(computedServerHash)); return; } std::vector computedClientHash = ComputeResponse(TOR_SAFE_CLIENTKEY, cookie, clientNonce, serverNonce); _conn.Command("AUTHENTICATE " + HexStr(computedClientHash), std::bind(&TorController::auth_cb, this, std::placeholders::_1, std::placeholders::_2)); } else { - LogPrintf("tor: Invalid reply to AUTHCHALLENGE\n"); + LogWarning("tor: Invalid reply to AUTHCHALLENGE"); } } else { - LogPrintf("tor: SAFECOOKIE authentication challenge failed\n"); + LogWarning("tor: SAFECOOKIE authentication challenge failed"); } } @@ -582,7 +582,7 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro ReplaceAll(torpassword, "\"", "\\\""); _conn.Command("AUTHENTICATE \"" + torpassword + "\"", std::bind(&TorController::auth_cb, this, std::placeholders::_1, std::placeholders::_2)); } else { - LogPrintf("tor: Password provided with -torpassword, but HASHEDPASSWORD authentication is not available\n"); + LogWarning("tor: Password provided with -torpassword, but HASHEDPASSWORD authentication is not available"); } } else if (methods.count("NULL")) { LogDebug(BCLog::TOR, "Using NULL authentication\n"); @@ -599,18 +599,18 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro _conn.Command("AUTHCHALLENGE SAFECOOKIE " + HexStr(clientNonce), std::bind(&TorController::authchallenge_cb, this, std::placeholders::_1, std::placeholders::_2)); } else { if (status_cookie.first) { - LogPrintf("tor: Authentication cookie %s is not exactly %i bytes, as is required by the spec\n", cookiefile, TOR_COOKIE_SIZE); + LogWarning("tor: Authentication cookie %s is not exactly %i bytes, as is required by the spec", cookiefile, TOR_COOKIE_SIZE); } else { - LogPrintf("tor: Authentication cookie %s could not be opened (check permissions)\n", cookiefile); + LogWarning("tor: Authentication cookie %s could not be opened (check permissions)", cookiefile); } } } else if 
(methods.count("HASHEDPASSWORD")) { - LogPrintf("tor: The only supported authentication mechanism left is password, but no password provided with -torpassword\n"); + LogWarning("tor: The only supported authentication mechanism left is password, but no password provided with -torpassword"); } else { - LogPrintf("tor: No supported authentication method\n"); + LogWarning("tor: No supported authentication method"); } } else { - LogPrintf("tor: Requesting protocol info failed\n"); + LogWarning("tor: Requesting protocol info failed"); } } @@ -619,7 +619,7 @@ void TorController::connected_cb(TorControlConnection& _conn) reconnect_timeout = RECONNECT_TIMEOUT_START; // First send a PROTOCOLINFO command to figure out what authentication is expected if (!_conn.Command("PROTOCOLINFO 1", std::bind(&TorController::protocolinfo_cb, this, std::placeholders::_1, std::placeholders::_2))) - LogPrintf("tor: Error sending initial protocolinfo command\n"); + LogWarning("tor: Error sending initial protocolinfo command"); } void TorController::disconnected_cb(TorControlConnection& _conn) @@ -647,7 +647,7 @@ void TorController::Reconnect() */ if (!conn.Connect(m_tor_control_center, std::bind(&TorController::connected_cb, this, std::placeholders::_1), std::bind(&TorController::disconnected_cb, this, std::placeholders::_1) )) { - LogPrintf("tor: Re-initiating connection to Tor control port %s failed\n", m_tor_control_center); + LogWarning("tor: Re-initiating connection to Tor control port %s failed", m_tor_control_center); } } @@ -683,7 +683,7 @@ void StartTorControl(CService onion_service_target) #endif gBase = event_base_new(); if (!gBase) { - LogPrintf("tor: Unable to create event_base\n"); + LogWarning("tor: Unable to create event_base"); return; } diff --git a/src/util/batchpriority.cpp b/src/util/batchpriority.cpp index c73aef1eb4..c0ed12339b 100644 --- a/src/util/batchpriority.cpp +++ b/src/util/batchpriority.cpp @@ -20,7 +20,7 @@ void ScheduleBatchPriority() const static sched_param 
param{}; const int rc = pthread_setschedparam(pthread_self(), SCHED_BATCH, ¶m); if (rc != 0) { - LogPrintf("Failed to pthread_setschedparam: %s\n", SysErrorString(rc)); + LogWarning("Failed to pthread_setschedparam: %s", SysErrorString(rc)); } #endif } diff --git a/src/util/exception.cpp b/src/util/exception.cpp index d961f0540f..3f8be7d1d7 100644 --- a/src/util/exception.cpp +++ b/src/util/exception.cpp @@ -36,6 +36,6 @@ static std::string FormatException(const std::exception* pex, std::string_view t void PrintExceptionContinue(const std::exception* pex, std::string_view thread_name) { std::string message = FormatException(pex, thread_name); - LogPrintf("\n\n************************\n%s\n", message); + LogWarning("\n\n************************\n%s", message); tfm::format(std::cerr, "\n\n************************\n%s\n", message); } diff --git a/src/util/sock.cpp b/src/util/sock.cpp index e896b87160..7504dc5a88 100644 --- a/src/util/sock.cpp +++ b/src/util/sock.cpp @@ -409,7 +409,7 @@ void Sock::Close() int ret = close(m_socket); #endif if (ret) { - LogPrintf("Error closing socket %d: %s\n", m_socket, NetworkErrorString(WSAGetLastError())); + LogWarning("Error closing socket %d: %s", m_socket, NetworkErrorString(WSAGetLastError())); } m_socket = INVALID_SOCKET; } diff --git a/src/validation.cpp b/src/validation.cpp index f5c795f387..3078cb4950 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2052,7 +2052,7 @@ void Chainstate::CheckForkWarningConditions() } if (m_chainman.m_best_invalid && m_chainman.m_best_invalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) { - LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__); + LogWarning("Found invalid chain at least ~6 blocks longer than our best chain. 
Chain state database corruption likely."); m_chainman.GetNotifications().warningSet( kernel::Warning::LARGE_WORK_INVALID_CHAIN, _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.")); @@ -2972,7 +2972,7 @@ void Chainstate::ForceFlushStateToDisk() { BlockValidationState state; if (!this->FlushStateToDisk(state, FlushStateMode::ALWAYS)) { - LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString()); + LogWarning("Failed to force flush state (%s)", state.ToString()); } } @@ -2981,7 +2981,7 @@ void Chainstate::PruneAndFlush() BlockValidationState state; m_blockman.m_check_for_pruning = true; if (!this->FlushStateToDisk(state, FlushStateMode::NONE)) { - LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString()); + LogWarning("Failed to flush state (%s)", state.ToString()); } } @@ -4697,7 +4697,7 @@ void PruneBlockFilesManual(Chainstate& active_chainstate, int nManualPruneHeight BlockValidationState state; if (!active_chainstate.FlushStateToDisk( state, FlushStateMode::NONE, nManualPruneHeight)) { - LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString()); + LogWarning("Failed to flush state after manual prune (%s)", state.ToString()); } } @@ -4845,7 +4845,7 @@ VerifyDBResult CVerifyDB::VerifyDB( return VerifyDBResult::CORRUPTED_BLOCK_DB; } if (skipped_l3_checks) { - LogPrintf("Skipped verification of level >=3 (insufficient database cache size). Consider increasing -dbcache.\n"); + LogWarning("Skipped verification of level >=3 (insufficient database cache size). 
Consider increasing -dbcache."); } // store block count as we move pindex at check level >= 4 @@ -5684,7 +5684,7 @@ Chainstate& ChainstateManager::InitializeChainstate(CTxMemPool* mempool) try { bool existed = fs::remove(base_blockhash_path); if (!existed) { - LogPrintf("[snapshot] snapshot chainstate dir being removed lacks %s file\n", + LogWarning("[snapshot] snapshot chainstate dir being removed lacks %s file", fs::PathToString(node::SNAPSHOT_BLOCKHASH_FILENAME)); } } catch (const fs::filesystem_error& e) { @@ -6162,8 +6162,8 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation() }; if (index_new.GetBlockHash() != snapshot_blockhash) { - LogPrintf("[snapshot] supposed base block %s does not match the " - "snapshot base block %s (height %d). Snapshot is not valid.\n", + LogWarning("[snapshot] supposed base block %s does not match the " + "snapshot base block %s (height %d). Snapshot is not valid.", index_new.ToString(), snapshot_blockhash.ToString(), snapshot_base_height); handle_invalid_snapshot(); return SnapshotCompletionResult::BASE_BLOCKHASH_MISMATCH; @@ -6183,8 +6183,8 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation() const auto& maybe_au_data = m_options.chainparams.AssumeutxoForHeight(curr_height); if (!maybe_au_data) { - LogPrintf("[snapshot] assumeutxo data not found for height " - "(%d) - refusing to validate snapshot\n", curr_height); + LogWarning("[snapshot] assumeutxo data not found for height " + "(%d) - refusing to validate snapshot", curr_height); handle_invalid_snapshot(); return SnapshotCompletionResult::MISSING_CHAINPARAMS; } @@ -6205,7 +6205,7 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation() // XXX note that this function is slow and will hold cs_main for potentially minutes. 
if (!maybe_ibd_stats) { - LogPrintf("[snapshot] failed to generate stats for validation coins db\n"); + LogWarning("[snapshot] failed to generate stats for validation coins db"); // While this isn't a problem with the snapshot per se, this condition // prevents us from validating the snapshot, so we should shut down and let the // user handle the issue manually. @@ -6221,7 +6221,7 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation() // hash for the snapshot when it's loaded in its chainstate's leveldb. We could then // reference that here for an additional check. if (AssumeutxoHash{ibd_stats.hashSerialized} != au_data.hash_serialized) { - LogPrintf("[snapshot] hash mismatch: actual=%s, expected=%s\n", + LogWarning("[snapshot] hash mismatch: actual=%s, expected=%s", ibd_stats.hashSerialized.ToString(), au_data.hash_serialized.ToString()); handle_invalid_snapshot(); @@ -6539,8 +6539,8 @@ bool ChainstateManager::ValidatedSnapshotCleanup() if (!DeleteCoinsDBFromDisk(tmp_old, /*is_snapshot=*/false)) { // No need to FatalError because once the unneeded bg chainstate data is // moved, it will not interfere with subsequent initialization. - LogPrintf("Deletion of %s failed. Please remove it manually, as the " - "directory is now unnecessary.\n", + LogWarning("Deletion of %s failed. 
Please remove it manually, as the " + "directory is now unnecessary.", fs::PathToString(tmp_old)); } else { LogPrintf("[snapshot] deleted background chainstate directory (%s)\n", diff --git a/src/wallet/migrate.cpp b/src/wallet/migrate.cpp index d2c163027d..d1eb620e17 100644 --- a/src/wallet/migrate.cpp +++ b/src/wallet/migrate.cpp @@ -714,7 +714,7 @@ bool BerkeleyRODatabase::Backup(const std::string& dest) const } try { if (fs::exists(dst) && fs::equivalent(src, dst)) { - LogPrintf("cannot backup to wallet source file %s\n", fs::PathToString(dst)); + LogWarning("cannot backup to wallet source file %s", fs::PathToString(dst)); return false; } diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 40c95d5b4b..0bc2339f97 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -262,7 +262,7 @@ bool LegacyDataSPKM::CheckDecryptionKey(const CKeyingMaterial& master_key) } if (keyPass && keyFail) { - LogPrintf("The wallet is probably corrupted: Some keys decrypt but not all.\n"); + LogWarning("The wallet is probably corrupted: Some keys decrypt but not all."); throw std::runtime_error("Error unlocking wallet: some keys decrypt but not all. 
Your wallet file may be corrupt."); } if (keyFail || !keyPass) @@ -1818,7 +1818,7 @@ std::optional LegacyDataSPKM::MigrateToDescriptor() WalletBatch batch(m_storage.GetDatabase()); if (!batch.TxnBegin()) { - LogPrintf("Error generating descriptors for migration, cannot initialize db transaction\n"); + LogWarning("Error generating descriptors for migration, cannot initialize db transaction"); return std::nullopt; } @@ -2054,7 +2054,7 @@ std::optional LegacyDataSPKM::MigrateToDescriptor() // Finalize transaction if (!batch.TxnCommit()) { - LogPrintf("Error generating descriptors for migration, cannot commit db transaction\n"); + LogWarning("Error generating descriptors for migration, cannot commit db transaction"); return std::nullopt; } @@ -2144,7 +2144,7 @@ bool DescriptorScriptPubKeyMan::CheckDecryptionKey(const CKeyingMaterial& master break; } if (keyPass && keyFail) { - LogPrintf("The wallet is probably corrupted: Some keys decrypt but not all.\n"); + LogWarning("The wallet is probably corrupted: Some keys decrypt but not all."); throw std::runtime_error("Error unlocking wallet: some keys decrypt but not all. Your wallet file may be corrupt."); } if (keyFail || !keyPass) { diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp index a8c9f8a8ab..b250762609 100644 --- a/src/wallet/sqlite.cpp +++ b/src/wallet/sqlite.cpp @@ -40,7 +40,7 @@ static void ErrorLogCallback(void* arg, int code, const char* msg) // invoked." // Assert that this is the case: assert(arg == nullptr); - LogPrintf("SQLite Error. Code: %d. Message: %s\n", code, msg); + LogWarning("SQLite Error. Code: %d. Message: %s", code, msg); } static int TraceSqlCallback(unsigned code, void* context, void* param1, void* param2) @@ -69,7 +69,7 @@ static bool BindBlobToStatement(sqlite3_stmt* stmt, // instead of the empty blob value X'', which would mess up SQL comparisons. int res = sqlite3_bind_blob(stmt, index, blob.data() ? 
static_cast(blob.data()) : "", blob.size(), SQLITE_STATIC); if (res != SQLITE_OK) { - LogPrintf("Unable to bind %s to statement: %s\n", description, sqlite3_errstr(res)); + LogWarning("Unable to bind %s to statement: %s", description, sqlite3_errstr(res)); sqlite3_clear_bindings(stmt); sqlite3_reset(stmt); return false; @@ -182,7 +182,7 @@ void SQLiteDatabase::Cleanup() noexcept if (--g_sqlite_count == 0) { int ret = sqlite3_shutdown(); if (ret != SQLITE_OK) { - LogPrintf("SQLiteDatabase: Failed to shutdown SQLite: %s\n", sqlite3_errstr(ret)); + LogWarning("SQLiteDatabase: Failed to shutdown SQLite: %s", sqlite3_errstr(ret)); } } } @@ -267,7 +267,7 @@ void SQLiteDatabase::Open() if (LogAcceptCategory(BCLog::WALLETDB, BCLog::Level::Trace)) { ret = sqlite3_trace_v2(m_db, SQLITE_TRACE_STMT, TraceSqlCallback, this); if (ret != SQLITE_OK) { - LogPrintf("Failed to enable SQL tracing for %s\n", Filename()); + LogWarning("Failed to enable SQL tracing for %s", Filename()); } } } @@ -294,7 +294,7 @@ void SQLiteDatabase::Open() if (m_use_unsafe_sync) { // Use normal synchronous mode for the journal - LogPrintf("WARNING SQLite is configured to not wait for data to be flushed to disk. Data loss and corruption may occur.\n"); + LogWarning("SQLite is configured to not wait for data to be flushed to disk. 
Data loss and corruption may occur."); SetPragma(m_db, "synchronous", "OFF", "Failed to set synchronous mode to OFF"); } @@ -353,14 +353,14 @@ bool SQLiteDatabase::Backup(const std::string& dest) const } sqlite3_backup* backup = sqlite3_backup_init(db_copy, "main", m_db, "main"); if (!backup) { - LogPrintf("%s: Unable to begin backup: %s\n", __func__, sqlite3_errmsg(m_db)); + LogWarning("Unable to begin sqlite backup: %s", sqlite3_errmsg(m_db)); sqlite3_close(db_copy); return false; } // Specifying -1 will copy all of the pages res = sqlite3_backup_step(backup, -1); if (res != SQLITE_DONE) { - LogPrintf("%s: Unable to backup: %s\n", __func__, sqlite3_errstr(res)); + LogWarning("Unable to continue sqlite backup: %s", sqlite3_errstr(res)); sqlite3_backup_finish(backup); sqlite3_close(db_copy); return false; @@ -412,13 +412,13 @@ void SQLiteBatch::Close() // If we began a transaction, and it wasn't committed, abort the transaction in progress if (m_txn) { if (TxnAbort()) { - LogPrintf("SQLiteBatch: Batch closed unexpectedly without the transaction being explicitly committed or aborted\n"); + LogWarning("SQLiteBatch: Batch closed unexpectedly without the transaction being explicitly committed or aborted"); } else { // If transaction cannot be aborted, it means there is a bug or there has been data corruption. Try to recover in this case // by closing and reopening the database. Closing the database should also ensure that any changes made since the transaction // was opened will be rolled back and future transactions can succeed without committing old data. 
force_conn_refresh = true; - LogPrintf("SQLiteBatch: Batch closed and failed to abort transaction, resetting db connection..\n"); + LogWarning("SQLiteBatch: Batch closed and failed to abort transaction, resetting db connection.."); } } @@ -434,7 +434,7 @@ void SQLiteBatch::Close() for (const auto& [stmt_prepared, stmt_description] : statements) { int res = sqlite3_finalize(*stmt_prepared); if (res != SQLITE_OK) { - LogPrintf("SQLiteBatch: Batch closed but could not finalize %s statement: %s\n", + LogWarning("SQLiteBatch: Batch closed but could not finalize %s statement: %s", stmt_description, sqlite3_errstr(res)); } *stmt_prepared = nullptr; @@ -465,7 +465,7 @@ bool SQLiteBatch::ReadKey(DataStream&& key, DataStream& value) if (res != SQLITE_ROW) { if (res != SQLITE_DONE) { // SQLITE_DONE means "not found", don't log an error in that case. - LogPrintf("%s: Unable to execute statement: %s\n", __func__, sqlite3_errstr(res)); + LogWarning("Unable to execute read statement: %s", sqlite3_errstr(res)); } sqlite3_clear_bindings(m_read_stmt); sqlite3_reset(m_read_stmt); @@ -505,7 +505,7 @@ bool SQLiteBatch::WriteKey(DataStream&& key, DataStream&& value, bool overwrite) sqlite3_clear_bindings(stmt); sqlite3_reset(stmt); if (res != SQLITE_DONE) { - LogPrintf("%s: Unable to execute statement: %s\n", __func__, sqlite3_errstr(res)); + LogWarning("Unable to execute write statement: %s", sqlite3_errstr(res)); } if (!m_txn) m_database.m_write_semaphore.post(); @@ -529,7 +529,7 @@ bool SQLiteBatch::ExecStatement(sqlite3_stmt* stmt, Span blob) sqlite3_clear_bindings(stmt); sqlite3_reset(stmt); if (res != SQLITE_DONE) { - LogPrintf("%s: Unable to execute statement: %s\n", __func__, sqlite3_errstr(res)); + LogWarning("Unable to execute exec statement: %s", sqlite3_errstr(res)); } if (!m_txn) m_database.m_write_semaphore.post(); @@ -567,7 +567,7 @@ DatabaseCursor::Status SQLiteCursor::Next(DataStream& key, DataStream& value) return Status::DONE; } if (res != SQLITE_ROW) { - 
LogPrintf("%s: Unable to execute cursor step: %s\n", __func__, sqlite3_errstr(res)); + LogWarning("Unable to execute cursor step: %s", sqlite3_errstr(res)); return Status::FAIL; } @@ -586,8 +586,8 @@ SQLiteCursor::~SQLiteCursor() sqlite3_reset(m_cursor_stmt); int res = sqlite3_finalize(m_cursor_stmt); if (res != SQLITE_OK) { - LogPrintf("%s: cursor closed but could not finalize cursor statement: %s\n", - __func__, sqlite3_errstr(res)); + LogWarning("Cursor closed but could not finalize cursor statement: %s", + sqlite3_errstr(res)); } } @@ -655,7 +655,7 @@ bool SQLiteBatch::TxnBegin() Assert(!m_database.HasActiveTxn()); int res = Assert(m_exec_handler)->Exec(m_database, "BEGIN TRANSACTION"); if (res != SQLITE_OK) { - LogPrintf("SQLiteBatch: Failed to begin the transaction\n"); + LogWarning("SQLiteBatch: Failed to begin the transaction"); m_database.m_write_semaphore.post(); } else { m_txn = true; @@ -669,7 +669,7 @@ bool SQLiteBatch::TxnCommit() Assert(m_database.HasActiveTxn()); int res = Assert(m_exec_handler)->Exec(m_database, "COMMIT TRANSACTION"); if (res != SQLITE_OK) { - LogPrintf("SQLiteBatch: Failed to commit the transaction\n"); + LogWarning("SQLiteBatch: Failed to commit the transaction"); } else { m_txn = false; m_database.m_write_semaphore.post(); @@ -683,7 +683,7 @@ bool SQLiteBatch::TxnAbort() Assert(m_database.HasActiveTxn()); int res = Assert(m_exec_handler)->Exec(m_database, "ROLLBACK TRANSACTION"); if (res != SQLITE_OK) { - LogPrintf("SQLiteBatch: Failed to abort the transaction\n"); + LogWarning("SQLiteBatch: Failed to abort the transaction"); } else { m_txn = false; m_database.m_write_semaphore.post(); diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index 9a0130462b..56183c6d94 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -128,7 +128,7 @@ def test_config_file_parser(self): with open(inc_conf_file_path, 'w', encoding='utf-8') as conf: 
conf.write('reindex=1\n') - with self.nodes[0].assert_debug_log(expected_msgs=['Warning: reindex=1 is set in the configuration file, which will significantly slow down startup. Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary']): + with self.nodes[0].assert_debug_log(expected_msgs=["[warning] reindex=1 is set in the configuration file, which will significantly slow down startup. Consider removing or commenting out this option for better performance, unless there is currently a condition which makes rebuilding the indexes necessary"]): self.start_node(0) self.stop_node(0) @@ -228,7 +228,7 @@ def test_invalid_command_line_options(self): ) def test_log_buffer(self): - with self.nodes[0].assert_debug_log(expected_msgs=['Warning: parsed potentially confusing double-negative -listen=0\n']): + with self.nodes[0].assert_debug_log(expected_msgs=["[warning] Parsed potentially confusing double-negative -listen=0\n"]): self.start_node(0, extra_args=['-nolisten=0']) self.stop_node(0) From 5efb4cf5c30ff49f1bb1300d8c516d55cee1c3f5 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 13 Jan 2026 07:39:53 +0000 Subject: [PATCH 172/356] Wallet/bdb: Use LogWarning/LogError as appropriate --- src/wallet/bdb.cpp | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 79851dff33..9ae965213e 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -115,7 +115,7 @@ void BerkeleyEnvironment::Close() int ret = dbenv->close(0); if (ret != 0) - LogPrintf("BerkeleyEnvironment::Close: Error %d closing database environment: %s\n", ret, DbEnv::strerror(ret)); + LogWarning("BerkeleyEnvironment::Close: Error %d closing database environment: %s", ret, DbEnv::strerror(ret)); if (!fMockDb) DbEnv(uint32_t{0}).remove(strPath.c_str(), 0); @@ -152,7 +152,7 @@ bool BerkeleyEnvironment::Open(bilingual_str& err) fs::path 
pathIn = fs::PathFromString(strPath); TryCreateDirectories(pathIn); if (util::LockDirectory(pathIn, ".walletlock") != util::LockResult::Success) { - LogPrintf("Cannot obtain a lock on wallet directory %s. Another instance may be using it.\n", strPath); + LogWarning("Cannot obtain a lock on wallet directory %s. Another instance may be using it.", strPath); err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory()))); return false; } @@ -188,10 +188,10 @@ bool BerkeleyEnvironment::Open(bilingual_str& err) nEnvFlags, S_IRUSR | S_IWUSR); if (ret != 0) { - LogPrintf("BerkeleyEnvironment::Open: Error %d opening database environment: %s\n", ret, DbEnv::strerror(ret)); + LogWarning("BerkeleyEnvironment::Open: Error %d opening database environment: %s", ret, DbEnv::strerror(ret)); int ret2 = dbenv->close(0); if (ret2 != 0) { - LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed database environment: %s\n", ret2, DbEnv::strerror(ret2)); + LogWarning("BerkeleyEnvironment::Open: Error %d closing failed database environment: %s", ret2, DbEnv::strerror(ret2)); } Reset(); err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory()))); @@ -542,7 +542,7 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip) DB_CREATE, // Flags 0); if (ret > 0) { - LogPrintf("BerkeleyBatch::Rewrite: Can't create database file %s\n", strFileRes); + LogWarning("BerkeleyBatch::Rewrite: Can't create database file %s", strFileRes); fSuccess = false; } @@ -592,7 +592,7 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip) fSuccess = false; } if (!fSuccess) - LogPrintf("BerkeleyBatch::Rewrite: Failed to rewrite database file %s\n", strFileRes); + LogWarning("BerkeleyBatch::Rewrite: Failed to rewrite database file %s", strFileRes); return fSuccess; } } @@ -694,7 +694,7 @@ bool BerkeleyDatabase::Backup(const std::string& strDest) const try { if (fs::exists(pathDest) && 
fs::equivalent(pathSrc, pathDest)) { - LogPrintf("cannot backup to wallet source file %s\n", fs::PathToString(pathDest)); + LogWarning("cannot backup to wallet source file %s", fs::PathToString(pathDest)); return false; } @@ -702,7 +702,7 @@ bool BerkeleyDatabase::Backup(const std::string& strDest) const LogPrintf("copied %s to %s\n", strFile, fs::PathToString(pathDest)); return true; } catch (const fs::filesystem_error& e) { - LogPrintf("error copying %s to %s - %s\n", strFile, fs::PathToString(pathDest), fsbridge::get_filesystem_error_message(e)); + LogWarning("error copying %s to %s - %s", strFile, fs::PathToString(pathDest), fsbridge::get_filesystem_error_message(e)); return false; } } @@ -830,7 +830,7 @@ bool BerkeleyDatabaseSanityCheck() * than the header that was compiled against, flag an error. */ if (major != DB_VERSION_MAJOR || minor < DB_VERSION_MINOR) { - LogPrintf("BerkeleyDB database version conflict: header version is %d.%d, library version is %d.%d\n", + LogError("BerkeleyDB database version conflict: header version is %d.%d, library version is %d.%d", DB_VERSION_MAJOR, DB_VERSION_MINOR, major, minor); return false; } From 232b340b57b7d910b52b73d51ccfa6896b2e4eb0 Mon Sep 17 00:00:00 2001 From: brunoerg Date: Tue, 2 Dec 2025 10:00:36 +0400 Subject: [PATCH 173/356] init: point out -stopatheight may be imprecise Github-Pull: bitcoin/bitcoin#33993 Rebased-From: ff06e2468a5d3eeebeffe781904c34c9d1b44385 --- src/init.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/init.cpp b/src/init.cpp index 495d96f938..408f5276ba 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -521,7 +521,7 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-deprecatedrpc=", "Allows deprecated RPC method(s) to be used", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-dropmessagestest=", "Randomly drop 1 of every network messages", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, 
OptionsCategory::DEBUG_TEST); argsman.AddArg("-stopafterblockimport", strprintf("Stop running after importing blocks from disk (default: %u)", DEFAULT_STOPAFTERBLOCKIMPORT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u)", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-stopatheight", strprintf("Stop running after reaching the given height in the main chain (default: %u). Blocks after target height may be processed during shutdown.", DEFAULT_STOPATHEIGHT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-limitancestorcount=", strprintf("Do not accept transactions if number of in-mempool ancestors is or more (default: %u)", DEFAULT_ANCESTOR_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-limitancestorsize=", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-limitdescendantcount=", strprintf("Do not accept transactions if any ancestor would have or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); From 71633a9b5c10f0d6a1a1e31bcbf51de2e27649d9 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 1 Sep 2025 13:38:28 -0700 Subject: [PATCH 174/356] test: Test wallet 'from me' status change If something is imported into the wallet, it can change the 'from me' status of a transaction. This status is only visible through gettransaction's "fee" field which is only shown for transactions that are 'from me'. 
Github-Pull: #33268 Rebased-From: e76c2f7a4111f87080e31539f83c21390fcd8f3b --- test/functional/wallet_listtransactions.py | 47 ++++++++++++++++++++++ 1 file changed, 47 insertions(+) diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py index 6263278a6c..ce52865727 100755 --- a/test/functional/wallet_listtransactions.py +++ b/test/functional/wallet_listtransactions.py @@ -5,9 +5,11 @@ """Test the listtransactions API.""" from decimal import Decimal +import time import os import shutil +from test_framework.blocktools import MAX_FUTURE_BLOCK_TIME from test_framework.messages import ( COIN, tx_from_hex, @@ -17,7 +19,9 @@ assert_array_result, assert_equal, assert_raises_rpc_error, + find_vout_for_address, ) +from test_framework.wallet_util import get_generate_key class ListTransactionsTest(BitcoinTestFramework): @@ -114,6 +118,8 @@ def run_test(self): self.run_invalid_parameters_test() self.test_op_return() + self.test_from_me_status_change() + def run_rbf_opt_in_test(self): """Test the opt-in-rbf flag for sent and received transactions.""" @@ -327,6 +333,47 @@ def test_op_return(self): assert 'address' not in op_ret_tx + def test_from_me_status_change(self): + self.log.info("Test gettransaction after changing a transaction's 'from me' status") + self.nodes[0].createwallet("fromme") + default_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) + wallet = self.nodes[0].get_wallet_rpc("fromme") + + # The 'fee' field of gettransaction is only added when the transaction is 'from me' + # Run twice, once for a transaction in the mempool, again when it confirms + for confirm in [False, True]: + key = get_generate_key() + default_wallet.importprivkey(key.privkey) + + send_res = default_wallet.send(outputs=[{key.p2wpkh_addr: 1}, {wallet.getnewaddress(): 1}]) + assert_equal(send_res["complete"], True) + vout = find_vout_for_address(self.nodes[0], send_res["txid"], key.p2wpkh_addr) + utxos = [{"txid": send_res["txid"], 
"vout": vout}] + self.generate(self.nodes[0], 1, sync_fun=self.no_op) + + # Send to the test wallet, ensuring that one input is for the descriptor we will import, + # and that there are other inputs belonging to only the sending wallet + send_res = default_wallet.send(outputs=[{wallet.getnewaddress(): 1.5}], inputs=utxos, add_inputs=True) + assert_equal(send_res["complete"], True) + txid = send_res["txid"] + self.nodes[0].syncwithvalidationinterfacequeue() + tx_info = wallet.gettransaction(txid) + assert "fee" not in tx_info + assert_equal(any(detail["category"] == "send" for detail in tx_info["details"]), False) + + if confirm: + self.generate(self.nodes[0], 1, sync_fun=self.no_op) + # Mock time forward and generate blocks so that the import does not rescan the transaction + self.nodes[0].setmocktime(int(time.time()) + MAX_FUTURE_BLOCK_TIME + 1) + self.generate(self.nodes[0], 10, sync_fun=self.no_op) + + wallet.importprivkey(key.privkey) + # TODO: We should check that the fee matches, but since the transaction spends inputs + # not known to the wallet, it is incorrectly calculating the fee. + # assert_equal(wallet.gettransaction(txid)["fee"], fee) + tx_info = wallet.gettransaction(txid) + assert "fee" in tx_info + assert_equal(any(detail["category"] == "send" for detail in tx_info["details"]), True) if __name__ == '__main__': ListTransactionsTest(__file__).main() From bab1ac827b4fdd4984661f32f6b899d56261da5d Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Thu, 28 Aug 2025 13:39:46 -0700 Subject: [PATCH 175/356] wallet: Determine IsFromMe by checking for TXOs of inputs Instead of checking whether the total amount of inputs known by the wallet is greater than 0, we should be checking for whether the input is known by the wallet. This enables us to determine whether a transaction spends an of output with an amount of 0, which is necessary for marking 0-value dust outputs as spent. 
Github-Pull: #33268 Rebased-From: 39a7dbdd277d1dea9a70314d8cc5ae057999ee88 --- src/wallet/wallet.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 2397d84a6f..913c745320 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -1702,7 +1702,13 @@ isminetype CWallet::IsMine(const COutPoint& outpoint) const bool CWallet::IsFromMe(const CTransaction& tx) const { - return (GetDebit(tx, ISMINE_ALL) > 0); + LOCK(cs_wallet); + for (const CTxIn& txin : tx.vin) { + if (IsMine(txin.prevout)) { + return true; + } + } + return false; } CAmount CWallet::GetDebit(const CTransaction& tx, const isminefilter& filter) const From c6e7765c0a03c124fcc86b452d6870b6d2797130 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Thu, 28 Aug 2025 15:13:36 -0700 Subject: [PATCH 176/356] wallet: Throw an error in sendall if the tx size cannot be calculated Github-Pull: #33268 Rebased-From: c40dc822d74aea46e4a21774ca282e008f609c2a --- src/wallet/rpc/spend.cpp | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp index 64aae701eb..5c0b1db23e 100644 --- a/src/wallet/rpc/spend.cpp +++ b/src/wallet/rpc/spend.cpp @@ -1486,7 +1486,6 @@ RPCHelpMan sendall() CoinFilterParams coins_params; coins_params.min_amount = 0; for (const COutput& output : AvailableCoins(*pwallet, &coin_control, fee_rate, coins_params).All()) { - CHECK_NONFATAL(output.input_bytes > 0); if (send_max && fee_rate.GetFee(output.input_bytes) > output.txout.nValue) { continue; } @@ -1505,6 +1504,9 @@ RPCHelpMan sendall() // estimate final size of tx const TxSize tx_size{CalculateMaximumSignedTxSize(CTransaction(rawTx), pwallet.get())}; + if (tx_size.vsize == -1) { + throw JSONRPCError(RPC_WALLET_ERROR, "Unable to determine the size of the transaction, the wallet contains unsolvable descriptors"); + } const CAmount fee_from_size{fee_rate.GetFee(tx_size.vsize)}; const std::optional 
total_bump_fees{pwallet->chain().calculateCombinedBumpFee(outpoints_spent, fee_rate)}; CAmount effective_value = total_input_value - fee_from_size - total_bump_fees.value_or(0); From 163d3e5c137401b0145ec57c7e24947e68f908e6 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 14 Jan 2026 02:21:09 +0000 Subject: [PATCH 177/356] Bugfix: Fee estimation: Refactor logic to avoid unlikely unsigned overflow in TxConfirmStats::Read --- src/policy/fees.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp index e85b2f2caa..7a91336e2b 100644 --- a/src/policy/fees.cpp +++ b/src/policy/fees.cpp @@ -422,7 +422,6 @@ void TxConfirmStats::Read(AutoFile& filein, size_t numBuckets) // Read data file and do some very basic sanity checking // buckets and bucketMap are not updated yet, so don't access them // If there is a read failure, we'll just discard this entire object anyway - size_t maxConfirms, maxPeriods; // The current version will store the decay with each individual TxConfirmStats and also keep a scale factor filein >> Using(decay); @@ -443,10 +442,9 @@ void TxConfirmStats::Read(AutoFile& filein, size_t numBuckets) throw std::runtime_error("Corrupt estimates file. Mismatch in tx count bucket count"); } filein >> Using>>(confAvg); - maxPeriods = confAvg.size(); - maxConfirms = scale * maxPeriods; + const size_t maxPeriods = confAvg.size(); - if (maxConfirms <= 0 || maxConfirms > 6 * 24 * 7) { // one week + if (maxPeriods == 0 || scale > (6 * 24 * 7) / maxPeriods) { // one week throw std::runtime_error("Corrupt estimates file. 
Must maintain estimates for between 1 and 1008 (one week) confirms"); } for (unsigned int i = 0; i < maxPeriods; i++) { @@ -469,6 +467,7 @@ void TxConfirmStats::Read(AutoFile& filein, size_t numBuckets) // to match the number of confirms and buckets resizeInMemoryCounters(numBuckets); + const size_t maxConfirms = scale * maxPeriods; LogDebug(BCLog::ESTIMATEFEE, "Reading estimates: %u buckets counting confirms up to %u blocks\n", numBuckets, maxConfirms); } From 00354e1161f6b70cbd5e8c6b65329075c8ae6d2a Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 14 Jan 2026 03:29:04 +0000 Subject: [PATCH 178/356] Net: Reduce log level for repeated PCP/NAT-PMP NOT_AUTHORIZED failures by default Users running Bitcoin Core on home networks with routers that don't support PCP (Port Control Protocol) or NAT-PMP port mapping receive frequent warning-level log messages every few minutes: "pcp: Mapping failed with result NOT_AUTHORIZED (code 2)" This is expected behavior for many consumer routers that have PCP disabled by default. The repeated warnings create unnecessary log noise. This change: - Adds explicit constants for NOT_AUTHORIZED result code (value 2) for both NAT-PMP and PCP protocols - Downgrades NOT_AUTHORIZED failures from LogWarning to LogDebug after the first failure since this is an expected condition on many networks, UNLESS the user has explicitly enabled -natpmp - Keeps LogWarning for other failure types that may indicate actual configuration or network issues The NOT_AUTHORIZED message will still be visible when running with -debug=net for users who need to troubleshoot port mapping issues. 
Fixes #34114 Inspired-by: 78e7259081f16158679b910e1723db13adc18985 (bitcoin/bitcoin#34117) Inspired-by-author: ANAVHEOBA --- src/common/pcp.cpp | 30 ++++++++++++++++++++++++++++++ src/common/pcp.h | 2 ++ src/init.cpp | 4 ++++ src/node/interfaces.cpp | 6 +++++- 4 files changed, 41 insertions(+), 1 deletion(-) diff --git a/src/common/pcp.cpp b/src/common/pcp.cpp index d0d4955470..32c973e540 100644 --- a/src/common/pcp.cpp +++ b/src/common/pcp.cpp @@ -16,6 +16,8 @@ #include #include +bool g_pcp_warn_for_unauthorized{false}; + namespace { // RFC6886 NAT-PMP and RFC6887 Port Control Protocol (PCP) implementation. @@ -80,6 +82,8 @@ constexpr size_t NATPMP_MAP_RESPONSE_LIFETIME_OFS = 12; constexpr uint8_t NATPMP_RESULT_SUCCESS = 0; //! Result code representing unsupported version. constexpr uint8_t NATPMP_RESULT_UNSUPP_VERSION = 1; +//! Result code representing not authorized (router doesn't support port mapping). +constexpr uint8_t NATPMP_RESULT_NOT_AUTHORIZED = 2; //! Result code representing lack of resources. constexpr uint8_t NATPMP_RESULT_NO_RESOURCES = 4; @@ -143,6 +147,8 @@ constexpr size_t PCP_MAP_EXTERNAL_IP_OFS = 20; //! Result code representing success (RFC6887 7.4), shared with NAT-PMP. constexpr uint8_t PCP_RESULT_SUCCESS = NATPMP_RESULT_SUCCESS; +//! Result code representing not authorized (RFC6887 7.4), shared with NAT-PMP. +constexpr uint8_t PCP_RESULT_NOT_AUTHORIZED = NATPMP_RESULT_NOT_AUTHORIZED; //! Result code representing lack of resources (RFC6887 7.4). 
constexpr uint8_t PCP_RESULT_NO_RESOURCES = 8; @@ -368,6 +374,18 @@ std::variant NATPMPRequestPortMap(const CNetAddr &g Assume(response.size() >= NATPMP_MAP_RESPONSE_SIZE); uint16_t result_code = ReadBE16(response.data() + NATPMP_RESPONSE_HDR_RESULT_OFS); + static bool already_warned_for_unauthorized{false}; + if (result_code == NATPMP_RESULT_NOT_AUTHORIZED) { + if (already_warned_for_unauthorized && !g_pcp_warn_for_unauthorized) { + // NOT_AUTHORIZED is expected on many routers that don't support port mapping. + LogDebug(BCLog::NET, "natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); + return MappingError::PROTOCOL_ERROR; + } else { + already_warned_for_unauthorized = true; + } + } else { + already_warned_for_unauthorized = false; + } if (result_code != NATPMP_RESULT_SUCCESS) { LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "natpmp: Port mapping failed with result %s\n", NATPMPResultString(result_code)); if (result_code == NATPMP_RESULT_NO_RESOURCES) { @@ -501,6 +519,18 @@ std::variant PCPRequestPortMap(const PCPMappingNonc uint32_t lifetime_ret = ReadBE32(response.data() + PCP_HDR_LIFETIME_OFS); uint16_t external_port = ReadBE16(response.data() + PCP_HDR_SIZE + PCP_MAP_EXTERNAL_PORT_OFS); CNetAddr external_addr{PCPUnwrapAddress(response.subspan(PCP_HDR_SIZE + PCP_MAP_EXTERNAL_IP_OFS, ADDR_IPV6_SIZE))}; + static bool already_warned_for_unauthorized{false}; + if (result_code == PCP_RESULT_NOT_AUTHORIZED) { + if (already_warned_for_unauthorized && !g_pcp_warn_for_unauthorized) { + // NOT_AUTHORIZED is expected on many routers that don't support port mapping. 
+ LogDebug(BCLog::NET, "pcp: Mapping failed with result %s\n", PCPResultString(result_code)); + return MappingError::PROTOCOL_ERROR; + } else { + already_warned_for_unauthorized = true; + } + } else { + already_warned_for_unauthorized = false; + } if (result_code != PCP_RESULT_SUCCESS) { LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "pcp: Mapping failed with result %s\n", PCPResultString(result_code)); if (result_code == PCP_RESULT_NO_RESOURCES) { diff --git a/src/common/pcp.h b/src/common/pcp.h index 44f9285c27..d3c9785a5e 100644 --- a/src/common/pcp.h +++ b/src/common/pcp.h @@ -43,6 +43,8 @@ struct MappingResult { std::string ToString() const; }; +extern bool g_pcp_warn_for_unauthorized; + //! Try to open a port using RFC 6886 NAT-PMP. IPv4 only. //! //! * gateway: Destination address for PCP requests (usually the default gateway). diff --git a/src/init.cpp b/src/init.cpp index 7fdbf75dc6..78ad1e0a8c 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -714,6 +715,9 @@ static bool AppInitServers(NodeContext& node) // Parameter interaction based on rules void InitParameterInteraction(ArgsManager& args) { + // Before any SoftSetArg so we get the actual user-set value + g_pcp_warn_for_unauthorized = args.GetBoolArg("-natpmp", false); + // when specifying an explicit binding address, you want to listen on it // even when -connect or -proxy is specified if (!args.GetArgs("-bind").empty()) { diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index f28e5dffbd..93cdaa3604 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -188,7 +189,10 @@ class NodeImpl : public Node }); args().WriteSettingsFile(); } - void mapPort(bool enable) override { StartMapPort(enable); } + void mapPort(bool enable) override { + g_pcp_warn_for_unauthorized = true; + StartMapPort(enable); + } bool 
getProxy(Network net, Proxy& proxy_info) override { return GetProxy(net, proxy_info); } size_t getNodeCount(ConnectionDirection flags) override { From f4b78c42e557aec29f5ed5e570fb55bf70d2b3b4 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Thu, 28 Aug 2025 15:13:23 -0700 Subject: [PATCH 179/356] test: Add a test for anchor outputs in the wallet Github-Pull: #33268 Rebased-From: 609d265ebc51abfe9a9ce570da647b6839dc1214 --- test/functional/test_framework/script_util.py | 1 + test/functional/test_runner.py | 2 + test/functional/wallet_anchor.py | 128 ++++++++++++++++++ 3 files changed, 131 insertions(+) create mode 100755 test/functional/wallet_anchor.py diff --git a/test/functional/test_framework/script_util.py b/test/functional/test_framework/script_util.py index fce32e138e..d97120fd73 100755 --- a/test/functional/test_framework/script_util.py +++ b/test/functional/test_framework/script_util.py @@ -50,6 +50,7 @@ assert len(DUMMY_MIN_OP_RETURN_SCRIPT) == MIN_PADDING PAY_TO_ANCHOR = CScript([OP_1, bytes.fromhex("4e73")]) +ANCHOR_ADDRESS = "bcrt1pfeesnyr2tx" def key_to_p2pk_script(key): key = check_key(key) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 1fa22b1cc6..000407b118 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -170,6 +170,8 @@ 'wallet_listreceivedby.py --descriptors', 'wallet_abandonconflict.py --legacy-wallet', 'wallet_abandonconflict.py --descriptors', + 'wallet_anchor.py --legacy-wallet', + 'wallet_anchor.py --descriptors', 'feature_reindex.py', 'feature_reindex_readonly.py', 'wallet_labels.py --legacy-wallet', diff --git a/test/functional/wallet_anchor.py b/test/functional/wallet_anchor.py new file mode 100755 index 0000000000..f641f3f9ee --- /dev/null +++ b/test/functional/wallet_anchor.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025-present The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or 
https://www.opensource.org/licenses/mit-license.php. + +import time + +from test_framework.blocktools import MAX_FUTURE_BLOCK_TIME +from test_framework.descriptors import descsum_create +from test_framework.messages import ( + COutPoint, + CTxIn, + CTxInWitness, + CTxOut, +) +from test_framework.script_util import ( + ANCHOR_ADDRESS, + PAY_TO_ANCHOR, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, +) +from test_framework.wallet import MiniWallet + +class WalletAnchorTest(BitcoinTestFramework): + def add_options(self, parser): + self.add_wallet_options(parser) + + def set_test_params(self): + self.num_nodes = 1 + + def skip_test_if_missing_module(self): + self.skip_if_no_wallet() + + def test_0_value_anchor_listunspent(self): + self.log.info("Test that 0-value anchor outputs are detected as UTXOs") + + # Create an anchor output, and spend it + sender = MiniWallet(self.nodes[0]) + anchor_tx = sender.create_self_transfer(fee_rate=0, version=3)["tx"] + anchor_tx.vout.append(CTxOut(0, PAY_TO_ANCHOR)) + anchor_tx.rehash() # Rehash after modifying anchor_tx + anchor_spend = sender.create_self_transfer(version=3)["tx"] + anchor_spend.vin.append(CTxIn(COutPoint(anchor_tx.sha256, 1), b"")) + anchor_spend.wit.vtxinwit.append(CTxInWitness()) + anchor_spend.rehash() # Rehash after modifying anchor_spend + submit_res = self.nodes[0].submitpackage([anchor_tx.serialize().hex(), anchor_spend.serialize().hex()]) + assert_equal(submit_res["package_msg"], "success") + anchor_txid = anchor_tx.hash + anchor_spend_txid = anchor_spend.hash + + # Mine each tx in separate blocks + self.generateblock(self.nodes[0], sender.get_address(), [anchor_tx.serialize().hex()]) + anchor_tx_height = self.nodes[0].getblockcount() + self.generateblock(self.nodes[0], sender.get_address(), [anchor_spend.serialize().hex()]) + + # Mock time forward and generate some blocks to avoid rescanning of latest blocks + 
self.nodes[0].setmocktime(int(time.time()) + MAX_FUTURE_BLOCK_TIME + 1) + self.generate(self.nodes[0], 10) + + self.nodes[0].createwallet(wallet_name="anchor", disable_private_keys=True) + wallet = self.nodes[0].get_wallet_rpc("anchor") + + wallet.importaddress(ANCHOR_ADDRESS, rescan=False) + + # The wallet should have no UTXOs, and not know of the anchor tx or its spend + assert_equal(wallet.listunspent(), []) + assert_raises_rpc_error(-5, "Invalid or non-wallet transaction id", wallet.gettransaction, anchor_txid) + assert_raises_rpc_error(-5, "Invalid or non-wallet transaction id", wallet.gettransaction, anchor_spend_txid) + + # Rescanning the block containing the anchor so that listunspent will list the output + wallet.rescanblockchain(0, anchor_tx_height) + utxos = wallet.listunspent() + assert_equal(len(utxos), 1) + assert_equal(utxos[0]["txid"], anchor_txid) + assert_equal(utxos[0]["address"], ANCHOR_ADDRESS) + assert_equal(utxos[0]["amount"], 0) + wallet.gettransaction(anchor_txid) + assert_raises_rpc_error(-5, "Invalid or non-wallet transaction id", wallet.gettransaction, anchor_spend_txid) + + # Rescan the rest of the blockchain to see the anchor was spent + wallet.rescanblockchain() + assert_equal(wallet.listunspent(), []) + wallet.gettransaction(anchor_spend_txid) + + def test_cannot_sign_anchors(self): + self.log.info("Test that the wallet cannot spend anchor outputs") + for disable_privkeys in [False, True]: + self.nodes[0].createwallet(wallet_name=f"anchor_spend_{disable_privkeys}", disable_private_keys=disable_privkeys) + wallet = self.nodes[0].get_wallet_rpc(f"anchor_spend_{disable_privkeys}") + if self.options.descriptors: + import_res = wallet.importdescriptors([ + {"desc": descsum_create(f"addr({ANCHOR_ADDRESS})"), "timestamp": "now"}, + {"desc": descsum_create(f"raw({PAY_TO_ANCHOR.hex()})"), "timestamp": "now"} + ]) + assert_equal(import_res[0]["success"], disable_privkeys) + assert_equal(import_res[1]["success"], disable_privkeys) + else: + 
wallet.importaddress(ANCHOR_ADDRESS) + + anchor_txid = self.default_wallet.sendtoaddress(ANCHOR_ADDRESS, 1) + self.generate(self.nodes[0], 1) + + wallet = self.nodes[0].get_wallet_rpc("anchor_spend_True") + utxos = wallet.listunspent() + assert_equal(len(utxos), 1) + assert_equal(utxos[0]["txid"], anchor_txid) + assert_equal(utxos[0]["address"], ANCHOR_ADDRESS) + assert_equal(utxos[0]["amount"], 1) + + if self.options.descriptors: + assert_raises_rpc_error(-4, "Missing solving data for estimating transaction size", wallet.send, [{self.default_wallet.getnewaddress(): 0.9999}]) + assert_raises_rpc_error(-4, "Unable to determine the size of the transaction, the wallet contains unsolvable descriptors", wallet.sendall, recipients=[self.default_wallet.getnewaddress()]) + else: + assert_raises_rpc_error(-4, "Insufficient funds", wallet.send, [{self.default_wallet.getnewaddress(): 0.9999}]) + assert_raises_rpc_error(-6, "Total value of UTXO pool too low to pay for transaction. Try using lower feerate or excluding uneconomic UTXOs with 'send_max' option.", wallet.sendall, recipients=[self.default_wallet.getnewaddress()]) + assert_raises_rpc_error(-4, "Error: Private keys are disabled for this wallet", wallet.sendtoaddress, self.default_wallet.getnewaddress(), 0.9999) + assert_raises_rpc_error(-4, "Unable to determine the size of the transaction, the wallet contains unsolvable descriptors", wallet.sendall, recipients=[self.default_wallet.getnewaddress()], inputs=utxos) + + def run_test(self): + self.default_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name) + self.test_0_value_anchor_listunspent() + self.test_cannot_sign_anchors() + +if __name__ == '__main__': + WalletAnchorTest(__file__).main() From e973b61dbb431141e23846d95a86221b01587900 Mon Sep 17 00:00:00 2001 From: glozow Date: Mon, 12 Jan 2026 14:29:54 -0800 Subject: [PATCH 180/356] [doc] update release notes for 29.3rc1 --- doc/release-notes.md | 43 +++++++++++++++++++++++++++++++++++++++++-- 1 file 
changed, 41 insertions(+), 2 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 4e2071dfa6..263ee553d1 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.x is now available from: +Bitcoin Core version 29.3rc1 is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. @@ -37,6 +37,36 @@ unsupported systems. Notable changes =============== +### P2P + +- #33050 net, validation: don't punish peers for consensus-invalid txs +- #33723 chainparams: remove dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us + +### Validation + +- #32473 Introduce per-txin sighash midstate cache for legacy/p2sh/segwitv0 scripts +- #33105 validation: detect witness stripping without re-running Script checks + +### Wallet + +- #33268 wallet: Identify transactions spending 0-value outputs, and add tests for anchor outputs in a wallet +- #34156 wallet: fix unnamed legacy wallet migration failure +- #34226 wallet: test: Relative wallet failed migration cleanup +- #34123 wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet +- #34215 wallettool: fix unnamed createfromdump failure walletsdir deletion + +### Mining + +- #33475 bugfix: miner: fix `addPackageTxs` unsigned integer overflow + +### Build + +- #34227 guix: Fix `osslsigncode` tests + +### Documentation + +- #33623 doc: document capnproto and libmultiprocess deps in 29.x + ### Test - #33612 test: change log rate limit version gate @@ -51,8 +81,17 @@ Credits Thanks to everyone who directly contributed to this release: +- Anthony Towns +- Antoine Poinsot - Ava Chow +- David Gumberg - Eugene Siegel +- fanquake +- furszy +- Hennadii Stepanov +- ismaelsadeeq +- Pieter Wuille +- SatsAndSports - willcl-ark As well as to everyone that helped with translations on From e9c978391ff74e99724fbda9fb50f0c45fb13008 Mon Sep 17 00:00:00 2001 From: glozow Date: Mon, 12 Jan 2026 14:36:12 -0800 
Subject: [PATCH 181/356] [build] bump version to 29.3rc1 --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 70f672132b..8337d69535 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,9 +28,9 @@ get_directory_property(precious_variables CACHE_VARIABLES) #============================= set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) -set(CLIENT_VERSION_MINOR 2) +set(CLIENT_VERSION_MINOR 3) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 0) +set(CLIENT_VERSION_RC 1) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From b834447fb2f2073e25164a80ba197a3120610b92 Mon Sep 17 00:00:00 2001 From: glozow Date: Mon, 12 Jan 2026 14:37:04 -0800 Subject: [PATCH 182/356] [doc] generate manpages 29.3rc1 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index ce6f35c198..aad8ffd52b 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-CLI "1" "October 2025" "bitcoin-cli v29.2.0" "User Commands" +.TH BITCOIN-CLI "1" "January 2026" "bitcoin-cli v29.3.0rc1" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.2.0 +bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc1 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.2.0 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.2.0 +Bitcoin Core RPC client version v29.3.0rc1 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. 
.PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index 5efc9e9617..ba38159542 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-QT "1" "October 2025" "bitcoin-qt v29.2.0" "User Commands" +.TH BITCOIN-QT "1" "January 2026" "bitcoin-qt v29.3.0rc1" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.2.0 +bitcoin-qt \- manual page for bitcoin-qt v29.3.0rc1 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.2.0 +Bitcoin Core version v29.3.0rc1 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index 90a233619f..4be1d58291 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-TX "1" "October 2025" "bitcoin-tx v29.2.0" "User Commands" +.TH BITCOIN-TX "1" "January 2026" "bitcoin-tx v29.3.0rc1" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.2.0 +bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc1 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.2.0 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.2.0 +Bitcoin Core bitcoin\-tx utility version v29.3.0rc1 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index 4186bd3f5a..f4bc33f4da 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-UTIL "1" "October 2025" "bitcoin-util v29.2.0" "User Commands" +.TH BITCOIN-UTIL "1" "January 2026" "bitcoin-util v29.3.0rc1" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.2.0 +bitcoin-util \- manual page for bitcoin-util v29.3.0rc1 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.2.0 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.2.0 +Bitcoin Core bitcoin\-util utility version v29.3.0rc1 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index 97c6144f81..000fb2a814 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-WALLET "1" "October 2025" "bitcoin-wallet v29.2.0" "User Commands" +.TH BITCOIN-WALLET "1" "January 2026" "bitcoin-wallet v29.3.0rc1" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.2.0 +bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0rc1 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.2.0 +Bitcoin Core bitcoin\-wallet utility version v29.3.0rc1 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index 82804a50c8..f0005de61c 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIND "1" "October 2025" "bitcoind v29.2.0" "User Commands" +.TH BITCOIND "1" "January 2026" "bitcoind v29.3.0rc1" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.2.0 +bitcoind \- manual page for bitcoind v29.3.0rc1 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.2.0 +Bitcoin Core daemon version v29.3.0rc1 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. .PP From 2f4b23e0940ac4161f34a2c5409812b1405e9e8e Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 21 Jan 2026 14:06:47 +0000 Subject: [PATCH 183/356] Restore luke-jr's DNS seed It was removed by Core maliciously / for no reason. This partially reverts commit 7a71850a6d1d2eaf09e19d9d0af574a90487ec2b. --- src/kernel/chainparams.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 0f193eff74..ac3fc9eada 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -146,6 +146,7 @@ class CMainParams : public CChainParams { // release ASAP to avoid it where possible. vSeeds.emplace_back("seed.bitcoin.sipa.be."); // Pieter Wuille, only supports x1, x5, x9, and xd vSeeds.emplace_back("dnsseed.bluematt.me."); // Matt Corallo, only supports x9 + vSeeds.emplace_back("dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us."); // Luke Dashjr vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch."); // Jonas Schnelli, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.btc.petertodd.net."); // Peter Todd, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors Provoost From 679d961c244b9f1f8f3a80380c083575309e6864 Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Mon, 30 Jun 2025 14:46:33 -0700 Subject: [PATCH 184/356] test: wallet: Check direct file backup name. 
This check ensures that when migrating a legacy wallet with a direct filename, the backup file is named as expected. Co-authored-by: Ava Chow Github-Pull: bitcoin/bitcoin#32273 Rebased-From: e22c3599c6772730e72e17fc68c99feea09b4d29 --- test/functional/wallet_migration.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 8129baf438..6bae61b76b 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -667,7 +667,10 @@ def test_direct_file(self): ) assert (self.master_node.wallets_path / "plainfile").is_file() - self.master_node.migratewallet("plainfile") + mocked_time = int(time.time()) + self.master_node.setmocktime(mocked_time) + migrate_res = self.master_node.migratewallet("plainfile") + assert_equal(f"plainfile_{mocked_time}.legacy.bak", os.path.basename(migrate_res["backup_path"])) wallet = self.master_node.get_wallet_rpc("plainfile") info = wallet.getwalletinfo() assert_equal(info["descriptors"], True) From fee79e70d933bb74feadc7cbf1bad8e25b1efe32 Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Sat, 12 Apr 2025 00:03:40 -0700 Subject: [PATCH 185/356] wallet: Fix migration of wallets with pathnames. Co-authored-by: Russell Yanofsky Github-Pull: bitcoin/bitcoin#32273 Rebased-From: 70f1c99c901de64d6ccea793b7e267e20dfa49cf --- src/wallet/wallet.cpp | 15 +++++++++++++-- test/functional/wallet_migration.py | 2 +- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 913c745320..2cfde74f0b 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4546,9 +4546,20 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr return util::Error{_("Error: This wallet is already a descriptor wallet")}; } - // Make a backup of the DB + // Make a backup of the DB in the wallet's directory with a unique filename + // using the wallet name and current timestamp. 
The backup filename is based + // on the name of the parent directory containing the wallet data in most + // cases, but in the case where the wallet name is a path to a data file, + // the name of the data file is used, and in the case where the wallet name + // is blank, "default_wallet" is used. fs::path this_wallet_dir = fs::absolute(fs::PathFromString(local_wallet->GetDatabase().Filename())).parent_path(); - fs::path backup_filename = fs::PathFromString(strprintf("%s_%d.legacy.bak", MigrationPrefixName(*local_wallet), GetTime())); + const std::string backup_prefix = wallet_name.empty() ? MigrationPrefixName(*local_wallet) : [&] { + // fs::weakly_canonical resolves relative specifiers and remove trailing slashes. + const auto legacy_wallet_path = fs::weakly_canonical(GetWalletDir() / fs::PathFromString(wallet_name)); + return fs::PathToString(legacy_wallet_path.filename()); + }(); + + fs::path backup_filename = fs::PathFromString(strprintf("%s_%d.legacy.bak", backup_prefix, GetTime())); fs::path backup_path = this_wallet_dir / backup_filename; if (!local_wallet->BackupWallet(fs::PathToString(backup_path))) { if (was_loaded) { diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 6bae61b76b..c0ae27709b 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -636,7 +636,7 @@ def test_migration_failure(self, wallet_name): assert Path(old_path / "wallet.dat").exists() assert Path(master_path / "wallet.dat").exists() - backup_prefix = "default_wallet" if is_default else wallet_name + backup_prefix = "default_wallet" if is_default else os.path.basename(os.path.abspath(master_path)) backup_path = master_path / f"{backup_prefix}_{mocked_time}.legacy.bak" assert backup_path.exists() From 8602cb178e59e32588a6c1d75b93781c2fbc5ff1 Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Sat, 12 Apr 2025 00:05:03 -0700 Subject: [PATCH 186/356] test: Migration of a wallet with `../` in path. 
Github-Pull: bitcoin/bitcoin#32273 Rebased-From: 63c6d364376907c10b9baa3c6f4d72e3f1881abc --- test/functional/wallet_migration.py | 60 ++++++++++++++++++++++++++++- 1 file changed, 59 insertions(+), 1 deletion(-) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index c0ae27709b..2396fe1fb7 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -113,7 +113,10 @@ def migrate_and_get_rpc(self, wallet_name, **kwargs): if wallet_name == "": shutil.copyfile(self.old_node.wallets_path / "wallet.dat", self.master_node.wallets_path / "wallet.dat") else: - shutil.copytree(self.old_node.wallets_path / wallet_name, self.master_node.wallets_path / wallet_name) + src = os.path.abspath(self.old_node.wallets_path / wallet_name) + dst = os.path.abspath(self.master_node.wallets_path / wallet_name) + if src != dst : + shutil.copytree(self.old_node.wallets_path / wallet_name, self.master_node.wallets_path / wallet_name, dirs_exist_ok=True) # Migrate, checking that rescan does not occur with self.master_node.assert_debug_log(expected_msgs=[], unexpected_msgs=["Rescanning"]): migrate_info = self.master_node.migratewallet(wallet_name=wallet_name, **kwargs) @@ -525,6 +528,60 @@ def test_unloaded_by_path(self): assert_equal(bals, wallet.getbalances()) + def test_wallet_with_relative_path(self): + self.log.info("Test migration of a wallet that isn't loaded, specified by a relative path") + + # Get the nearest common path of both nodes' wallet paths. + common_parent = os.path.commonpath([self.master_node.wallets_path, self.old_node.wallets_path]) + + # This test assumes that the relative path from each wallet directory to the common path is identical. 
+ assert_equal(os.path.relpath(common_parent, start=self.master_node.wallets_path), os.path.relpath(common_parent, start=self.old_node.wallets_path)) + + wallet_name = "relative" + absolute_path = os.path.abspath(os.path.join(common_parent, wallet_name)) + relative_name = os.path.relpath(absolute_path, start=self.master_node.wallets_path) + + wallet = self.create_legacy_wallet(relative_name) + # listwalletdirs only returns wallets in the wallet directory + assert {"name": relative_name} not in wallet.listwalletdir()["wallets"] + assert relative_name in wallet.listwallets() + + default = self.master_node.get_wallet_rpc(self.default_wallet_name) + addr = wallet.getnewaddress() + txid = default.sendtoaddress(addr, 1) + self.generate(self.master_node, 1) + bals = wallet.getbalances() + + # migratewallet uses current time in naming the backup file, set a mock time + # to check that this works correctly. + curr_time = int(time.time()) + self.master_node.setmocktime(curr_time) + migrate_res, wallet = self.migrate_and_get_rpc(relative_name) + self.master_node.setmocktime(0) + + # Check that the wallet was migrated, knows the right txid, and has the right balance. + assert wallet.gettransaction(txid) + assert_equal(bals, wallet.getbalances()) + + # The migrated wallet should not be in the wallet dir, but should be in the list of wallets. + info = wallet.getwalletinfo() + + walletdirlist = wallet.listwalletdir() + assert {"name": info["walletname"]} not in walletdirlist["wallets"] + + walletlist = wallet.listwallets() + assert info["walletname"] in walletlist + + # Check that old node can restore from the backup. 
+ self.old_node.restorewallet("relative_restored", migrate_res['backup_path']) + wallet = self.old_node.get_wallet_rpc("relative_restored") + assert wallet.gettransaction(txid) + assert_equal(bals, wallet.getbalances()) + + info = wallet.getwalletinfo() + assert_equal(info["descriptors"], False) + assert_equal(info["format"], "bdb") + def clear_default_wallet(self, backup_file): # Test cleanup: Clear unnamed default wallet for subsequent tests (self.old_node.wallets_path / "wallet.dat").unlink() @@ -1519,6 +1576,7 @@ def run_test(self): self.test_encrypted() self.test_nonexistent() self.test_unloaded_by_path() + self.test_wallet_with_relative_path() migration_failure_cases = [ "", From 9516979d199ea9e8bdcf3dfe56b51c841cb36ad8 Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Wed, 16 Apr 2025 14:11:12 -0700 Subject: [PATCH 187/356] test: Migration fail recovery w/ `../` in path Github-Pull: bitcoin/bitcoin#32273 Rebased-From: 41faef5f80d69ed984af685bd9d2a43268009fb6 --- test/functional/wallet_migration.py | 48 +++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 2396fe1fb7..0874beb511 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -1084,6 +1084,53 @@ def test_failed_migration_cleanup(self): wallets = self.master_node.listwallets() assert "failed" in wallets and all(wallet not in wallets for wallet in ["failed_watchonly", "failed_solvables"]) + def test_failed_migration_cleanup_relative_path(self): + self.log.info("Test that a failed migration with a relative path is cleaned up") + + # Get the nearest common path of both nodes' wallet paths. + common_parent = os.path.commonpath([self.master_node.wallets_path, self.old_node.wallets_path]) + + # This test assumes that the relative path from each wallet directory to the common path is identical. 
+ assert_equal(os.path.relpath(common_parent, start=self.master_node.wallets_path), os.path.relpath(common_parent, start=self.old_node.wallets_path)) + + wallet_name = "relativefailure" + absolute_path = os.path.abspath(os.path.join(common_parent, wallet_name)) + relative_name = os.path.relpath(absolute_path, start=self.master_node.wallets_path) + + wallet = self.create_legacy_wallet(relative_name) + + # Make a copy of the wallet with the solvables wallet name so that we are unable + # to create the solvables wallet when migrating, thus failing to migrate + wallet.unloadwallet() + solvables_path = os.path.join(common_parent, f"{wallet_name}_solvables") + + shutil.copytree(self.old_node.wallets_path / relative_name, solvables_path) + original_shasum = sha256sum_file(os.path.join(solvables_path, "wallet.dat")) + + self.old_node.loadwallet(relative_name) + + # Add a multisig so that a solvables wallet is created + wallet.addmultisigaddress(2, [wallet.getnewaddress(), get_generate_key().pubkey]) + wallet.importaddress(get_generate_key().p2pkh_addr) + + self.old_node.unloadwallet(relative_name) + assert_raises_rpc_error(-4, "Failed to create database", self.master_node.migratewallet, relative_name) + + assert all(wallet not in self.master_node.listwallets() for wallet in [f"{wallet_name}", f"{wallet_name}_watchonly", f"{wallet_name}_solvables"]) + + assert not (self.master_node.wallets_path / f"{wallet_name}_watchonly").exists() + # Since the file in failed_solvables is one that we put there, migration shouldn't touch it + assert os.path.exists(solvables_path) + new_shasum = sha256sum_file(os.path.join(solvables_path , "wallet.dat")) + assert_equal(original_shasum, new_shasum) + + # Check the wallet we tried to migrate is still BDB + datfile = os.path.join(absolute_path, "wallet.dat") + with open(datfile, "rb") as f: + data = f.read(16) + _, _, magic = struct.unpack("QII", data) + assert_equal(magic, BTREE_MAGIC) + def test_blank(self): self.log.info("Test that a blank 
wallet is migrated") wallet = self.create_legacy_wallet("blank", blank=True) @@ -1595,6 +1642,7 @@ def run_test(self): self.test_conflict_txs() self.test_hybrid_pubkey() self.test_failed_migration_cleanup() + self.test_failed_migration_cleanup_relative_path() self.test_avoidreuse() self.test_preserve_tx_extra_info() self.test_blank() From 90a9765edba2451a5c2b289b56a186617a5926fa Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Thu, 3 Jul 2025 15:29:27 -0700 Subject: [PATCH 188/356] test: Migration of a wallet ending in `/` Github-Pull: bitcoin/bitcoin#32273 Rebased-From: f0bb3d50fef08d9f981eac45841ef2df7444031b --- test/functional/wallet_migration.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 0874beb511..c3a693616d 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -582,6 +582,26 @@ def test_wallet_with_relative_path(self): assert_equal(info["descriptors"], False) assert_equal(info["format"], "bdb") + def test_wallet_with_path_ending_in_slash(self): + self.log.info("Test migrating a wallet with a name/path ending in '/'") + + # The last directory in the wallet's path + final_dir = "mywallet" + wallet_name = f"path/to/{final_dir}/" + wallet = self.create_legacy_wallet(wallet_name) + default = self.master_node.get_wallet_rpc(self.default_wallet_name) + + addr = wallet.getnewaddress() + txid = default.sendtoaddress(addr, 1) + self.generate(self.master_node, 1) + bals = wallet.getbalances() + + _, wallet = self.migrate_and_get_rpc(wallet_name) + + assert wallet.gettransaction(txid) + + assert_equal(bals, wallet.getbalances()) + def clear_default_wallet(self, backup_file): # Test cleanup: Clear unnamed default wallet for subsequent tests (self.old_node.wallets_path / "wallet.dat").unlink() @@ -1624,6 +1644,7 @@ def run_test(self): self.test_nonexistent() self.test_unloaded_by_path() 
self.test_wallet_with_relative_path() + self.test_wallet_with_path_ending_in_slash() migration_failure_cases = [ "", From 0dcf09418c282baee680f3be2c31d6660cef1fd6 Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Thu, 3 Jul 2025 15:30:18 -0700 Subject: [PATCH 189/356] test: Migration of a wallet ending in `../` Github-Pull: bitcoin/bitcoin#32273 Rebased-From: 76fe0e59ec4a5b0c5b18f46bfcbbf99628d74d77 --- test/functional/wallet_migration.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index c3a693616d..4e7d2e3d20 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -602,6 +602,24 @@ def test_wallet_with_path_ending_in_slash(self): assert_equal(bals, wallet.getbalances()) + def test_wallet_with_path_ending_in_relative_specifier(self): + self.log.info("Test migrating a wallet with a name/path ending in a relative specifier, '..'") + wallet_ending_in_relative = "path/that/ends/in/.." 
+ # the wallet data is actually inside of path/that/ends/ + wallet = self.create_legacy_wallet(wallet_ending_in_relative) + default = self.master_node.get_wallet_rpc(self.default_wallet_name) + + addr = wallet.getnewaddress() + txid = default.sendtoaddress(addr, 1) + self.generate(self.master_node, 1) + bals = wallet.getbalances() + + _, wallet = self.migrate_and_get_rpc(wallet_ending_in_relative) + + assert wallet.gettransaction(txid) + + assert_equal(bals, wallet.getbalances()) + def clear_default_wallet(self, backup_file): # Test cleanup: Clear unnamed default wallet for subsequent tests (self.old_node.wallets_path / "wallet.dat").unlink() @@ -1645,6 +1663,7 @@ def run_test(self): self.test_unloaded_by_path() self.test_wallet_with_relative_path() self.test_wallet_with_path_ending_in_slash() + self.test_wallet_with_path_ending_in_relative_specifier() migration_failure_cases = [ "", From 06a7205cee98c4596f2902ad1e0f8a5e9de427a7 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 16 Jul 2025 09:05:45 +0200 Subject: [PATCH 190/356] test: Failed load after migrate should restore backup Github-Pull: bitcoin/bitcoin#32984 Rebased-From: 060695c22ae7b2b0f2a1dd1417ed1b9d5a5ab542 --- test/functional/wallet_migration.py | 33 +++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 8129baf438..b1b67e5efe 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -66,6 +66,12 @@ def assert_is_sqlite(self, wallet_name): assert_equal(file_magic, b'SQLite format 3\x00') assert_equal(self.master_node.get_wallet_rpc(wallet_name).getwalletinfo()["format"], "sqlite") + def assert_is_bdb(self, wallet_name): + with open(self.master_node.wallets_path / wallet_name / self.wallet_data_filename, "rb") as f: + data = f.read(16) + _, _, magic = struct.unpack("QII", data) + assert_equal(magic, BTREE_MAGIC) + def 
create_legacy_wallet(self, wallet_name, **kwargs): self.old_node.createwallet(wallet_name=wallet_name, descriptors=False, **kwargs) wallet = self.old_node.get_wallet_rpc(wallet_name) @@ -1003,10 +1009,7 @@ def test_failed_migration_cleanup(self): assert_equal(original_shasum, new_shasum) # Check the wallet we tried to migrate is still BDB - with open(self.master_node.wallets_path / "failed" / "wallet.dat", "rb") as f: - data = f.read(16) - _, _, magic = struct.unpack("QII", data) - assert_equal(magic, BTREE_MAGIC) + self.assert_is_bdb("failed") #################################################### # Perform the same test with a loaded legacy wallet. @@ -1465,6 +1468,27 @@ def test_solvable_no_privs(self): assert_equal(addr_info["solvable"], True) assert "hex" in addr_info + def test_loading_failure_after_migration(self): + self.log.info("Test that a failed loading of the wallet at the end of migration restores the backup") + self.stop_node(self.old_node.index) + self.old_node.chain = "signet" + self.old_node.replace_in_config([("regtest=", "signet="), ("[regtest]", "[signet]")]) + # Disable network sync and prevent disk space warning on small (tmp)fs + self.start_node(self.old_node.index, extra_args=self.old_node.extra_args + ["-maxconnections=0", "-prune=550"]) + + wallet_name = "failed_load_after_migrate" + self.create_legacy_wallet(wallet_name) + assert_raises_rpc_error(-4, "Wallet loading failed. 
Wallet files should not be reused across chains.", lambda: self.migrate_and_get_rpc(wallet_name)) + + # Check the wallet we tried to migrate is still BDB + self.assert_is_bdb(wallet_name) + + self.stop_node(self.old_node.index) + self.old_node.chain = "regtest" + self.old_node.replace_in_config([("signet=", "regtest="), ("[signet]", "[regtest]")]) + self.start_node(self.old_node.index) + self.connect_nodes(1, 0) + def unsynced_wallet_on_pruned_node_fails(self): self.log.info("Test migration of an unsynced wallet on a pruned node fails gracefully") wallet = self.create_legacy_wallet("", load_on_startup=False) @@ -1544,6 +1568,7 @@ def run_test(self): self.test_miniscript() self.test_taproot() self.test_solvable_no_privs() + self.test_loading_failure_after_migration() # Note: After this test the first 250 blocks of 'master_node' are pruned self.unsynced_wallet_on_pruned_node_fails() From 2dd8c9e2b591871564e3dfcf217fa23053949b60 Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Wed, 7 Jan 2026 16:02:58 -0800 Subject: [PATCH 191/356] QA: wallet_migration: Use assert_is_bdb in test_migration_failure Github-Pull: bitcoin/bitcoin#34226 Rebased-From: eeaf28dbe0e09819ab0e95bb7762b29536bdeef6 --- test/functional/wallet_migration.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index b1b67e5efe..b95d320e8a 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -646,11 +646,7 @@ def test_migration_failure(self, wallet_name): backup_path = master_path / f"{backup_prefix}_{mocked_time}.legacy.bak" assert backup_path.exists() - with open(self.master_node.wallets_path / wallet_name / self.wallet_data_filename, "rb") as f: - data = f.read(16) - _, _, magic = struct.unpack("QII", data) - assert_equal(magic, BTREE_MAGIC) - + self.assert_is_bdb(wallet_name) # Cleanup if is_default: From c0a48a437d8ad93b30a19aa98f0acef87dc57a38 Mon Sep 17 00:00:00 
2001 From: furszy Date: Sun, 4 Jan 2026 12:25:21 -0500 Subject: [PATCH 192/356] QA: wallet_migration: Use assert_is_bdb in unsynced_wallet_on_pruned_node_fails Github-Pull: bitcoin/bitcoin#34156 Rebased-From: b7c34d08dd9549a95cffc6ec1ffa4bb4f81e35eb --- test/functional/wallet_migration.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index b95d320e8a..3feca22d7a 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -1510,10 +1510,7 @@ def unsynced_wallet_on_pruned_node_fails(self): # Verify the /wallets/ path exists, the wallet is still BDB and the backup file is there. assert self.master_node.wallets_path.exists() - with open(self.master_node.wallets_path / "wallet.dat", "rb") as f: - data = f.read(16) - _, _, magic = struct.unpack("QII", data) - assert_equal(magic, BTREE_MAGIC) + self.assert_is_bdb("") backup_path = self.master_node.wallets_path / f"default_wallet_{mocked_time}.legacy.bak" assert backup_path.exists() From 30879e14822a666f5809b8153abfe4b6a61a1260 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 15 Jan 2026 23:00:38 +0000 Subject: [PATCH 193/356] QA: wallet_migration: Test that all the failed migration cases also succeed properly --- test/functional/wallet_migration.py | 33 +++++++++++++++++++++++------ 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 3feca22d7a..d385c6be12 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -535,6 +535,7 @@ def clear_default_wallet(self, backup_file): # Test cleanup: Clear unnamed default wallet for subsequent tests (self.old_node.wallets_path / "wallet.dat").unlink() (self.master_node.wallets_path / "wallet.dat").unlink(missing_ok=True) + (self.master_node.wallets_path / "wallet.dat-journal").unlink(missing_ok=True) 
shutil.rmtree(self.master_node.wallets_path / "default_wallet_watchonly", ignore_errors=True) shutil.rmtree(self.master_node.wallets_path / "default_wallet_solvables", ignore_errors=True) backup_file.unlink() @@ -595,13 +596,13 @@ def test_default_wallet_watch_only(self): wallet.unloadwallet() self.clear_default_wallet(backup_file=Path(res["backup_path"])) - def test_migration_failure(self, wallet_name): + def test_migration_failure(self, wallet_name, fail=True): is_default = wallet_name == "" wallet_pretty_name = "unnamed (default)" if is_default else f'"{wallet_name}"' - self.log.info(f"Test failure during migration of wallet named: {wallet_pretty_name}") + self.log.info(f"Test {'failure' if fail else 'success'} during migration of wallet named: {wallet_pretty_name}") # Preface, set up legacy wallet and unload it master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name) - wallet = self.create_legacy_wallet(wallet_name, blank=True) + wallet = self.create_legacy_wallet(wallet_name) wallet.importaddress(master_wallet.getnewaddress(address_type="legacy")) wallet.unloadwallet() @@ -620,8 +621,9 @@ def test_migration_failure(self, wallet_name): # DoMigration(). 
wo_dirname = f"{wo_prefix}_watchonly" watch_only_dir = self.master_node.wallets_path / wo_dirname - os.mkdir(watch_only_dir) - shutil.copyfile(old_path / "wallet.dat", watch_only_dir / "wallet.dat") + if fail: + os.mkdir(watch_only_dir) + shutil.copyfile(old_path / "wallet.dat", watch_only_dir / "wallet.dat") # Make a file in the wallets dir that must still exist after migration survive_path = self.master_node.wallets_path / "survive" @@ -630,7 +632,10 @@ def test_migration_failure(self, wallet_name): mocked_time = int(time.time()) self.master_node.setmocktime(mocked_time) - assert_raises_rpc_error(-4, "Failed to create database", self.master_node.migratewallet, wallet_name) + if fail: + assert_raises_rpc_error(-4, "Failed to create database", self.master_node.migratewallet, wallet_name) + else: + self.master_node.migratewallet(wallet_name) self.master_node.setmocktime(0) # Verify the /wallets/ path exists. @@ -646,13 +651,26 @@ def test_migration_failure(self, wallet_name): backup_path = master_path / f"{backup_prefix}_{mocked_time}.legacy.bak" assert backup_path.exists() - self.assert_is_bdb(wallet_name) + if fail: + self.assert_is_bdb(wallet_name) + else: + wallet = self.master_node.get_wallet_rpc(wallet_name) + info = wallet.getwalletinfo() + assert_equal(info["descriptors"], True) + self.assert_is_sqlite(wallet_name) + self.assert_is_sqlite(wo_dirname) + + self.master_node.unloadwallet(wallet_name) + self.master_node.unloadwallet(wo_dirname) # Cleanup if is_default: self.clear_default_wallet(backup_path) else: backup_path.unlink() + if not fail: + Path(watch_only_dir / "wallet.dat-journal").unlink(missing_ok=True) + Path(master_path / "wallet.dat-journal").unlink(missing_ok=True) Path(watch_only_dir / "wallet.dat").unlink() Path(watch_only_dir).rmdir() Path(master_path / "wallet.dat").unlink() @@ -1542,6 +1560,7 @@ def run_test(self): ] for wallet_name in migration_failure_cases: self.test_migration_failure(wallet_name=wallet_name) + 
self.test_migration_failure(wallet_name=wallet_name, fail=False) self.test_default_wallet() self.test_default_wallet_watch_only() From 66f8e3ad986d17d7093762366750460ad25e5001 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 15 Jan 2026 23:08:39 +0000 Subject: [PATCH 194/356] QA: wallet_migration: Test survival of a file in the wallet-owned directory itself --- test/functional/wallet_migration.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index d385c6be12..ba5a0cb5e1 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -629,6 +629,9 @@ def test_migration_failure(self, wallet_name, fail=True): survive_path = self.master_node.wallets_path / "survive" open(survive_path, "wb").close() assert survive_path.exists() + survive2_path = master_path / "survive" + open(survive2_path, "wb").close() + assert survive2_path.exists() mocked_time = int(time.time()) self.master_node.setmocktime(mocked_time) @@ -643,6 +646,7 @@ def test_migration_failure(self, wallet_name, fail=True): # Verify survive is still there assert survive_path.exists() + assert survive2_path.exists() # Verify both wallet paths exist. 
assert Path(old_path / "wallet.dat").exists() assert Path(master_path / "wallet.dat").exists() From 63ec4863f9d5b8cd26f3d29e9b2c9ac41068a896 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 15 Jan 2026 23:23:11 +0000 Subject: [PATCH 195/356] QA: wallet_migration: Check that reload failure does not leave behind a SQLite journal file --- test/functional/wallet_migration.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index ba5a0cb5e1..0de526a9ee 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -1537,6 +1537,8 @@ def unsynced_wallet_on_pruned_node_fails(self): backup_path = self.master_node.wallets_path / f"default_wallet_{mocked_time}.legacy.bak" assert backup_path.exists() + assert not (self.master_node.wallets_path / "wallet.dat-journal").exists() + self.clear_default_wallet(backup_path) From 3e0458ceb80a3e41795ce16236f70f964161e2f3 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 20 Jan 2026 05:03:31 +0000 Subject: [PATCH 196/356] QA: wallet_migration: Test ".", "./", "..", and "../subdir" --- test/functional/wallet_migration.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 0de526a9ee..97449b9fad 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -1560,7 +1560,11 @@ def run_test(self): migration_failure_cases = [ "", + ".", + "./", + "..", "../", + "../subdir", os.path.abspath(self.master_node.datadir_path / "absolute_path"), "normallynamedwallet" ] From baf6c8bc7baf51bdd89a647bd64ae0edb7aa1301 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 21 Jan 2026 13:33:20 +0000 Subject: [PATCH 197/356] QA: wallet_migration: Test migration of wallets without a dedicated directory --- test/functional/wallet_migration.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git 
a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 97449b9fad..4ec2ab2dcb 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -606,13 +606,19 @@ def test_migration_failure(self, wallet_name, fail=True): wallet.importaddress(master_wallet.getnewaddress(address_type="legacy")) wallet.unloadwallet() + migrating_file_to_dir = wallet_name.endswith('.dat') if os.path.isabs(wallet_name): + assert not migrating_file_to_dir old_path = master_path = Path(wallet_name) else: old_path = self.old_node.wallets_path / wallet_name master_path = self.master_node.wallets_path / wallet_name - os.makedirs(master_path, exist_ok=True) - shutil.copyfile(old_path / "wallet.dat", master_path / "wallet.dat") + if migrating_file_to_dir: + assert not master_path.exists() + shutil.copyfile(old_path / "wallet.dat", master_path) + else: + os.makedirs(master_path, exist_ok=True) + shutil.copyfile(old_path / "wallet.dat", master_path / "wallet.dat") # This will be the watch-only directory the migration tries to create, # we make migration fail by placing a wallet.dat file there. 
@@ -629,7 +635,10 @@ def test_migration_failure(self, wallet_name, fail=True): survive_path = self.master_node.wallets_path / "survive" open(survive_path, "wb").close() assert survive_path.exists() - survive2_path = master_path / "survive" + if migrating_file_to_dir: + survive2_path = master_path.parent / "survive" + else: + survive2_path = master_path / "survive" open(survive2_path, "wb").close() assert survive2_path.exists() @@ -679,6 +688,10 @@ def test_migration_failure(self, wallet_name, fail=True): Path(watch_only_dir).rmdir() Path(master_path / "wallet.dat").unlink() Path(old_path / "wallet.dat").unlink(missing_ok=True) + try: + master_path.rmdir() + except Exception: + pass def test_direct_file(self): self.log.info("Test migration of a wallet that is not in a wallet directory") @@ -1562,6 +1575,7 @@ def run_test(self): "", ".", "./", + "test.dat", "..", "../", "../subdir", From 672f105977d978dffdd956a6795f6bbec63c55ee Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 21 Jan 2026 13:37:24 +0000 Subject: [PATCH 198/356] QA: wallet_migration: Test migration of bare "wallet.dat" --- test/functional/wallet_migration.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 4ec2ab2dcb..c13df76ebf 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -687,11 +687,20 @@ def test_migration_failure(self, wallet_name, fail=True): Path(watch_only_dir / "wallet.dat").unlink() Path(watch_only_dir).rmdir() Path(master_path / "wallet.dat").unlink() + if old_path != master_path: + Path(old_path / "wallet.dat").unlink() + shutil.rmtree(old_path / "database") + Path(old_path / "db.log").unlink(missing_ok=True) + Path(old_path / ".walletlock").unlink(missing_ok=True) Path(old_path / "wallet.dat").unlink(missing_ok=True) try: master_path.rmdir() except Exception: pass + try: + old_path.rmdir() + except Exception: + pass def test_direct_file(self): 
self.log.info("Test migration of a wallet that is not in a wallet directory") @@ -1575,6 +1584,7 @@ def run_test(self): "", ".", "./", + self.wallet_data_filename, "test.dat", "..", "../", From c4551092f6aa30af123197a5435eb0d9b2494df2 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 21 Jan 2026 15:22:28 +0000 Subject: [PATCH 199/356] QA: wallet_migration: Abstract named wallet cleanup --- test/functional/wallet_migration.py | 37 ++++++++++++++--------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index c13df76ebf..10b687941f 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -540,6 +540,21 @@ def clear_default_wallet(self, backup_file): shutil.rmtree(self.master_node.wallets_path / "default_wallet_solvables", ignore_errors=True) backup_file.unlink() + def clear_wallet(self, path, is_sqlite, rmdir_fail_ok=False): + Path(path / "wallet.dat").unlink() + if is_sqlite: + Path(path / "wallet.dat-journal").unlink(missing_ok=True) + else: + if (path / "database").exists(): + shutil.rmtree(path / "database") + Path(path / "db.log").unlink(missing_ok=True) + Path(path / ".walletlock").unlink(missing_ok=True) + try: + path.rmdir() + except Exception: + if not rmdir_fail_ok: + raise + def test_default_wallet(self): self.log.info("Test migration of the wallet named as the empty string") wallet = self.create_legacy_wallet("") @@ -681,26 +696,10 @@ def test_migration_failure(self, wallet_name, fail=True): self.clear_default_wallet(backup_path) else: backup_path.unlink() - if not fail: - Path(watch_only_dir / "wallet.dat-journal").unlink(missing_ok=True) - Path(master_path / "wallet.dat-journal").unlink(missing_ok=True) - Path(watch_only_dir / "wallet.dat").unlink() - Path(watch_only_dir).rmdir() - Path(master_path / "wallet.dat").unlink() + self.clear_wallet(watch_only_dir, is_sqlite=not fail) + self.clear_wallet(master_path, 
is_sqlite=not fail, rmdir_fail_ok=True) if old_path != master_path: - Path(old_path / "wallet.dat").unlink() - shutil.rmtree(old_path / "database") - Path(old_path / "db.log").unlink(missing_ok=True) - Path(old_path / ".walletlock").unlink(missing_ok=True) - Path(old_path / "wallet.dat").unlink(missing_ok=True) - try: - master_path.rmdir() - except Exception: - pass - try: - old_path.rmdir() - except Exception: - pass + self.clear_wallet(old_path, is_sqlite=False, rmdir_fail_ok=True) def test_direct_file(self): self.log.info("Test migration of a wallet that is not in a wallet directory") From d6439c0d4e6a9ca72a56a55231b46dad6ae15983 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 21 Jan 2026 15:02:52 +0000 Subject: [PATCH 200/356] QA: wallet_migration: Test various other relative paths --- test/functional/wallet_migration.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 10b687941f..dc896e381a 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -1588,6 +1588,10 @@ def run_test(self): "..", "../", "../subdir", + "../subdir/..", + "subdir/two", + "subdir/../two", + "subdir/two/..", os.path.abspath(self.master_node.datadir_path / "absolute_path"), "normallynamedwallet" ] From 691e570529dea30a9277117e7f58c93e0ae76680 Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 28 May 2025 06:17:54 -0400 Subject: [PATCH 201/356] wallet: refactor, dedup wallet re-loading code Github-Pull: bitcoin/bitcoin#31423 Rebased-From: e86d71b749c08bde6002b9aa2baee824975a518a --- src/wallet/wallet.cpp | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 913c745320..44a2395635 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4614,23 +4614,22 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr if (success) { - // Migration successful, unload all 
wallets locally, then reload them. - // Reload the main wallet LogInfo("Loading new wallets after migration...\n"); - track_for_cleanup(*local_wallet); - success = reload_wallet(local_wallet); + // Migration successful, unload all wallets locally, then reload them. + // Note: We use a pointer to the shared_ptr to avoid increasing its reference count, + // as 'reload_wallet' expects to be the sole owner (use_count == 1). + for (std::shared_ptr* wallet_ptr : {&local_wallet, &res.watchonly_wallet, &res.solvables_wallet}) { + if (success && *wallet_ptr) { + std::shared_ptr& wallet = *wallet_ptr; + // Save db path and reload wallet + track_for_cleanup(*wallet); + success = reload_wallet(wallet); + } + } + + // Set main wallet res.wallet = local_wallet; res.wallet_name = wallet_name; - if (success && res.watchonly_wallet) { - // Reload watchonly - track_for_cleanup(*res.watchonly_wallet); - success = reload_wallet(res.watchonly_wallet); - } - if (success && res.solvables_wallet) { - // Reload solvables - track_for_cleanup(*res.solvables_wallet); - success = reload_wallet(res.solvables_wallet); - } } if (!success) { // Migration failed, cleanup From 34ac206b1c132df3f7c8967b7264333a42f3288a Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 4 Dec 2024 12:48:18 -0500 Subject: [PATCH 202/356] wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet Currently, the migration process creates a brand-new descriptor wallet with no connection to the user's legacy wallet when the legacy wallet lacks key material and contains only watch-only scripts. This behavior is not aligned with user expectations. If the legacy wallet contains only watch-only scripts, the migration process should only generate a watch-only wallet instead. 
Github-Pull: bitcoin/bitcoin#31423 Rebased-From: b78990734621b8fe46c68a6e7edaf1fbd2f7d351 --- src/qt/walletcontroller.cpp | 2 +- src/wallet/wallet.cpp | 44 +++++++++++++++++++++++++---- test/functional/wallet_migration.py | 22 +++++++++++++-- 3 files changed, 59 insertions(+), 9 deletions(-) diff --git a/src/qt/walletcontroller.cpp b/src/qt/walletcontroller.cpp index dd093e984a..869f9614c5 100644 --- a/src/qt/walletcontroller.cpp +++ b/src/qt/walletcontroller.cpp @@ -468,7 +468,7 @@ void MigrateWalletActivity::migrate(const std::string& name) auto res{node().walletLoader().migrateWallet(name, passphrase)}; if (res) { - m_success_message = tr("The wallet '%1' was migrated successfully.").arg(GUIUtil::HtmlEscape(GUIUtil::WalletDisplayName(res->wallet->getWalletName()))); + m_success_message = tr("The wallet '%1' was migrated successfully.").arg(GUIUtil::HtmlEscape(GUIUtil::WalletDisplayName(name))); if (res->watchonly_wallet_name) { m_success_message += QChar(' ') + tr("Watchonly scripts have been migrated to a new wallet named '%1'.").arg(GUIUtil::HtmlEscape(GUIUtil::WalletDisplayName(res->watchonly_wallet_name.value()))); } diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 44a2395635..2d04c01940 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4141,6 +4141,9 @@ util::Result CWallet::ApplyMigrationData(WalletBatch& local_wallet_batch, return util::Error{Untranslated(STR_INTERNAL_BUG("Error: Legacy wallet data missing"))}; } + // Note: when the legacy wallet has no spendable scripts, it must be empty at the end of the process. 
+ bool has_spendable_material = !data.desc_spkms.empty() || data.master_key.key.IsValid(); + // Get all invalid or non-watched scripts that will not be migrated std::set not_migrated_dests; for (const auto& script : legacy_spkm->GetNotMineScriptPubKeys()) { @@ -4172,9 +4175,9 @@ util::Result CWallet::ApplyMigrationData(WalletBatch& local_wallet_batch, m_external_spk_managers.clear(); m_internal_spk_managers.clear(); - // Setup new descriptors + // Setup new descriptors (only if we are migrating any key material) SetWalletFlagWithDB(local_wallet_batch, WALLET_FLAG_DESCRIPTORS); - if (!IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) { + if (has_spendable_material && !IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) { // Use the existing master key if we have it if (data.master_key.key.IsValid()) { SetupDescriptorScriptPubKeyMans(local_wallet_batch, data.master_key); @@ -4324,6 +4327,14 @@ util::Result CWallet::ApplyMigrationData(WalletBatch& local_wallet_batch, } } + // If there was no key material in the main wallet, there should be no records on it anymore. + // This wallet will be discarded at the end of the process. Only wallets that contain the + // migrated records will be presented to the user. + if (!has_spendable_material) { + if (!m_address_book.empty()) return util::Error{_("Error: Not all address book records were migrated")}; + if (!mapWallet.empty()) return util::Error{_("Error: Not all transaction records were migrated")}; + } + return {}; // all good } @@ -4576,6 +4587,14 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr } } + // Indicates whether the current wallet is empty after migration. + // Notes: + // When non-empty: the local wallet becomes the main spendable wallet. + // When empty: The local wallet is excluded from the result, as the + // user does not expect an empty spendable wallet after + // migrating only watch-only scripts. 
+ bool empty_local_wallet = false; + { LOCK(local_wallet->cs_wallet); // First change to using SQLite @@ -4585,6 +4604,8 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr success = local_wallet->IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET); if (!success) { success = DoMigration(*local_wallet, context, error, res); + // No scripts mean empty wallet after migration + empty_local_wallet = local_wallet->GetAllScriptPubKeyMans().empty(); } else { // Make sure that descriptors flag is actually set local_wallet->SetWalletFlag(WALLET_FLAG_DESCRIPTORS); @@ -4614,6 +4635,15 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr if (success) { + Assume(!res.wallet); // We will set it here. + // Check if the local wallet is empty after migration + if (empty_local_wallet) { + // This wallet has no records. We can safely remove it. + std::vector paths_to_remove = local_wallet->GetDatabase().Files(); + local_wallet.reset(); + for (const auto& path_to_remove : paths_to_remove) fs::remove_all(path_to_remove); + } + LogInfo("Loading new wallets after migration...\n"); // Migration successful, unload all wallets locally, then reload them. // Note: We use a pointer to the shared_ptr to avoid increasing its reference count, @@ -4624,12 +4654,14 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // Save db path and reload wallet track_for_cleanup(*wallet); success = reload_wallet(wallet); + + // When no wallet is set, set the main wallet. 
+ if (!res.wallet) { + res.wallet_name = wallet->GetName(); + res.wallet = std::move(wallet); + } } } - - // Set main wallet - res.wallet = local_wallet; - res.wallet_name = wallet_name; } if (!success) { // Migration failed, cleanup diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 8129baf438..264a91db8f 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -117,9 +117,14 @@ def migrate_and_get_rpc(self, wallet_name, **kwargs): # Migrate, checking that rescan does not occur with self.master_node.assert_debug_log(expected_msgs=[], unexpected_msgs=["Rescanning"]): migrate_info = self.master_node.migratewallet(wallet_name=wallet_name, **kwargs) + # Update wallet name in case the initial wallet was completely migrated to a watch-only wallet + # (in which case the wallet name would be suffixed by the 'watchonly' term) + wallet_name = migrate_info['wallet_name'] wallet = self.master_node.get_wallet_rpc(wallet_name) assert_equal(wallet.getwalletinfo()["descriptors"], True) self.assert_is_sqlite(wallet_name) + # Always verify the backup path exist after migration + assert os.path.exists(migrate_info['backup_path']) return migrate_info, wallet def test_basic(self): @@ -1127,8 +1132,15 @@ def test_migrate_simple_watch_only(self): wallet.importaddress(address=p2pk_script.hex()) # Migrate wallet in the latest node res, _ = self.migrate_and_get_rpc("bare_p2pk") - wo_wallet = self.master_node.get_wallet_rpc(res['watchonly_name']) + wo_wallet = self.master_node.get_wallet_rpc(res['wallet_name']) assert_equal(wo_wallet.listdescriptors()['descriptors'][0]['desc'], descsum_create(f'pk({pubkey.hex()})')) + assert_equal(wo_wallet.getwalletinfo()["private_keys_enabled"], False) + + # Ensure that migrating a wallet with watch-only scripts does not create a spendable wallet. 
+ assert_equal('bare_p2pk_watchonly', res['wallet_name']) + assert "bare_p2pk" not in self.master_node.listwallets() + assert "bare_p2pk" not in [w["name"] for w in self.master_node.listwalletdir()["wallets"]] + wo_wallet.unloadwallet() def test_manual_keys_import(self): @@ -1158,7 +1170,9 @@ def test_manual_keys_import(self): res, _ = self.migrate_and_get_rpc("import_pubkeys") # Same as before, there should be descriptors in the watch-only wallet for the imported pubkey - wo_wallet = self.nodes[0].get_wallet_rpc(res['watchonly_name']) + wo_wallet = self.nodes[0].get_wallet_rpc(res['wallet_name']) + # Assert this is a watch-only wallet + assert_equal(wo_wallet.getwalletinfo()["private_keys_enabled"], False) # As we imported the pubkey only, there will be no key origin in the following descriptors pk_desc = descsum_create(f'pk({pubkey_hex})') pkh_desc = descsum_create(f'pkh({pubkey_hex})') @@ -1169,6 +1183,10 @@ def test_manual_keys_import(self): # Verify all expected descriptors were migrated migrated_desc = [item['desc'] for item in wo_wallet.listdescriptors()['descriptors']] assert_equal(expected_descs, migrated_desc) + # Ensure that migrating a wallet with watch-only scripts does not create a spendable wallet. + assert_equal('import_pubkeys_watchonly', res['wallet_name']) + assert "import_pubkeys" not in self.master_node.listwallets() + assert "import_pubkeys" not in [w["name"] for w in self.master_node.listwalletdir()["wallets"]] wo_wallet.unloadwallet() def test_p2wsh(self): From 432b39cee75eeeeb078e0660bcfb76776139a245 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Tue, 15 Jul 2025 16:11:36 -0700 Subject: [PATCH 203/356] wallet: Set migrated wallet name only on success After a wallet is migrated and we are trying to load it, if it could not be loaded, don't try to set the wallet name. 
Github-Pull: bitcoin/bitcoin#32984 Rebased-From: 8a4cfddf23a4575a1042dfa97d3478727775e8dd --- src/wallet/wallet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 2d04c01940..819117e59b 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4656,7 +4656,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr success = reload_wallet(wallet); // When no wallet is set, set the main wallet. - if (!res.wallet) { + if (success && !res.wallet) { res.wallet_name = wallet->GetName(); res.wallet = std::move(wallet); } From 0760cb6937f75124e6be90d2bf369d810ccf2dc4 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 22 Jan 2026 00:47:14 +0000 Subject: [PATCH 204/356] Wallet/Migration: Backport followups to watchonly-only migration - Comment updates - Use fs::remove when deleting empty migration target - Adapt tests to only having watchonly migration output where applicable - break out of wallet reload loop when any fail Originally-From: f4c7e28e80bf9af50b03a770b641fd309a801589 and d70b159c42008ac3b63d1c43d99d4f1316d2f1ef and 82caa8193a3e36f248dcc949e0cd41def191efac (#34156) --- src/wallet/wallet.cpp | 14 +++++++++----- test/functional/wallet_migration.py | 13 +++---------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 819117e59b..b76c4055ee 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4641,7 +4641,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // This wallet has no records. We can safely remove it. 
std::vector paths_to_remove = local_wallet->GetDatabase().Files(); local_wallet.reset(); - for (const auto& path_to_remove : paths_to_remove) fs::remove_all(path_to_remove); + for (const auto& path_to_remove : paths_to_remove) fs::remove(path_to_remove); } LogInfo("Loading new wallets after migration...\n"); @@ -4651,12 +4651,16 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr for (std::shared_ptr* wallet_ptr : {&local_wallet, &res.watchonly_wallet, &res.solvables_wallet}) { if (success && *wallet_ptr) { std::shared_ptr& wallet = *wallet_ptr; - // Save db path and reload wallet + // Track db path and load wallet track_for_cleanup(*wallet); - success = reload_wallet(wallet); + if (!reload_wallet(wallet)) { + success = false; + break; + } - // When no wallet is set, set the main wallet. - if (success && !res.wallet) { + // Set the first successfully loaded wallet as the main one. + // The loop order is intentional and must always start with the local wallet. + if (!res.wallet) { res.wallet_name = wallet->GetName(); res.wallet = std::move(wallet); } diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py index 264a91db8f..cbd568fc85 100755 --- a/test/functional/wallet_migration.py +++ b/test/functional/wallet_migration.py @@ -573,8 +573,7 @@ def test_default_wallet_watch_only(self): wallet = self.create_legacy_wallet("", blank=True) wallet.importaddress(master_wallet.getnewaddress(address_type="legacy")) - res, def_wallet = self.migrate_and_get_rpc("") - wallet = self.master_node.get_wallet_rpc("default_wallet_watchonly") + res, wallet = self.migrate_and_get_rpc("") info = wallet.getwalletinfo() assert_equal(info["descriptors"], True) @@ -582,14 +581,8 @@ def test_default_wallet_watch_only(self): assert_equal(info["private_keys_enabled"], False) assert_equal(info["walletname"], "default_wallet_watchonly") - # The default wallet will still exist and have newly generated descriptors - assert (self.master_node.wallets_path / 
"wallet.dat").exists() - def_wallet_info = def_wallet.getwalletinfo() - assert_equal(def_wallet_info["descriptors"], True) - assert_equal(def_wallet_info["format"], "sqlite") - assert_equal(def_wallet_info["private_keys_enabled"], True) - assert_equal(def_wallet_info["walletname"], "") - assert_greater_than(def_wallet_info["keypoolsize"], 0) + # Check the default wallet is not available anymore + assert not (self.master_node.wallets_path / "wallet.dat").exists() wallet.unloadwallet() self.clear_default_wallet(backup_file=Path(res["backup_path"])) From a5a9245acc6c770d1344e1a7ee1f4131ddf96b42 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Tue, 20 Jan 2026 15:34:46 +0100 Subject: [PATCH 205/356] coins: replace `std::distance` with unambiguous pointer subtraction Avoid calling `std::distance` on null pointers in `PoolResource::AllocateChunk`. Compute remaining bytes with `m_available_memory_end - m_available_memory_it` instead, which is well-defined to be `0` when both are `nullptr`. Co-authored-by: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Github-Pull: bitcoin/bitcoin#34161 Rebased-From: 477c5504e05f9031449cdbf62bf329eac427cb0c --- src/support/allocators/pool.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/support/allocators/pool.h b/src/support/allocators/pool.h index c8e70ebacf..b1a95936f6 100644 --- a/src/support/allocators/pool.h +++ b/src/support/allocators/pool.h @@ -153,7 +153,7 @@ class PoolResource final void AllocateChunk() { // if there is still any available memory left, put it into the freelist. 
- size_t remaining_available_bytes = std::distance(m_available_memory_it, m_available_memory_end); + size_t remaining_available_bytes = m_available_memory_end - m_available_memory_it; if (0 != remaining_available_bytes) { PlacementAddToList(m_available_memory_it, m_free_lists[remaining_available_bytes / ELEM_ALIGN_BYTES]); } From f8f28911e32243626f0f594a9d22b0367cc16dcf Mon Sep 17 00:00:00 2001 From: sedited Date: Wed, 7 Jan 2026 20:51:28 +0100 Subject: [PATCH 206/356] init: Fix non-zero code on interrupt An interrupt does not create a failure exit code during normal operation. This should also be the case when interrupt is triggered during initialization. However a failure exit code is currently returned if an interrupt occurs during init. Fix this by making `AppInitMain` return true instead of false, which further up the call stack sets the `EXIT_FAILURE` code. Also add a check for the interrupt condition during GUI startup. Github-Pull: bitcoin/bitcoin#34224 Rebased-From: 997e7b4d7cf7c4622938798423447375383184c0 --- src/init.cpp | 4 +-- src/qt/bitcoin.cpp | 77 +++++++++++++++++++++++----------------------- 2 files changed, 41 insertions(+), 40 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 988daefeec..ed427f9008 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1585,7 +1585,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // As the program has not fully started yet, Shutdown() is possibly overkill. if (ShutdownRequested(node)) { LogPrintf("Shutdown requested. 
Exiting.\n"); - return false; + return true; } ChainstateManager& chainman = *Assert(node.chainman); @@ -1736,7 +1736,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) } if (ShutdownRequested(node)) { - return false; + return true; } // ********************************************************* Step 12: start node diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index b1a8461d02..fa14acbdff 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -404,53 +404,54 @@ void BitcoinApplication::initializeResult(bool success, interfaces::BlockAndHead { qDebug() << __func__ << ": Initialization result: " << success; - if (success) { - delete m_splash; - m_splash = nullptr; + if (!success || m_node->shutdownRequested()) { + requestShutdown(); + return; + } + + delete m_splash; + m_splash = nullptr; - // Log this only after AppInitMain finishes, as then logging setup is guaranteed complete - qInfo() << "Platform customization:" << platformStyle->getName(); - clientModel = new ClientModel(node(), optionsModel); - window->setClientModel(clientModel, &tip_info); + // Log this only after AppInitMain finishes, as then logging setup is guaranteed complete + qInfo() << "Platform customization:" << platformStyle->getName(); + clientModel = new ClientModel(node(), optionsModel); + window->setClientModel(clientModel, &tip_info); - // If '-min' option passed, start window minimized (iconified) or minimized to tray - bool start_minimized = gArgs.GetBoolArg("-min", false); + // If '-min' option passed, start window minimized (iconified) or minimized to tray + bool start_minimized = gArgs.GetBoolArg("-min", false); #ifdef ENABLE_WALLET - if (WalletModel::isWalletEnabled()) { - m_wallet_controller = new WalletController(*clientModel, platformStyle, this); - window->setWalletController(m_wallet_controller, /*show_loading_minimized=*/start_minimized); - if (paymentServer) { - paymentServer->setOptionsModel(optionsModel); - } + if 
(WalletModel::isWalletEnabled()) { + m_wallet_controller = new WalletController(*clientModel, platformStyle, this); + window->setWalletController(m_wallet_controller, /*show_loading_minimized=*/start_minimized); + if (paymentServer) { + paymentServer->setOptionsModel(optionsModel); } + } #endif // ENABLE_WALLET - // Show or minimize window - if (!start_minimized) { - window->show(); - } else if (clientModel->getOptionsModel()->getMinimizeToTray() && window->hasTrayIcon()) { - // do nothing as the window is managed by the tray icon - } else { - window->showMinimized(); - } - Q_EMIT windowShown(window); + // Show or minimize window + if (!start_minimized) { + window->show(); + } else if (clientModel->getOptionsModel()->getMinimizeToTray() && window->hasTrayIcon()) { + // do nothing as the window is managed by the tray icon + } else { + window->showMinimized(); + } + Q_EMIT windowShown(window); #ifdef ENABLE_WALLET - // Now that initialization/startup is done, process any command-line - // bitcoin: URIs or payment requests: - if (paymentServer) { - connect(paymentServer, &PaymentServer::receivedPaymentRequest, window, &BitcoinGUI::handlePaymentRequest); - connect(window, &BitcoinGUI::receivedURI, paymentServer, &PaymentServer::handleURIOrFile); - connect(paymentServer, &PaymentServer::message, [this](const QString& title, const QString& message, unsigned int style) { - window->message(title, message, style); - }); - QTimer::singleShot(100ms, paymentServer, &PaymentServer::uiReady); - } -#endif - pollShutdownTimer->start(SHUTDOWN_POLLING_DELAY); - } else { - requestShutdown(); + // Now that initialization/startup is done, process any command-line + // bitcoin: URIs or payment requests: + if (paymentServer) { + connect(paymentServer, &PaymentServer::receivedPaymentRequest, window, &BitcoinGUI::handlePaymentRequest); + connect(window, &BitcoinGUI::receivedURI, paymentServer, &PaymentServer::handleURIOrFile); + connect(paymentServer, &PaymentServer::message, [this](const 
QString& title, const QString& message, unsigned int style) { + window->message(title, message, style); + }); + QTimer::singleShot(100ms, paymentServer, &PaymentServer::uiReady); } +#endif + pollShutdownTimer->start(SHUTDOWN_POLLING_DELAY); } void BitcoinApplication::handleRunawayException(const QString &message) From 8a7ee854bb01ffe60c2aab3ba1d43e9e64c618c8 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 22 Jan 2026 21:31:43 +0000 Subject: [PATCH 207/356] Diff-minimise --- src/qt/bitcoin.cpp | 77 +++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 39 deletions(-) diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index fa14acbdff..0fda82258a 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -404,54 +404,53 @@ void BitcoinApplication::initializeResult(bool success, interfaces::BlockAndHead { qDebug() << __func__ << ": Initialization result: " << success; - if (!success || m_node->shutdownRequested()) { - requestShutdown(); - return; - } - - delete m_splash; - m_splash = nullptr; + if (success && !m_node->shutdownRequested()) { + delete m_splash; + m_splash = nullptr; - // Log this only after AppInitMain finishes, as then logging setup is guaranteed complete - qInfo() << "Platform customization:" << platformStyle->getName(); - clientModel = new ClientModel(node(), optionsModel); - window->setClientModel(clientModel, &tip_info); + // Log this only after AppInitMain finishes, as then logging setup is guaranteed complete + qInfo() << "Platform customization:" << platformStyle->getName(); + clientModel = new ClientModel(node(), optionsModel); + window->setClientModel(clientModel, &tip_info); - // If '-min' option passed, start window minimized (iconified) or minimized to tray - bool start_minimized = gArgs.GetBoolArg("-min", false); + // If '-min' option passed, start window minimized (iconified) or minimized to tray + bool start_minimized = gArgs.GetBoolArg("-min", false); #ifdef ENABLE_WALLET - if 
(WalletModel::isWalletEnabled()) { - m_wallet_controller = new WalletController(*clientModel, platformStyle, this); - window->setWalletController(m_wallet_controller, /*show_loading_minimized=*/start_minimized); - if (paymentServer) { - paymentServer->setOptionsModel(optionsModel); + if (WalletModel::isWalletEnabled()) { + m_wallet_controller = new WalletController(*clientModel, platformStyle, this); + window->setWalletController(m_wallet_controller, /*show_loading_minimized=*/start_minimized); + if (paymentServer) { + paymentServer->setOptionsModel(optionsModel); + } } - } #endif // ENABLE_WALLET - // Show or minimize window - if (!start_minimized) { - window->show(); - } else if (clientModel->getOptionsModel()->getMinimizeToTray() && window->hasTrayIcon()) { - // do nothing as the window is managed by the tray icon - } else { - window->showMinimized(); - } - Q_EMIT windowShown(window); + // Show or minimize window + if (!start_minimized) { + window->show(); + } else if (clientModel->getOptionsModel()->getMinimizeToTray() && window->hasTrayIcon()) { + // do nothing as the window is managed by the tray icon + } else { + window->showMinimized(); + } + Q_EMIT windowShown(window); #ifdef ENABLE_WALLET - // Now that initialization/startup is done, process any command-line - // bitcoin: URIs or payment requests: - if (paymentServer) { - connect(paymentServer, &PaymentServer::receivedPaymentRequest, window, &BitcoinGUI::handlePaymentRequest); - connect(window, &BitcoinGUI::receivedURI, paymentServer, &PaymentServer::handleURIOrFile); - connect(paymentServer, &PaymentServer::message, [this](const QString& title, const QString& message, unsigned int style) { - window->message(title, message, style); - }); - QTimer::singleShot(100ms, paymentServer, &PaymentServer::uiReady); - } + // Now that initialization/startup is done, process any command-line + // bitcoin: URIs or payment requests: + if (paymentServer) { + connect(paymentServer, &PaymentServer::receivedPaymentRequest, 
window, &BitcoinGUI::handlePaymentRequest); + connect(window, &BitcoinGUI::receivedURI, paymentServer, &PaymentServer::handleURIOrFile); + connect(paymentServer, &PaymentServer::message, [this](const QString& title, const QString& message, unsigned int style) { + window->message(title, message, style); + }); + QTimer::singleShot(100ms, paymentServer, &PaymentServer::uiReady); + } #endif - pollShutdownTimer->start(SHUTDOWN_POLLING_DELAY); + pollShutdownTimer->start(SHUTDOWN_POLLING_DELAY); + } else { + requestShutdown(); + } } void BitcoinApplication::handleRunawayException(const QString &message) From a179ccc654c70165ecae0c01fd3e593b1a320de6 Mon Sep 17 00:00:00 2001 From: glozow Date: Thu, 8 Jan 2026 07:55:25 -0800 Subject: [PATCH 208/356] [miniminer] stop assuming ancestor fees >= self fees Negative fees are possible with prioritisetransaction. Github-Pull: bitcoin/bitcoin#34235 Rebased-From: 2cade5d5d17010cd89855b26da350d6e54683805 --- src/node/mini_miner.cpp | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/node/mini_miner.cpp b/src/node/mini_miner.cpp index 2827242f96..925fd6a76f 100644 --- a/src/node/mini_miner.cpp +++ b/src/node/mini_miner.cpp @@ -167,8 +167,7 @@ void MiniMiner::DeleteAncestorPackage(const std::setsecond) { - // If these fail, we must be double-deducting. - Assume(descendant->second.GetModFeesWithAncestors() >= anc->second.GetModifiedFee()); + // If this fails, we must be double-deducting. Don't check fees because negative is possible. 
Assume(descendant->second.GetSizeWithAncestors() >= anc->second.GetTxSize()); descendant->second.UpdateAncestorState(-anc->second.GetTxSize(), -anc->second.GetModifiedFee()); } @@ -192,10 +191,9 @@ void MiniMiner::SanityCheck() const // m_entries, m_entries_by_txid, and m_descendant_set_by_txid all same size Assume(m_entries.size() == m_entries_by_txid.size()); Assume(m_entries.size() == m_descendant_set_by_txid.size()); - // Cached ancestor values should be at least as large as the transaction's own fee and size + // Cached ancestor values should be at least as large as the transaction's own size Assume(std::all_of(m_entries.begin(), m_entries.end(), [](const auto& entry) { - return entry->second.GetSizeWithAncestors() >= entry->second.GetTxSize() && - entry->second.GetModFeesWithAncestors() >= entry->second.GetModifiedFee();})); + return entry->second.GetSizeWithAncestors() >= entry->second.GetTxSize();})); // None of the entries should be to-be-replaced transactions Assume(std::all_of(m_to_be_replaced.begin(), m_to_be_replaced.end(), [&](const auto& txid){return m_entries_by_txid.find(txid) == m_entries_by_txid.end();})); From 69f9349c6772df076fdd59995eb22dbffb677b1f Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Sun, 11 Jan 2026 14:09:44 +0100 Subject: [PATCH 209/356] doc: add 433 (Pay to Anchor) to bips.md Github-Pull: bitcoin/bitcoin#34252 Rebased-From: 44b12cdb11f0fd3264f24f537a5d6989e4fe96a9 --- doc/bips.md | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/bips.md b/doc/bips.md index d544ff822b..edb166a89f 100644 --- a/doc/bips.md +++ b/doc/bips.md @@ -73,3 +73,4 @@ BIPs that are implemented by Bitcoin Core: * [`BIP 386`](https://github.com/bitcoin/bips/blob/master/bip-0386.mediawiki): tr() Output Script Descriptors are implemented as of **v22.0** ([PR 22051](https://github.com/bitcoin/bitcoin/pull/22051)). 
* [`BIP 387`](https://github.com/bitcoin/bips/blob/master/bip-0387.mediawiki): Tapscript Multisig Output Script Descriptors are implemented as of **v24.0** ([PR 24043](https://github.com/bitcoin/bitcoin/pull/24043)). * [`BIP 431`](https://github.com/bitcoin/bips/blob/master/bip-0431.mediawiki): transactions with nVersion=3 are standard and treated as Topologically Restricted Until Confirmation as of **v28.0** ([PR 29496](https://github.com/bitcoin/bitcoin/pull/29496)). +* [`BIP 433`](https://github.com/bitcoin/bips/blob/master/bip-0433.mediawiki): Spending of Pay to Anchor (P2A) outputs is standard as of **v28.0** ([PR 30352](https://github.com/bitcoin/bitcoin/pull/30352)). From bc942ae369b221369da0437e375bf88c8eefb365 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Fri, 21 Nov 2025 12:38:03 +0000 Subject: [PATCH 210/356] Revert "gui, qt: brintToFront workaround for Wayland" This reverts commit 15aa7d023688700a47997b92108de95f2d864f5a. Github-Pull: bitcoin-core/gui#914 Rebased-From: 0672e727bf1db5995562e9656d18b699aeba5fe0 --- src/qt/guiutil.cpp | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index 2369f6b631..63def42aec 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -405,26 +405,19 @@ bool isObscured(QWidget *w) void bringToFront(QWidget* w) { - if (w) { - if (QGuiApplication::platformName() == "wayland") { - auto flags = w->windowFlags(); - w->setWindowFlags(flags|Qt::WindowStaysOnTopHint); - w->show(); - w->setWindowFlags(flags); - w->show(); - } else { #ifdef Q_OS_MACOS - ForceActivation(); + ForceActivation(); #endif - // activateWindow() (sometimes) helps with keyboard focus on Windows - if (w->isMinimized()) { - w->showNormal(); - } else { - w->show(); - } - w->activateWindow(); - w->raise(); + + if (w) { + // activateWindow() (sometimes) helps with keyboard focus on Windows + if (w->isMinimized()) { + 
w->showNormal(); + } else { + w->show(); } + w->activateWindow(); + w->raise(); } } From 62774df86f2d6f7a3faeb14b67c50ceb449e57ec Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 22 Jan 2026 23:10:08 +0000 Subject: [PATCH 211/356] GUI: Restore bringToFront Wayland workaround for Qt versions <6.3.2 when the bug was fixed --- src/qt/guiutil.cpp | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index 63def42aec..dc12719d63 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -410,14 +410,26 @@ void bringToFront(QWidget* w) #endif if (w) { - // activateWindow() (sometimes) helps with keyboard focus on Windows - if (w->isMinimized()) { - w->showNormal(); - } else { +#if (QT_VERSION < QT_VERSION_CHECK(6, 3, 2)) + if (QGuiApplication::platformName() == "wayland") { + // Workaround for bug fixed in https://codereview.qt-project.org/c/qt/qtwayland/+/421125 + auto flags = w->windowFlags(); + w->setWindowFlags(flags|Qt::WindowStaysOnTopHint); w->show(); + w->setWindowFlags(flags); + w->show(); + } else +#endif + { + // activateWindow() (sometimes) helps with keyboard focus on Windows + if (w->isMinimized()) { + w->showNormal(); + } else { + w->show(); + } + w->activateWindow(); + w->raise(); } - w->activateWindow(); - w->raise(); } } From 7475d134f6a3a6039ab6b9d39706ade47c764aa8 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 15 Jan 2026 19:01:19 +0000 Subject: [PATCH 212/356] Wallet/bdb: Safely and correctly list files only used by the single wallet If any other files exist in the directory, we cannot assume the sharable files are exclusively for this wallet. 
But if they are, this also cleans up other log.* files --- src/wallet/bdb.cpp | 48 ++++++++++++++++++++++++++++++++++++++++++++++ src/wallet/bdb.h | 15 +-------------- 2 files changed, 49 insertions(+), 14 deletions(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 79851dff33..f5a18266ed 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -340,6 +341,53 @@ bool BerkeleyDatabase::Verify(bilingual_str& errorStr) return true; } +std::vector BerkeleyDatabase::Files() +{ + std::vector files; + // If the wallet is the *only* file, clean up the entire BDB environment + constexpr auto build_files_list = [](std::vector& files, const std::shared_ptr& env, const fs::path& filename) { + if (env->m_databases.size() != 1) return false; + + const auto env_dir = env->Directory(); + const auto db_subdir = env_dir / "database"; + if (fs::exists(db_subdir)) { + if (!fs::is_directory(db_subdir)) return false; + for (const auto& entry : fs::directory_iterator(db_subdir)) { + const auto& path = entry.path().filename(); + if (!fs::PathToString(path).starts_with("log.")) { + return false; + } + files.emplace_back(entry.path()); + } + } + const std::set allowed_paths = { + filename, + "db.log", + ".walletlock", + "database" + }; + for (const auto& entry : fs::directory_iterator(env_dir)) { + const auto& path = entry.path().filename(); + if (allowed_paths.contains(path)) { + files.emplace_back(entry.path()); + } else if (fs::is_directory(entry.path())) { + // Subdirectories can't possibly be using this db env, and is expected if this is a non-directory wallet + // Do not include them in Files, but still allow the env cleanup + } else { + return false; + } + } + return true; + }; + try { + if (build_files_list(files, env, m_filename)) return files; + } catch (...) 
{ + // Give up building the comprehensive file list if any error occurs + } + // Otherwise, it's only really safe to delete the one wallet file + return {env->Directory() / m_filename}; +} + void BerkeleyEnvironment::CheckpointLSN(const std::string& strFile) { dbenv->txn_checkpoint(0, 0, 0); diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index ec773fd177..a7cf953ed2 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -132,20 +132,7 @@ class BerkeleyDatabase : public WalletDatabase /** Return path to main database filename */ std::string Filename() override { return fs::PathToString(env->Directory() / m_filename); } - std::vector Files() override - { - std::vector files; - files.emplace_back(env->Directory() / m_filename); - if (env->m_databases.size() == 1) { - files.emplace_back(env->Directory() / "db.log"); - files.emplace_back(env->Directory() / ".walletlock"); - files.emplace_back(env->Directory() / "database" / "log.0000000001"); - files.emplace_back(env->Directory() / "database"); - // Note that this list is not exhaustive as BDB may create more log files, and possibly other ones too - // However it should be good enough for the only calls to Files() - } - return files; - } + std::vector Files() override; std::string Format() override { return "bdb"; } /** From 60f529027c6eacbdc298fab50192f8c60d7082a1 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 15 Jan 2026 19:03:23 +0000 Subject: [PATCH 213/356] Wallet/Migration: If loading the new watchonly or solvables wallet fails, log the correct wallet name in error message --- src/wallet/wallet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 913c745320..992b02a989 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4532,7 +4532,7 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr to_reload = LoadWallet(context, name, /*load_on_start=*/std::nullopt, options, status, error, warnings); if (!to_reload) { 
LogError("Failed to load wallet '%s' after migration. Rolling back migration to preserve consistency. " - "Error cause: %s\n", wallet_name, error.original); + "Error cause: %s\n", name, error.original); return false; } return true; From cef01d0be5223e9d33efc897d7fbe5d0a08692c0 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 15 Jan 2026 19:27:23 +0000 Subject: [PATCH 214/356] Wallet/Migration: Skip moving the backup file back and forth for no reason Since we no longer delete the wallet directory, there's no need to vacate it The moving only served to risk errors by crossing filesystem boundaries (which fs::rename can't handle) --- src/wallet/wallet.cpp | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 992b02a989..df53f904cc 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4634,9 +4634,6 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr } if (!success) { // Migration failed, cleanup - // Before deleting the wallet's directory, copy the backup file to the top-level wallets dir - fs::path temp_backup_location = fsbridge::AbsPathJoin(GetWalletDir(), backup_filename); - fs::rename(backup_path, temp_backup_location); // Make list of wallets to cleanup std::vector> created_wallets; @@ -4680,15 +4677,12 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // Convert the backup file to the wallet db file by renaming it and moving it into the wallet's directory. // Reload it into memory if the wallet was previously loaded. 
bilingual_str restore_error; - const auto& ptr_wallet = RestoreWallet(context, temp_backup_location, wallet_name, /*load_on_start=*/std::nullopt, status, restore_error, warnings, /*load_after_restore=*/was_loaded); + const auto& ptr_wallet = RestoreWallet(context, backup_path, wallet_name, /*load_on_start=*/std::nullopt, status, restore_error, warnings, /*load_after_restore=*/was_loaded); if (!restore_error.empty()) { error += restore_error + _("\nUnable to restore backup of wallet."); return util::Error{error}; } - // The wallet directory has been restored, but just in case, copy the previously created backup to the wallet dir - fs::rename(temp_backup_location, backup_path); - // Verify that there is no dangling wallet: when the wallet wasn't loaded before, expect null. // This check is performed after restoration to avoid an early error before saving the backup. bool wallet_reloaded = ptr_wallet != nullptr; From 69a6b9b1152ba0bb3edab6d2a54509fd416b24c8 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 20 Jan 2026 18:20:14 +0000 Subject: [PATCH 215/356] Bugfix: Wallet/Migration: Move backup into wallet directory when migrating from non-directory While 30.x+ keep backup files in walletdir, 29.x places them in the migrated wallet directory --- src/wallet/wallet.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index df53f904cc..45adda65ea 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4581,6 +4581,12 @@ util::Result MigrateLegacyToDescriptor(std::shared_ptr // First change to using SQLite if (!local_wallet->MigrateToSQLite(error)) return util::Error{error}; + // In case we're migrating from file to directory, move the backup into it + this_wallet_dir = fs::absolute(fs::PathFromString(local_wallet->GetDatabase().Filename())).parent_path(); + backup_path = this_wallet_dir / backup_filename; + fs::rename(res.backup_path, backup_path); + res.backup_path = backup_path; + // Do the migration 
of keys and scripts for non-blank wallets, and cleanup if it fails success = local_wallet->IsWalletFlagSet(WALLET_FLAG_BLANK_WALLET); if (!success) { From 65173944ed60df3b9cffca95932aed8720921478 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 21 Jan 2026 21:32:40 +0000 Subject: [PATCH 216/356] QA: tool_wallet: Check that db.log is deleted with a lone legacy wallet, but not with a shared db environment --- test/functional/tool_wallet.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py index 979804a5fe..788d9b0ee8 100755 --- a/test/functional/tool_wallet.py +++ b/test/functional/tool_wallet.py @@ -410,17 +410,30 @@ def test_dump_createfromdump(self): self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') assert not (self.nodes[0].wallets_path / "badload").is_dir() if not self.options.descriptors: - os.rename(self.nodes[0].wallets_path / "wallet.dat", self.nodes[0].wallets_path / "default.wallet.dat") + os.rename(self.nodes[0].wallets_path / "wallet.dat", self.nodes[0].wallets_path / "../default.wallet.dat") + (self.nodes[0].wallets_path / "db.log").unlink(missing_ok=True) self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') assert self.nodes[0].wallets_path.exists() assert not (self.nodes[0].wallets_path / "wallet.dat").exists() + if not self.options.descriptors: + assert not (self.nodes[0].wallets_path / "db.log").exists() self.log.info('Checking createfromdump with an unnamed wallet') self.do_tool_createfromdump("", "wallet.dump") assert (self.nodes[0].wallets_path / "wallet.dat").exists() os.unlink(self.nodes[0].wallets_path / "wallet.dat") if not self.options.descriptors: - os.rename(self.nodes[0].wallets_path / "default.wallet.dat", self.nodes[0].wallets_path / 
"wallet.dat") + os.rename(self.nodes[0].wallets_path / "../default.wallet.dat", self.nodes[0].wallets_path / "wallet.dat") + + self.log.info('Checking createfromdump with multiple non-directory wallets') + assert not (self.nodes[0].wallets_path / "wallet.dat").is_dir() + assert (self.nodes[0].wallets_path / "db.log").exists() + os.rename(self.nodes[0].wallets_path / "wallet.dat", self.nodes[0].wallets_path / "test.dat") + self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump') + assert not (self.nodes[0].wallets_path / "wallet.dat").exists() + assert (self.nodes[0].wallets_path / "test.dat").exists() + assert (self.nodes[0].wallets_path / "db.log").exists() + os.rename(self.nodes[0].wallets_path / "test.dat", self.nodes[0].wallets_path / "wallet.dat") def test_chainless_conflicts(self): self.log.info("Test wallet tool when wallet contains conflicting transactions") From 254617ec290ceee0b31f08145f86b3a4301940e8 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 7 Jan 2026 22:18:08 +0000 Subject: [PATCH 217/356] Bugfix: wallet/bdb: Check for and safely handle errors from txn_checkpoint and lsn_reset --- src/wallet/bdb.cpp | 45 ++++++++++++++++++++++++++++++++++----------- src/wallet/bdb.h | 2 +- 2 files changed, 35 insertions(+), 12 deletions(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index f5a18266ed..60d29bc7af 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -388,12 +388,17 @@ std::vector BerkeleyDatabase::Files() return {env->Directory() / m_filename}; } -void BerkeleyEnvironment::CheckpointLSN(const std::string& strFile) +bool BerkeleyEnvironment::CheckpointLSN(const std::string& strFile) { - dbenv->txn_checkpoint(0, 0, 0); - if (fMockDb) - return; - dbenv->lsn_reset(strFile.c_str(), 0); + if (dbenv->txn_checkpoint(0, 0, 0) != 0) { + return false; + } + if (!fMockDb) { + if (dbenv->lsn_reset(strFile.c_str(), 0) != 0) { + return false; + } + } + 
return true; } BerkeleyDatabase::~BerkeleyDatabase() @@ -569,7 +574,10 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip) if (m_refcount <= 0) { // Flush log data to the dat file env->CloseDb(m_filename); - env->CheckpointLSN(strFile); + if (!env->CheckpointLSN(strFile)) { + LogPrintLevel(BCLog::WALLETDB, BCLog::Level::Error, "%s: Failed to checkpoint database file %s\n", __func__, strFile); + return false; + } m_refcount = -1; bool fSuccess = true; @@ -669,10 +677,19 @@ void BerkeleyEnvironment::Flush(bool fShutdown) // Move log data to the dat file CloseDb(filename); LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s checkpoint\n", strFile); - dbenv->txn_checkpoint(0, 0, 0); + if (dbenv->txn_checkpoint(0, 0, 0) != 0) { + LogPrintLevel(BCLog::WALLETDB, BCLog::Level::Error, "%s: %s checkpoint FAILED\n", __func__, strFile); + no_dbs_accessed = false; + continue; + } LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s detach\n", strFile); - if (!fMockDb) - dbenv->lsn_reset(strFile.c_str(), 0); + if (!fMockDb) { + if (dbenv->lsn_reset(strFile.c_str(), 0) != 0) { + LogPrintLevel(BCLog::WALLETDB, BCLog::Level::Error, "%s: %s detach FAILED\n", __func__, strFile); + no_dbs_accessed = false; + continue; + } + } LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s closed\n", strFile); nRefCount = -1; } else { @@ -713,7 +730,10 @@ bool BerkeleyDatabase::PeriodicFlush() // Flush wallet file so it's self contained env->CloseDb(m_filename); - env->CheckpointLSN(strFile); + if (!env->CheckpointLSN(strFile)) { + LogPrintLevel(BCLog::WALLETDB, BCLog::Level::Error, "%s: FAILED to flush wallet %s\n", __func__, strFile); + return false; + } m_refcount = -1; LogDebug(BCLog::WALLETDB, "Flushed %s %dms\n", strFile, Ticks(SteadyClock::now() - start)); @@ -732,7 +752,10 @@ bool BerkeleyDatabase::Backup(const std::string& strDest) const { // Flush log data to the dat file env->CloseDb(m_filename); - env->CheckpointLSN(strFile); + if (!env->CheckpointLSN(strFile)) { + 
LogPrintLevel(BCLog::WALLETDB, BCLog::Level::Error, "%s: FAILED to flush wallet %s\n", __func__, strFile); + return false; + } // Copy wallet file fs::path pathSrc = env->Directory() / m_filename; diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index a7cf953ed2..0eb50c0ad7 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -69,7 +69,7 @@ class BerkeleyEnvironment bool Open(bilingual_str& error); void Close(); void Flush(bool fShutdown); - void CheckpointLSN(const std::string& strFile); + [[nodiscard]] bool CheckpointLSN(const std::string& strFile); void CloseDb(const fs::path& filename); void ReloadDbEnv(); From e9b272757445568e1c400256761a0949256d8c3e Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 7 Jan 2026 22:38:28 +0000 Subject: [PATCH 218/356] Bugfix: wallet/bdb: Only release walletdir lock after deleting BDB "database" subdirectory Otherwise, there's a race condition if another program/instance opens a wallet before we delete it --- src/wallet/bdb.cpp | 9 ++++++--- src/wallet/bdb.h | 2 +- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 60d29bc7af..b4e2675f8f 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -95,7 +95,7 @@ std::shared_ptr GetBerkeleyEnv(const fs::path& env_director // BerkeleyBatch // -void BerkeleyEnvironment::Close() +void BerkeleyEnvironment::Close(const bool do_unlock) { if (!fDbEnvInit) return; @@ -122,7 +122,9 @@ void BerkeleyEnvironment::Close() if (error_file) fclose(error_file); - UnlockDirectory(fs::PathFromString(strPath), ".walletlock"); + if (do_unlock) { + UnlockDirectory(fs::PathFromString(strPath), ".walletlock"); + } } void BerkeleyEnvironment::Reset() @@ -701,10 +703,11 @@ void BerkeleyEnvironment::Flush(bool fShutdown) char** listp; if (no_dbs_accessed) { dbenv->log_archive(&listp, DB_ARCH_REMOVE); - Close(); + Close(/*do_unlock=*/false); if (!fMockDb) { fs::remove_all(fs::PathFromString(strPath) / "database"); } + 
UnlockDirectory(fs::PathFromString(strPath), ".walletlock"); } } } diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index 0eb50c0ad7..2baf80a48a 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -67,7 +67,7 @@ class BerkeleyEnvironment fs::path Directory() const { return fs::PathFromString(strPath); } bool Open(bilingual_str& error); - void Close(); + void Close(bool do_unlock = true); void Flush(bool fShutdown); [[nodiscard]] bool CheckpointLSN(const std::string& strFile); From f990766d8b17702e0c44705a5cb9f3e15ece234a Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 7 Jan 2026 23:21:27 +0000 Subject: [PATCH 219/356] Bugfix: wallet/bdb: Catch and handle exceptions deleting "database" directory --- src/wallet/bdb.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index b4e2675f8f..9e8b78eb07 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -705,7 +705,11 @@ void BerkeleyEnvironment::Flush(bool fShutdown) dbenv->log_archive(&listp, DB_ARCH_REMOVE); Close(/*do_unlock=*/false); if (!fMockDb) { - fs::remove_all(fs::PathFromString(strPath) / "database"); + try { + fs::remove_all(fs::PathFromString(strPath) / "database"); + } catch (const fs::filesystem_error& e) { + LogPrintLevel(BCLog::WALLETDB, BCLog::Level::Error, "%s: FAILED to delete \"database\" directory: %s\n", __func__, fsbridge::get_filesystem_error_message(e)); + } } UnlockDirectory(fs::PathFromString(strPath), ".walletlock"); } From dd14284bc42e3f7ecb1c39db6f4d64ee127e3803 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 7 Jan 2026 23:33:50 +0000 Subject: [PATCH 220/356] Bugfix: wallet/bdb: Don't nuke "database" directory even on clean shutdown It's hypothetically possible, albeit difficult to arrange, that other non-loaded wallets are still relying on it --- src/wallet/bdb.cpp | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 9e8b78eb07..9cf9cfc918 
100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -199,7 +199,7 @@ bool BerkeleyEnvironment::Open(bilingual_str& err) Reset(); err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory()))); if (ret == DB_RUNRECOVERY) { - err += Untranslated(" ") + _("This error could occur if this wallet was not shutdown cleanly and was last loaded using a build with a newer version of Berkeley DB. If so, please use the software that last loaded this wallet"); + err += Untranslated(" ") + _("This error could occur if this wallet was last loaded using a build with a newer version of Berkeley DB."); } return false; } @@ -704,13 +704,6 @@ void BerkeleyEnvironment::Flush(bool fShutdown) if (no_dbs_accessed) { dbenv->log_archive(&listp, DB_ARCH_REMOVE); Close(/*do_unlock=*/false); - if (!fMockDb) { - try { - fs::remove_all(fs::PathFromString(strPath) / "database"); - } catch (const fs::filesystem_error& e) { - LogPrintLevel(BCLog::WALLETDB, BCLog::Level::Error, "%s: FAILED to delete \"database\" directory: %s\n", __func__, fsbridge::get_filesystem_error_message(e)); - } - } UnlockDirectory(fs::PathFromString(strPath), ".walletlock"); } } From 65fa960550e5bd7d549efeb47fbed8c5d7581e0c Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 7 Jan 2026 22:40:32 +0000 Subject: [PATCH 221/356] wallet/bdb: Remove no-longer-needed do_unlock parameter of BerkeleyEnvironment::Close() This reverts commit 8a06c8cf55f67934a4d86732217e38e42ac5198e. 
--- src/wallet/bdb.cpp | 9 +++------ src/wallet/bdb.h | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 9cf9cfc918..81e9395178 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -95,7 +95,7 @@ std::shared_ptr GetBerkeleyEnv(const fs::path& env_director // BerkeleyBatch // -void BerkeleyEnvironment::Close(const bool do_unlock) +void BerkeleyEnvironment::Close() { if (!fDbEnvInit) return; @@ -122,9 +122,7 @@ void BerkeleyEnvironment::Close(const bool do_unlock) if (error_file) fclose(error_file); - if (do_unlock) { - UnlockDirectory(fs::PathFromString(strPath), ".walletlock"); - } + UnlockDirectory(fs::PathFromString(strPath), ".walletlock"); } void BerkeleyEnvironment::Reset() @@ -703,8 +701,7 @@ void BerkeleyEnvironment::Flush(bool fShutdown) char** listp; if (no_dbs_accessed) { dbenv->log_archive(&listp, DB_ARCH_REMOVE); - Close(/*do_unlock=*/false); - UnlockDirectory(fs::PathFromString(strPath), ".walletlock"); + Close(); } } } diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h index 2baf80a48a..0eb50c0ad7 100644 --- a/src/wallet/bdb.h +++ b/src/wallet/bdb.h @@ -67,7 +67,7 @@ class BerkeleyEnvironment fs::path Directory() const { return fs::PathFromString(strPath); } bool Open(bilingual_str& error); - void Close(bool do_unlock = true); + void Close(); void Flush(bool fShutdown); [[nodiscard]] bool CheckpointLSN(const std::string& strFile); From 5e0cbf59b37b32e74198fe66fe8047b5768e2243 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 8 Jan 2026 22:50:26 +0000 Subject: [PATCH 222/356] Bugfix: wallet/bdb: Actually set db refcount to -1 after lsn_reset in BerkeleyEnvironment::Flush --- src/wallet/bdb.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 81e9395178..9b25fc8d73 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -669,7 +669,8 @@ void BerkeleyEnvironment::Flush(bool fShutdown) bool no_dbs_accessed = 
true; for (auto& db_it : m_databases) { const fs::path& filename = db_it.first; - int nRefCount = db_it.second.get().m_refcount; + BerkeleyDatabase& database = db_it.second.get(); + const int nRefCount = database.m_refcount; if (nRefCount < 0) continue; const std::string strFile = fs::PathToString(filename); LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: Flushing %s (refcount = %d)...\n", strFile, nRefCount); @@ -691,7 +692,7 @@ void BerkeleyEnvironment::Flush(bool fShutdown) } } LogDebug(BCLog::WALLETDB, "BerkeleyEnvironment::Flush: %s closed\n", strFile); - nRefCount = -1; + database.m_refcount = -1; } else { no_dbs_accessed = false; } From 3cb8deb755e724768974e7ee89fb781f957d321b Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 22 Jan 2026 18:04:58 +0000 Subject: [PATCH 223/356] build: Promote incompatible-bdb warning to a fatal error --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8337d69535..603617ecf1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -126,7 +126,7 @@ if(WITH_BDB) "BDB (legacy) wallets opened by this build will not be portable!" ) if(WARN_INCOMPATIBLE_BDB) - message(WARNING "If this is intended, pass \"-DWARN_INCOMPATIBLE_BDB=OFF\".\n" + message(FATAL_ERROR "If this is intended, pass \"-DWARN_INCOMPATIBLE_BDB=OFF\".\n" "Passing \"-DWITH_BDB=OFF\" will suppress this warning." 
) endif() From 5f4b4a5860f0e579bc0f408377930b24368dac5d Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 14 Jan 2026 20:32:09 +0000 Subject: [PATCH 224/356] Bugfix: net_processing: Restore missing comma between peer and peeraddr in "receive version message" and "New ___ peer connected" Versions prior to 29.x all included a comma here, and the general format of the message still includes commas between all other fields --- src/net_processing.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/net_processing.cpp b/src/net_processing.cpp index a814346beb..8f0f20b46e 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -3574,10 +3574,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; - LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s\n", + LogDebug(BCLog::NET, "receive version message: %s: version %d, blocks=%d, us=%s, txrelay=%d, peer=%d%s%s%s\n", SanitizeString(cleanSubVer, SAFE_CHARS_DEFAULT, true), pfrom.nVersion, peer->m_starting_height, addrMe.ToStringAddrPort(), fRelay, pfrom.GetId(), - pfrom.LogIP(fLogIPs), (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); + fLogIPs ? "," : "", pfrom.LogIP(fLogIPs), + (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); peer->m_time_offset = NodeSeconds{std::chrono::seconds{nTime}} - Now(); if (!pfrom.IsInboundConn()) { @@ -3617,11 +3618,12 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // can be triggered by an attacker at high rate. 
if (!pfrom.IsInboundConn() || LogAcceptCategory(BCLog::NET, BCLog::Level::Debug)) { const auto mapped_as{m_connman.GetMappedAS(pfrom.addr)}; - LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s\n", + LogPrintf("New %s %s peer connected: version: %d, blocks=%d, peer=%d%s%s%s\n", pfrom.ConnectionTypeAsString(), TransportTypeAsString(pfrom.m_transport->GetInfo().transport_type), pfrom.nVersion.load(), peer->m_starting_height, - pfrom.GetId(), pfrom.LogIP(fLogIPs), + pfrom.GetId(), + fLogIPs ? "," : "", pfrom.LogIP(fLogIPs), (mapped_as ? strprintf(", mapped_as=%d", mapped_as) : "")); } From 607799b6dbd265e21448c9f1167e382726775710 Mon Sep 17 00:00:00 2001 From: furszy Date: Mon, 29 Dec 2025 14:23:24 -0500 Subject: [PATCH 225/356] wallet: handle non-writable db directories 1) For wallet load, this fixes a crash. We currently allow loading wallets located on non-writable directories. This is problematic because the node crashes on any subsequent write. E.g. generating a block is enough to trigger it. 2) For wallet creation, this improves the returned error msg. 
Before: creating a wallet would return a generic error: "SQLiteDatabase: Failed to open database: unable to open database file" After: creating a wallet returns: "SQLiteDatabase: Failed to open database in directory : directory is not writable" Github-Pull: bitcoin/bitcoin#34176 Rebased-From: 5301b77a7c9dec1dfa7ba0f08a151c1c9dc22fdb --- src/util/fs_helpers.cpp | 24 ++++++++++++++++++++++++ src/util/fs_helpers.h | 8 ++++++++ src/wallet/sqlite.cpp | 4 ++++ 3 files changed, 36 insertions(+) diff --git a/src/util/fs_helpers.cpp b/src/util/fs_helpers.cpp index 4d06afe144..061f2c79b0 100644 --- a/src/util/fs_helpers.cpp +++ b/src/util/fs_helpers.cpp @@ -8,6 +8,7 @@ #include // IWYU pragma: keep #include +#include #include #include #include @@ -309,3 +310,26 @@ std::optional InterpretPermString(const std::string& s) return std::nullopt; } } + +bool IsDirWritable(const fs::path& dir_path) +{ + // Attempt to create a tmp file in the directory + if (!fs::is_directory(dir_path)) throw std::runtime_error(strprintf("Path %s is not a directory", fs::PathToString(dir_path))); + FastRandomContext rng; + const auto tmp = dir_path / fs::PathFromString(strprintf(".tmp_%d", rng.rand64())); + + const char* mode; +#ifdef __MINGW64__ + mode = "w"; // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210 +#else + mode = "wx"; +#endif + + if (const auto created{fsbridge::fopen(tmp, mode)}) { + std::fclose(created); + std::error_code ec; + fs::remove(tmp, ec); // clean up, ignore errors + return true; + } + return false; +} diff --git a/src/util/fs_helpers.h b/src/util/fs_helpers.h index 28dd6d979d..1bbd1f6021 100644 --- a/src/util/fs_helpers.h +++ b/src/util/fs_helpers.h @@ -76,6 +76,14 @@ std::string PermsToSymbolicString(fs::perms p); */ std::optional InterpretPermString(const std::string& s); +/** Check if a directory is writable by creating a temporary file on it. 
+ * + * @param[in] dir_path Path of the directory to test + * @return true if a temporary file could be created and removed, false otherwise. + * @throw std::runtime_error if dir_path is not a directory. + */ +bool IsDirWritable(const fs::path& dir_path); + #ifdef WIN32 fs::path GetSpecialFolderPath(int nFolder, bool fCreate = true); #endif diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp index 896a2fc0f3..46eb5d9d8a 100644 --- a/src/wallet/sqlite.cpp +++ b/src/wallet/sqlite.cpp @@ -254,7 +254,11 @@ void SQLiteDatabase::Open() if (m_db == nullptr) { if (!m_mock) { TryCreateDirectories(m_dir_path); + if (!IsDirWritable(m_dir_path)) { + throw std::runtime_error(strprintf("SQLiteDatabase: Failed to open database in directory '%s': directory is not writable", fs::PathToString(m_dir_path))); + } } + int ret = sqlite3_open_v2(m_file_path.c_str(), &m_db, flags, nullptr); if (ret != SQLITE_OK) { throw std::runtime_error(strprintf("SQLiteDatabase: Failed to open database: %s\n", sqlite3_errstr(ret))); From 44e9edefc20962a18b3511aa180964c3cfbe114e Mon Sep 17 00:00:00 2001 From: furszy Date: Mon, 29 Dec 2025 14:34:09 -0500 Subject: [PATCH 226/356] test: add coverage for wallet creation in non-writable directory Github-Pull: bitcoin/bitcoin#34176 Rebased-From: ff3d1ad2a2f58210cdee8eb4fd5c3ba0483d6d45 --- test/functional/test_framework/util.py | 10 ++++++++++ test/functional/wallet_createwallet.py | 21 +++++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 6cf04a2e26..c52b87feb5 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -599,3 +599,13 @@ def find_vout_for_address(node, txid, addr): if addr == tx["vout"][i]["scriptPubKey"]["address"]: return i raise RuntimeError("Vout not found for address: txid=%s, addr=%s" % (txid, addr)) + +def is_dir_writable(dir_path: pathlib.Path) -> bool: + """Return True if we can create a 
file in the directory, False otherwise""" + try: + tmp = dir_path / f".tmp_{random.randrange(1 << 32)}" + tmp.touch() + tmp.unlink() + return True + except OSError: + return False diff --git a/test/functional/wallet_createwallet.py b/test/functional/wallet_createwallet.py index 06386bbbe9..c92ca69c28 100755 --- a/test/functional/wallet_createwallet.py +++ b/test/functional/wallet_createwallet.py @@ -4,6 +4,8 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test createwallet arguments. """ +import os +import stat from test_framework.address import key_to_p2wpkh from test_framework.descriptors import descsum_create @@ -11,6 +13,7 @@ from test_framework.util import ( assert_equal, assert_raises_rpc_error, + is_dir_writable, ) from test_framework.wallet_util import generate_keypair, WalletUnlock @@ -29,10 +32,28 @@ def set_test_params(self): def skip_test_if_missing_module(self): self.skip_if_no_wallet() + def test_bad_dir_permissions(self, node): + self.log.info("Test wallet creation failure due to non-writable directory") + wallet_name = "bad_permissions" + dir_path = node.wallets_path / wallet_name + dir_path.mkdir(parents=True) + original_dir_perms = dir_path.stat().st_mode + os.chmod(dir_path, original_dir_perms & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)) + if is_dir_writable(dir_path): + self.log.warning("Skipping non-writable directory test: unable to enforce read-only permissions") + else: + # Run actual test + assert_raises_rpc_error(-4, f"SQLiteDatabase: Failed to open database in directory '{str(dir_path)}': directory is not writable", node.createwallet, wallet_name=wallet_name, descriptors=True) + # Reset directory permissions for cleanup + dir_path.chmod(original_dir_perms) + + def run_test(self): node = self.nodes[0] self.generate(node, 1) # Leave IBD for sethdseed + self.test_bad_dir_permissions(node) + self.log.info("Run createwallet with invalid parameters.") # Run createwallet with invalid parameters. 
This must not prevent a new wallet with the same name from being created with the correct parameters. assert_raises_rpc_error(-4, "Passphrase provided but private keys are disabled. A passphrase is only used to encrypt private keys, so cannot be used for wallets with private keys disabled.", From fde25e03f3e5b490182d4145c6dc48c3081556da Mon Sep 17 00:00:00 2001 From: furszy Date: Sat, 3 Jan 2026 12:37:45 -0500 Subject: [PATCH 227/356] test: add coverage for loading a wallet in a non-writable directory Previously, wallets in non-writable directories were loaded, leading to crashes on any subsequent write. Github-Pull: bitcoin/bitcoin#34176 Rebased-From: 3e43bce7688ed0f660e74734802068c9fc266412 --- test/functional/wallet_startup.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/test/functional/wallet_startup.py b/test/functional/wallet_startup.py index 6feb00af8e..c6b2def042 100755 --- a/test/functional/wallet_startup.py +++ b/test/functional/wallet_startup.py @@ -6,9 +6,14 @@ Verify that a bitcoind node can maintain list of wallets loading on startup """ +import os +import stat + from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, + assert_raises_rpc_error, + is_dir_writable, ) @@ -28,6 +33,27 @@ def setup_nodes(self): self.add_nodes(self.num_nodes) self.start_nodes() + def test_load_unwritable_wallet(self, node): + self.log.info("Test wallet load failure due to non-writable directory") + wallet_name = "bad_permissions" + + node.createwallet(wallet_name, descriptors=True) + node.unloadwallet(wallet_name) + + dir_path = node.wallets_path / wallet_name + original_dir_perms = dir_path.stat().st_mode + os.chmod(dir_path, original_dir_perms & ~(stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)) + + if is_dir_writable(dir_path): + self.log.warning("Skipping load non-writable directory test: unable to enforce read-only permissions") + else: + # Ensure we don't load a wallet located in a 
non-writable directory. + # The node will crash later on if we cannot write to disk. + assert_raises_rpc_error(-4, f"SQLiteDatabase: Failed to open database in directory '{str(dir_path)}': directory is not writable", node.loadwallet, wallet_name) + + # Reset directory permissions for cleanup + dir_path.chmod(original_dir_perms) + def run_test(self): self.log.info('Should start without any wallets') assert_equal(self.nodes[0].listwallets(), []) @@ -57,5 +83,7 @@ def run_test(self): self.restart_node(0) assert_equal(set(self.nodes[0].listwallets()), set(('w2', 'w3'))) + self.test_load_unwritable_wallet(self.nodes[0]) + if __name__ == '__main__': WalletStartupTest(__file__).main() From a65bce292bc4f2b80f385addedbd76f30b31d0d7 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 21 Jan 2026 23:11:04 +0000 Subject: [PATCH 228/356] Bugfix: Wallet/bdb: Catch exceptions in MakeBerkeleyDatabase Wallet creation/opening errors turn into RPC_MISC_ERROR without this --- src/wallet/bdb.cpp | 14 ++++++++++++-- test/functional/wallet_multiwallet.py | 2 +- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 79851dff33..02b17b70b3 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -949,7 +949,7 @@ std::unique_ptr MakeBerkeleyDatabase(const fs::path& path, con { fs::path data_file = BDBDataFile(path); std::unique_ptr db; - { + try { LOCK(cs_db); // Lock env.m_databases until insert in BerkeleyDatabase constructor fs::path data_filename = data_file.filename(); std::shared_ptr env = GetBerkeleyEnv(data_file.parent_path(), options.use_shared_memory); @@ -959,10 +959,20 @@ std::unique_ptr MakeBerkeleyDatabase(const fs::path& path, con return nullptr; } db = std::make_unique(std::move(env), std::move(data_filename), options); + } catch (const std::runtime_error& e) { + status = DatabaseStatus::FAILED_LOAD; + error = Untranslated(e.what()); + return nullptr; } - if (options.verify && !db->Verify(error)) { + try { + if 
(options.verify && !db->Verify(error)) { + status = DatabaseStatus::FAILED_VERIFY; + return nullptr; + } + } catch (const std::runtime_error& e) { status = DatabaseStatus::FAILED_VERIFY; + error = Untranslated(e.what()); return nullptr; } diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index e241edd3fa..073b45dc2d 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -142,7 +142,7 @@ def wallet_file(name): assert_equal(set(node.listwallets()), set(wallet_names)) # should raise rpc error if wallet path can't be created - err_code = -4 if self.options.descriptors else -1 + err_code = -4 assert_raises_rpc_error(err_code, "filesystem error:" if platform.system() != 'Windows' else "create_directories:", self.nodes[0].createwallet, "w8/bad") # check that all requested wallets were created From 0a7953b83d702ee0a25db12dfdcbbb4ee83fc0ba Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 21 Jan 2026 23:11:50 +0000 Subject: [PATCH 229/356] Wallet/bdb: improve error msg when db directory is not writable --- src/wallet/bdb.cpp | 3 +++ test/functional/wallet_createwallet.py | 2 +- test/functional/wallet_startup.py | 4 ++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index 02b17b70b3..90a9ab55e3 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -151,6 +151,9 @@ bool BerkeleyEnvironment::Open(bilingual_str& err) fs::path pathIn = fs::PathFromString(strPath); TryCreateDirectories(pathIn); + if (!IsDirWritable(pathIn)) { + throw std::runtime_error(strprintf("BerkeleyEnvironment: Failed to open database in directory '%s': directory is not writable", fs::PathToString(pathIn))); + } if (util::LockDirectory(pathIn, ".walletlock") != util::LockResult::Success) { LogPrintf("Cannot obtain a lock on wallet directory %s. 
Another instance may be using it.\n", strPath); err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory()))); diff --git a/test/functional/wallet_createwallet.py b/test/functional/wallet_createwallet.py index c92ca69c28..758ea30b7d 100755 --- a/test/functional/wallet_createwallet.py +++ b/test/functional/wallet_createwallet.py @@ -43,7 +43,7 @@ def test_bad_dir_permissions(self, node): self.log.warning("Skipping non-writable directory test: unable to enforce read-only permissions") else: # Run actual test - assert_raises_rpc_error(-4, f"SQLiteDatabase: Failed to open database in directory '{str(dir_path)}': directory is not writable", node.createwallet, wallet_name=wallet_name, descriptors=True) + assert_raises_rpc_error(-4, f"Failed to open database in directory '{str(dir_path)}': directory is not writable", node.createwallet, wallet_name=wallet_name) # Reset directory permissions for cleanup dir_path.chmod(original_dir_perms) diff --git a/test/functional/wallet_startup.py b/test/functional/wallet_startup.py index c6b2def042..f8cb0caa7c 100755 --- a/test/functional/wallet_startup.py +++ b/test/functional/wallet_startup.py @@ -37,7 +37,7 @@ def test_load_unwritable_wallet(self, node): self.log.info("Test wallet load failure due to non-writable directory") wallet_name = "bad_permissions" - node.createwallet(wallet_name, descriptors=True) + node.createwallet(wallet_name) node.unloadwallet(wallet_name) dir_path = node.wallets_path / wallet_name @@ -49,7 +49,7 @@ def test_load_unwritable_wallet(self, node): else: # Ensure we don't load a wallet located in a non-writable directory. # The node will crash later on if we cannot write to disk. 
- assert_raises_rpc_error(-4, f"SQLiteDatabase: Failed to open database in directory '{str(dir_path)}': directory is not writable", node.loadwallet, wallet_name) + assert_raises_rpc_error(-4, f"Failed to open database in directory '{str(dir_path)}': directory is not writable", node.loadwallet, wallet_name) # Reset directory permissions for cleanup dir_path.chmod(original_dir_perms) From 9a70def42c5fbe4262c381bb0e7ad066743b5587 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 23 Jan 2026 21:23:53 +0000 Subject: [PATCH 230/356] depends: Qt 5.15.18 --- depends/packages/qt.mk | 8 ++++---- doc/dependencies.md | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 309117148d..ef467a6eb2 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -1,9 +1,9 @@ package=qt -$(package)_version=5.15.17 +$(package)_version=5.15.18 $(package)_download_path=https://download.qt.io/archive/qt/5.15/$($(package)_version)/submodules $(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz $(package)_file_name=qtbase-$($(package)_suffix) -$(package)_sha256_hash=db1513cbb3f4a5bd2229f759c0839436f7fe681a800ff2bc34c4960b09e756ff +$(package)_sha256_hash=7b632550ea1048fc10c741e46e2e3b093e5ca94dfa6209e9e0848800e247023b $(package)_linux_dependencies=freetype fontconfig libxcb libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm $(package)_qt_libs=corelib network widgets gui plugins testlib $(package)_linguist_tools = lrelease lupdate lconvert @@ -25,10 +25,10 @@ $(package)_patches += windows_lto.patch $(package)_patches += darwin_no_libm.patch $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) -$(package)_qttranslations_sha256_hash=309ddea3d2696042001c5d0ef1ea86cec8d0323bc3a0b942b65aaaf5a5d483c9 +$(package)_qttranslations_sha256_hash=e5625757913caf66a9d702ba102ae92cb165d8dde17759b6de9fdea84a1f857f 
$(package)_qttools_file_name=qttools-$($(package)_suffix) -$(package)_qttools_sha256_hash=433006eb6732bb7f546f63e0d1890477a9dd2f889228f30aa881aed5dfc9bfc6 +$(package)_qttools_sha256_hash=931e0969d9f9d8f233e5e9bf9db0cea9ce9914d49982f1795fe6191010113568 $(package)_extra_sources = $($(package)_qttranslations_file_name) $(package)_extra_sources += $($(package)_qttools_file_name) diff --git a/doc/dependencies.md b/doc/dependencies.md index e582b2d6eb..f8c663e4ae 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -30,7 +30,7 @@ Bitcoin Core requires one of the following compilers. | [Fontconfig](../depends/packages/fontconfig.mk) (gui) | [link](https://www.freedesktop.org/wiki/Software/fontconfig/) | [2.12.6](https://github.com/bitcoin/bitcoin/pull/23495) | 2.6 | Yes | | [FreeType](../depends/packages/freetype.mk) (gui) | [link](https://freetype.org) | [2.11.0](https://github.com/bitcoin/bitcoin/commit/01544dd78ccc0b0474571da854e27adef97137fb) | 2.3.0 | Yes | | [qrencode](../depends/packages/qrencode.mk) (gui) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | N/A | No | -| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/archive/qt/) | 5.15.17 | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | +| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/archive/qt/) | 5.15.18 | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | | [ZeroMQ](../depends/packages/zeromq.mk) (notifications) | [link](https://github.com/zeromq/libzmq/releases) | [4.3.4](https://github.com/bitcoin/bitcoin/pull/23956) | 4.0.0 | No | | [Berkeley DB](../depends/packages/bdb.mk) (legacy wallet) | [link](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.30 | 4.8.x | No | | [SQLite](../depends/packages/sqlite.mk) (wallet) | [link](https://sqlite.org) | [3.38.5](https://github.com/bitcoin/bitcoin/pull/25378) | 
[3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No | From 942cfca027c24d2d6c242e19224cee85fb5a5bef Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Sat, 6 Dec 2025 19:36:46 +1000 Subject: [PATCH 231/356] net: Pass time to InactivityChecks functions We run InactivityChecks() for each node every time poll()/select() returns, every 50ms or so. Rather than calculating the current time once for each node, just calculate it once and reuse it. Github-Pull: bitcoin/bitcoin#34025 Rebased-From: cea443e246185c0aa89a8b5dd9f78f6ab09af523 --- src/net.cpp | 13 +++++++------ src/net.h | 4 ++-- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 735985a841..7c5824d021 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -1991,16 +1991,15 @@ void CConnman::NotifyNumConnectionsChanged() } } -bool CConnman::ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const +bool CConnman::ShouldRunInactivityChecks(const CNode& node, std::chrono::microseconds now) const { return node.m_connected + m_peer_connect_timeout < now; } -bool CConnman::InactivityCheck(const CNode& node) const +bool CConnman::InactivityCheck(const CNode& node, std::chrono::microseconds now) const { // Tests that see disconnects after using mocktime can start nodes with a // large timeout. For example, -peertimeout=999999999. 
- const auto now{GetTime()}; const auto last_send{node.m_last_send.load()}; const auto last_recv{node.m_last_recv.load()}; @@ -2024,7 +2023,7 @@ bool CConnman::InactivityCheck(const CNode& node) const if (now > last_send + TIMEOUT_INTERVAL) { LogDebug(BCLog::NET, - "socket sending timeout: %is, %s\n", count_seconds(now - last_send), + "socket sending timeout: %is, %s\n", Ticks(now - last_send), node.DisconnectMsg(fLogIPs) ); return true; @@ -2032,7 +2031,7 @@ bool CConnman::InactivityCheck(const CNode& node) const if (now > last_recv + TIMEOUT_INTERVAL) { LogDebug(BCLog::NET, - "socket receive timeout: %is, %s\n", count_seconds(now - last_recv), + "socket receive timeout: %is, %s\n", Ticks(now - last_recv), node.DisconnectMsg(fLogIPs) ); return true; @@ -2114,6 +2113,8 @@ void CConnman::SocketHandlerConnected(const std::vector& nodes, { AssertLockNotHeld(m_total_bytes_sent_mutex); + auto now = GetTime(); + for (CNode* pnode : nodes) { if (interruptNet) return; @@ -2204,7 +2205,7 @@ void CConnman::SocketHandlerConnected(const std::vector& nodes, } } - if (InactivityCheck(*pnode)) pnode->fDisconnect = true; + if (InactivityCheck(*pnode, now)) pnode->fDisconnect = true; } } diff --git a/src/net.h b/src/net.h index e64d9a67f4..72832928a4 100644 --- a/src/net.h +++ b/src/net.h @@ -1268,7 +1268,7 @@ class CConnman void WakeMessageHandler() EXCLUSIVE_LOCKS_REQUIRED(!mutexMsgProc); /** Return true if we should disconnect the peer for failing an inactivity check. */ - bool ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const; + bool ShouldRunInactivityChecks(const CNode& node, std::chrono::microseconds now) const; bool MultipleManualOrFullOutboundConns(Network net) const EXCLUSIVE_LOCKS_REQUIRED(m_nodes_mutex); @@ -1318,7 +1318,7 @@ class CConnman void DisconnectNodes() EXCLUSIVE_LOCKS_REQUIRED(!m_reconnections_mutex, !m_nodes_mutex); void NotifyNumConnectionsChanged(); /** Return true if the peer is inactive and should be disconnected. 
*/ - bool InactivityCheck(const CNode& node) const; + bool InactivityCheck(const CNode& node, std::chrono::microseconds now) const; /** * Generate a collection of sockets to check for IO readiness. From 905db18969aa8e629325031c68f9ea153c4fa23b Mon Sep 17 00:00:00 2001 From: Anthony Towns Date: Sun, 7 Dec 2025 04:48:12 +1000 Subject: [PATCH 232/356] net: Cache -capturemessages setting Github-Pull: bitcoin/bitcoin#34025 Rebased-From: 5f5c1ea01955d277581f6c2acbeb982949088960 --- src/init.cpp | 1 + src/net.cpp | 2 +- src/net.h | 10 ++++++++++ src/test/net_tests.cpp | 4 ++-- 4 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 7fdbf75dc6..2cd86352e5 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1915,6 +1915,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) connOptions.m_peer_connect_timeout = peer_connect_timeout; connOptions.whitelist_forcerelay = args.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY); connOptions.whitelist_relay = args.GetBoolArg("-whitelistrelay", DEFAULT_WHITELISTRELAY); + connOptions.m_capture_messages = args.GetBoolArg("-capturemessages", false); // Port to bind to if `-bind=addr` is provided without a `:port` suffix. 
const uint16_t default_bind_port = diff --git a/src/net.cpp b/src/net.cpp index 7c5824d021..89879adf2c 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -3873,7 +3873,7 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) AssertLockNotHeld(m_total_bytes_sent_mutex); size_t nMessageSize = msg.data.size(); LogDebug(BCLog::NET, "sending %s (%d bytes) peer=%d\n", msg.m_type, nMessageSize, pnode->GetId()); - if (gArgs.GetBoolArg("-capturemessages", false)) { + if (m_capture_messages) { CaptureMessage(pnode->addr, msg.m_type, msg.data, /*is_incoming=*/false); } diff --git a/src/net.h b/src/net.h index 72832928a4..81efdb815d 100644 --- a/src/net.h +++ b/src/net.h @@ -1078,6 +1078,7 @@ class CConnman bool m_i2p_accept_incoming; bool whitelist_forcerelay = DEFAULT_WHITELISTFORCERELAY; bool whitelist_relay = DEFAULT_WHITELISTRELAY; + bool m_capture_messages = false; }; void Init(const Options& connOptions) EXCLUSIVE_LOCKS_REQUIRED(!m_added_nodes_mutex, !m_total_bytes_sent_mutex) @@ -1115,8 +1116,12 @@ class CConnman m_onion_binds = connOptions.onion_binds; whitelist_forcerelay = connOptions.whitelist_forcerelay; whitelist_relay = connOptions.whitelist_relay; + m_capture_messages = connOptions.m_capture_messages; } + // test only + void SetCaptureMessages(bool cap) { m_capture_messages = cap; } + CConnman(uint64_t seed0, uint64_t seed1, AddrMan& addrman, const NetGroupManager& netgroupman, const CChainParams& params, bool network_active = true); @@ -1591,6 +1596,11 @@ class CConnman */ bool whitelist_relay; + /** + * flag for whether messages are captured + */ + bool m_capture_messages{false}; + /** * Mutex protecting m_i2p_sam_sessions. */ diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 62e541b5b3..8ebca22492 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -807,7 +807,7 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) // Pretend that we bound to this port. 
const uint16_t bind_port = 20001; m_node.args->ForceSetArg("-bind", strprintf("3.4.5.6:%u", bind_port)); - m_node.args->ForceSetArg("-capturemessages", "1"); + m_node.connman->SetCaptureMessages(true); // Our address:port as seen from the peer - 2.3.4.5:20002 (different from the above). in_addr peer_us_addr; @@ -884,7 +884,7 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) CaptureMessage = CaptureMessageOrig; chainman.ResetIbd(); - m_node.args->ForceSetArg("-capturemessages", "0"); + m_node.connman->SetCaptureMessages(false); m_node.args->ForceSetArg("-bind", ""); } From 17f00550c831b4f45cddccfc851a653003d7c53f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Sun, 11 Jan 2026 23:57:55 +0100 Subject: [PATCH 233/356] test: cover IBD exit conditions Add a unit test that exercises the `IsInitialBlockDownload()` decision matrix by varying the cached latch, `BlockManager::LoadingBlocks()`, and tip work/recency inputs. This documents the current latching behavior and provides a baseline for later refactors. Github-Pull: bitcoin/bitcoin#34253 Rebased-From: 8be54e3b19677b02e19d054a4a5b2f1968bb1c46 --- .../validation_chainstatemanager_tests.cpp | 43 +++++++++++++++++-- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 227d7d4633..53e1ec5fa4 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -139,11 +139,11 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_rebalance_caches, TestChain100Setup) /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); // Reset IBD state so IsInitialBlockDownload() returns true and causes - // MaybeRebalancesCaches() to prioritize the snapshot chainstate, giving it + // MaybeRebalanceCaches() to prioritize the snapshot chainstate, giving it // more cache space than the snapshot chainstate. 
Calling ResetIbd() is // necessary because m_cached_finished_ibd is already latched to true before - // the test starts due to the test setup. After ResetIbd() is called. - // IsInitialBlockDownload will return true because at this point the active + // the test starts due to the test setup. After ResetIbd() is called, + // IsInitialBlockDownload() will return true because at this point the active // chainstate has a null chain tip. static_cast(manager).ResetIbd(); @@ -159,6 +159,43 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_rebalance_caches, TestChain100Setup) BOOST_CHECK_CLOSE(c2.m_coinsdb_cache_size_bytes, max_cache * 0.95, 1); } +BOOST_FIXTURE_TEST_CASE(chainstatemanager_ibd_exit_after_loading_blocks, ChainTestingSetup) +{ + CBlockIndex tip; + ChainstateManager& chainman{*Assert(m_node.chainman)}; + auto apply{[&](bool cached_finished_ibd, bool loading_blocks, bool tip_exists, bool enough_work, bool tip_recent) { + LOCK(::cs_main); + chainman.ResetChainstates(); + chainman.InitializeChainstate(m_node.mempool.get()); + + const auto recent_time{Now() - chainman.m_options.max_tip_age}; + + chainman.m_cached_finished_ibd.store(cached_finished_ibd, std::memory_order_relaxed); + chainman.m_blockman.m_importing = loading_blocks; + if (tip_exists) { + tip.nChainWork = chainman.MinimumChainWork() - (enough_work ? 0 : 1); + tip.nTime = (recent_time - (tip_recent ? 
0h : 100h)).time_since_epoch().count(); + chainman.ActiveChain().SetTip(tip); + } else { + assert(!chainman.ActiveChain().Tip()); + } + }}; + + for (const bool cached_finished_ibd : {false, true}) { + for (const bool loading_blocks : {false, true}) { + for (const bool tip_exists : {false, true}) { + for (const bool enough_work : {false, true}) { + for (const bool tip_recent : {false, true}) { + apply(cached_finished_ibd, loading_blocks, tip_exists, enough_work, tip_recent); + const bool expected_ibd = !cached_finished_ibd && (loading_blocks || !tip_exists || !enough_work || !tip_recent); + BOOST_CHECK_EQUAL(chainman.IsInitialBlockDownload(), expected_ibd); + } + } + } + } + } +} + struct SnapshotTestSetup : TestChain100Setup { // Run with coinsdb on the filesystem to support, e.g., moving invalidated // chainstate dirs to "*_invalid". From 1741f39fcb2e0cc2a8107737669bc73549269f35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Sun, 11 Jan 2026 23:57:13 +0100 Subject: [PATCH 234/356] validation: make `IsInitialBlockDownload()` lock-free `ChainstateManager::IsInitialBlockDownload()` is queried on hot paths and previously acquired `cs_main` internally, contributing to lock contention. Cache the IBD status in `m_cached_is_ibd`, and introduce `ChainstateManager::UpdateIBDStatus()` to latch it once block loading has finished and the current chain tip has enough work and is recent. Call the updater after tip updates and after `ImportBlocks()` completes. Since `IsInitialBlockDownload()` no longer updates the cache, drop `mutable` from `m_cached_is_ibd` and only update it from `UpdateIBDStatus()` under `cs_main`. Update the new unit test to showcase the new `UpdateIBDStatus()`. 
Co-authored-by: Patrick Strateman Co-authored-by: Martin Zumsande Github-Pull: bitcoin/bitcoin#34253 Rebased-From: 557b41a38ccf2929ca1e5271db1701e5fbe781af --- src/init.cpp | 1 + .../validation_chainstatemanager_tests.cpp | 1 + src/validation.cpp | 31 ++++++++++--------- src/validation.h | 20 ++++++++++-- 4 files changed, 36 insertions(+), 17 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 42331d37e8..c4a0aa51f6 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1691,6 +1691,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) chainman.m_thread_load = std::thread(&util::TraceThread, "initload", [=, &chainman, &args, &node] { // Import blocks ImportBlocks(chainman, vImportFiles); + WITH_LOCK(::cs_main, chainman.UpdateIBDStatus()); if (args.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) { LogPrintf("Stopping after block import\n"); StartShutdown(); diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 53e1ec5fa4..38a1428858 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -179,6 +179,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_ibd_exit_after_loading_blocks, ChainTe } else { assert(!chainman.ActiveChain().Tip()); } + chainman.UpdateIBDStatus(); }}; for (const bool cached_finished_ibd : {false, true}) { diff --git a/src/validation.cpp b/src/validation.cpp index 8a9af1b04f..ce3e6e96da 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1704,36 +1704,36 @@ void Chainstate::InitCoinsCache(size_t cache_size_bytes) m_coins_views->InitCache(); } -// Note that though this is marked const, we may end up modifying `m_cached_finished_ibd`, which -// is a performance-related implementation detail. This function must be marked -// `const` so that `CValidationInterface` clients (which are given a `const Chainstate*`) -// can call it. 
+// This function must be marked `const` so that `CValidationInterface` clients +// (which are given a `const Chainstate*`) can call it. +// +// It is lock-free and depends on `m_cached_finished_ibd`, which is latched by +// `UpdateIBDStatus()`. // bool ChainstateManager::IsInitialBlockDownload() const { - // Optimization: pre-test latch before taking the lock. - if (m_cached_finished_ibd.load(std::memory_order_relaxed)) - return false; + return !m_cached_finished_ibd.load(std::memory_order_relaxed); +} - LOCK(cs_main); +void ChainstateManager::UpdateIBDStatus() +{ if (m_cached_finished_ibd.load(std::memory_order_relaxed)) - return false; + return; if (m_blockman.LoadingBlocks()) { - return true; + return; } CChain& chain{ActiveChain()}; if (chain.Tip() == nullptr) { - return true; + return; } if (chain.Tip()->nChainWork < MinimumChainWork()) { - return true; + return; } if (chain.Tip()->Time() < Now() - m_options.max_tip_age) { - return true; + return; } LogPrintf("Leaving InitialBlockDownload (latching to false)\n"); m_cached_finished_ibd.store(true, std::memory_order_relaxed); - return false; } void Chainstate::CheckForkWarningConditions() @@ -2819,6 +2819,7 @@ bool Chainstate::DisconnectTip(BlockValidationState& state, DisconnectedBlockTra } m_chain.SetTip(*pindexDelete->pprev); + m_chainman.UpdateIBDStatus(); UpdateTip(pindexDelete->pprev); // Let wallets know transactions went from 1-confirmed to @@ -2947,6 +2948,7 @@ bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, } // Update m_chain & related variables. 
m_chain.SetTip(*pindexNew); + m_chainman.UpdateIBDStatus(); UpdateTip(pindexNew); const auto time_6{SteadyClock::now()}; @@ -4267,6 +4269,7 @@ bool Chainstate::LoadChainTip() return false; } m_chain.SetTip(*pindex); + m_chainman.UpdateIBDStatus(); tip = m_chain.Tip(); // Make sure our chain tip before shutting down scores better than any other candidate diff --git a/src/validation.h b/src/validation.h index d674960e26..bffa38f09c 100644 --- a/src/validation.h +++ b/src/validation.h @@ -941,10 +941,11 @@ class ChainstateManager * Whether initial block download has ended and IsInitialBlockDownload * should return false from now on. * - * Mutable because we need to be able to mark IsInitialBlockDownload() - * const, which latches this for caching purposes. + * This value is used for lock-free IBD checks, and latches from true to + * false once block loading has finished and the current chain tip has + * enough work and is recent. */ - mutable std::atomic m_cached_finished_ibd{false}; + std::atomic m_cached_finished_ibd{false}; /** * Every received block is assigned a unique and increasing identifier, so we @@ -1054,6 +1055,19 @@ class ChainstateManager return BackgroundSyncInProgress() ? m_ibd_chainstate->m_chain.Tip() : nullptr; } + /** + * Update and possibly latch the IBD status. + * + * If block loading has finished and the current chain tip has enough work + * and is recent, set `m_cached_is_ibd` to false. This function never sets + * the flag back to true. + * + * This should be called after operations that may affect IBD exit + * conditions (e.g. after updating the active chain tip, or after + * `ImportBlocks()` finishes). 
+ */ + void UpdateIBDStatus() EXCLUSIVE_LOCKS_REQUIRED(cs_main); + node::BlockMap& BlockIndex() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); From adeacad1da24be7ec5f8737a74ae3625173e2ca6 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Mon, 26 Jan 2026 18:42:46 +0000 Subject: [PATCH 235/356] Diff-minimise --- .../validation_chainstatemanager_tests.cpp | 6 +++--- src/validation.cpp | 19 +++++++------------ src/validation.h | 9 ++++----- 3 files changed, 14 insertions(+), 20 deletions(-) diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 38a1428858..00f7f64855 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -139,11 +139,11 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_rebalance_caches, TestChain100Setup) /*cache_size_bytes=*/1 << 23, /*in_memory=*/true, /*should_wipe=*/false); // Reset IBD state so IsInitialBlockDownload() returns true and causes - // MaybeRebalanceCaches() to prioritize the snapshot chainstate, giving it + // MaybeRebalancesCaches() to prioritize the snapshot chainstate, giving it // more cache space than the snapshot chainstate. Calling ResetIbd() is // necessary because m_cached_finished_ibd is already latched to true before - // the test starts due to the test setup. After ResetIbd() is called, - // IsInitialBlockDownload() will return true because at this point the active + // the test starts due to the test setup. After ResetIbd() is called. + // IsInitialBlockDownload will return true because at this point the active // chainstate has a null chain tip. 
static_cast(manager).ResetIbd(); diff --git a/src/validation.cpp b/src/validation.cpp index ce3e6e96da..8fb749ea7e 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1704,36 +1704,31 @@ void Chainstate::InitCoinsCache(size_t cache_size_bytes) m_coins_views->InitCache(); } -// This function must be marked `const` so that `CValidationInterface` clients -// (which are given a `const Chainstate*`) can call it. -// -// It is lock-free and depends on `m_cached_finished_ibd`, which is latched by -// `UpdateIBDStatus()`. -// bool ChainstateManager::IsInitialBlockDownload() const { return !m_cached_finished_ibd.load(std::memory_order_relaxed); } -void ChainstateManager::UpdateIBDStatus() +bool ChainstateManager::UpdateIBDStatus() { if (m_cached_finished_ibd.load(std::memory_order_relaxed)) - return; + return false; if (m_blockman.LoadingBlocks()) { - return; + return true; } CChain& chain{ActiveChain()}; if (chain.Tip() == nullptr) { - return; + return true; } if (chain.Tip()->nChainWork < MinimumChainWork()) { - return; + return true; } if (chain.Tip()->Time() < Now() - m_options.max_tip_age) { - return; + return true; } LogPrintf("Leaving InitialBlockDownload (latching to false)\n"); m_cached_finished_ibd.store(true, std::memory_order_relaxed); + return false; } void Chainstate::CheckForkWarningConditions() diff --git a/src/validation.h b/src/validation.h index bffa38f09c..2454bcb635 100644 --- a/src/validation.h +++ b/src/validation.h @@ -941,11 +941,10 @@ class ChainstateManager * Whether initial block download has ended and IsInitialBlockDownload * should return false from now on. * - * This value is used for lock-free IBD checks, and latches from true to - * false once block loading has finished and the current chain tip has - * enough work and is recent. + * Mutable because we need to be able to mark IsInitialBlockDownload() + * const, which latches this for caching purposes. 
*/ - std::atomic m_cached_finished_ibd{false}; + mutable std::atomic m_cached_finished_ibd{false}; /** * Every received block is assigned a unique and increasing identifier, so we @@ -1066,7 +1065,7 @@ class ChainstateManager * conditions (e.g. after updating the active chain tip, or after * `ImportBlocks()` finishes). */ - void UpdateIBDStatus() EXCLUSIVE_LOCKS_REQUIRED(cs_main); + bool UpdateIBDStatus() EXCLUSIVE_LOCKS_REQUIRED(cs_main); node::BlockMap& BlockIndex() EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { From 3594134e4b3c37d9ff6a955d6cc5ba20f245785b Mon Sep 17 00:00:00 2001 From: John Moffett Date: Tue, 2 Sep 2025 09:26:09 -0400 Subject: [PATCH 236/356] Rename and clear var containing k or -k buf currently holds k or -k and isn't cleared, so clear it and rename to nonce32 to clarify its sensitivity and match how it is named in the corresponding ECDSA sign_inner. Github-Pull: bitcoin-core/secp256k1#1731 Rebased-From: 325d65a8cfae6d4c34098709d0a9e942e4963d03 --- src/secp256k1/src/modules/schnorrsig/main_impl.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/secp256k1/src/modules/schnorrsig/main_impl.h b/src/secp256k1/src/modules/schnorrsig/main_impl.h index 82bba2f597..6ed0b7512f 100644 --- a/src/secp256k1/src/modules/schnorrsig/main_impl.h +++ b/src/secp256k1/src/modules/schnorrsig/main_impl.h @@ -137,7 +137,7 @@ static int secp256k1_schnorrsig_sign_internal(const secp256k1_context* ctx, unsi secp256k1_gej rj; secp256k1_ge pk; secp256k1_ge r; - unsigned char buf[32] = { 0 }; + unsigned char nonce32[32] = { 0 }; unsigned char pk_buf[32]; unsigned char seckey[32]; int ret = 1; @@ -162,8 +162,8 @@ static int secp256k1_schnorrsig_sign_internal(const secp256k1_context* ctx, unsi secp256k1_scalar_get_b32(seckey, &sk); secp256k1_fe_get_b32(pk_buf, &pk.x); - ret &= !!noncefp(buf, msg, msglen, seckey, pk_buf, bip340_algo, sizeof(bip340_algo), ndata); - secp256k1_scalar_set_b32(&k, buf, NULL); + ret &= !!noncefp(nonce32, msg, msglen, seckey, 
pk_buf, bip340_algo, sizeof(bip340_algo), ndata); + secp256k1_scalar_set_b32(&k, nonce32, NULL); ret &= !secp256k1_scalar_is_zero(&k); secp256k1_scalar_cmov(&k, &secp256k1_scalar_one, !ret); @@ -189,6 +189,7 @@ static int secp256k1_schnorrsig_sign_internal(const secp256k1_context* ctx, unsi secp256k1_scalar_clear(&k); secp256k1_scalar_clear(&sk); secp256k1_memclear(seckey, sizeof(seckey)); + secp256k1_memclear(nonce32, sizeof(nonce32)); secp256k1_gej_clear(&rj); return ret; From 1c64b3c054656f5046c7df860c18eec4824df25c Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Wed, 17 Sep 2025 23:19:12 +0100 Subject: [PATCH 237/356] build: Fix warnings in x86_64 assembly check This change fixes: - `-Wuninitialized` in both Autotools and CMake; - `-Wreturn-type` in CMake only. Github-Pull: bitcoin-core/secp256k1#1749 Rebased-From: ab560078aa9c7e977fade4ceae4a20ef8e5be025 --- src/secp256k1/build-aux/m4/bitcoin_secp.m4 | 2 +- src/secp256k1/cmake/CheckX86_64Assembly.cmake | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 b/src/secp256k1/build-aux/m4/bitcoin_secp.m4 index 048267fa6e..1428d4d9b2 100644 --- a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 +++ b/src/secp256k1/build-aux/m4/bitcoin_secp.m4 @@ -3,7 +3,7 @@ AC_DEFUN([SECP_X86_64_ASM_CHECK],[ AC_MSG_CHECKING(for x86_64 assembly availability) AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include ]],[[ - uint64_t a = 11, tmp; + uint64_t a = 11, tmp = 0; __asm__ __volatile__("movq \@S|@0x100000000,%1; mulq %%rsi" : "+a"(a) : "S"(tmp) : "cc", "%rdx"); ]])], [has_x86_64_asm=yes], [has_x86_64_asm=no]) AC_MSG_RESULT([$has_x86_64_asm]) diff --git a/src/secp256k1/cmake/CheckX86_64Assembly.cmake b/src/secp256k1/cmake/CheckX86_64Assembly.cmake index ae82cd476e..ca18919e06 100644 --- a/src/secp256k1/cmake/CheckX86_64Assembly.cmake +++ b/src/secp256k1/cmake/CheckX86_64Assembly.cmake @@ -4,10 +4,11 @@ function(check_x86_64_assembly) 
check_c_source_compiles(" #include - int main() + int main(void) { - uint64_t a = 11, tmp; + uint64_t a = 11, tmp = 0; __asm__ __volatile__(\"movq $0x100000000,%1; mulq %%rsi\" : \"+a\"(a) : \"S\"(tmp) : \"cc\", \"%rdx\"); + return 0; } " HAVE_X86_64_ASM) set(HAVE_X86_64_ASM ${HAVE_X86_64_ASM} PARENT_SCOPE) From 657d90d5fb666eff8a5993d42ae92f678449a5a1 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 27 Jan 2026 03:49:42 +0000 Subject: [PATCH 238/356] Diff-minimise --- src/secp256k1/build-aux/m4/bitcoin_secp.m4 | 2 +- src/secp256k1/src/modules/schnorrsig/main_impl.h | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 b/src/secp256k1/build-aux/m4/bitcoin_secp.m4 index 1428d4d9b2..048267fa6e 100644 --- a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 +++ b/src/secp256k1/build-aux/m4/bitcoin_secp.m4 @@ -3,7 +3,7 @@ AC_DEFUN([SECP_X86_64_ASM_CHECK],[ AC_MSG_CHECKING(for x86_64 assembly availability) AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include ]],[[ - uint64_t a = 11, tmp = 0; + uint64_t a = 11, tmp; __asm__ __volatile__("movq \@S|@0x100000000,%1; mulq %%rsi" : "+a"(a) : "S"(tmp) : "cc", "%rdx"); ]])], [has_x86_64_asm=yes], [has_x86_64_asm=no]) AC_MSG_RESULT([$has_x86_64_asm]) diff --git a/src/secp256k1/src/modules/schnorrsig/main_impl.h b/src/secp256k1/src/modules/schnorrsig/main_impl.h index 6ed0b7512f..78c57e553c 100644 --- a/src/secp256k1/src/modules/schnorrsig/main_impl.h +++ b/src/secp256k1/src/modules/schnorrsig/main_impl.h @@ -137,7 +137,7 @@ static int secp256k1_schnorrsig_sign_internal(const secp256k1_context* ctx, unsi secp256k1_gej rj; secp256k1_ge pk; secp256k1_ge r; - unsigned char nonce32[32] = { 0 }; + unsigned char buf[32] = { 0 }; unsigned char pk_buf[32]; unsigned char seckey[32]; int ret = 1; @@ -162,8 +162,8 @@ static int secp256k1_schnorrsig_sign_internal(const secp256k1_context* ctx, unsi secp256k1_scalar_get_b32(seckey, &sk); secp256k1_fe_get_b32(pk_buf, &pk.x); - ret &= 
!!noncefp(nonce32, msg, msglen, seckey, pk_buf, bip340_algo, sizeof(bip340_algo), ndata); - secp256k1_scalar_set_b32(&k, nonce32, NULL); + ret &= !!noncefp(buf, msg, msglen, seckey, pk_buf, bip340_algo, sizeof(bip340_algo), ndata); + secp256k1_scalar_set_b32(&k, buf, NULL); ret &= !secp256k1_scalar_is_zero(&k); secp256k1_scalar_cmov(&k, &secp256k1_scalar_one, !ret); @@ -189,7 +189,7 @@ static int secp256k1_schnorrsig_sign_internal(const secp256k1_context* ctx, unsi secp256k1_scalar_clear(&k); secp256k1_scalar_clear(&sk); secp256k1_memclear(seckey, sizeof(seckey)); - secp256k1_memclear(nonce32, sizeof(nonce32)); + secp256k1_memclear(buf, sizeof(buf)); secp256k1_gej_clear(&rj); return ret; From 95c8a631026fd062aa0fce1080ccc70ce0939c58 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 25 Sep 2025 13:50:25 +0000 Subject: [PATCH 239/356] Force-disable UPnP and NAT-PMP/PCP if listening is disabled --- src/init.cpp | 7 +++++-- src/qt/optionsmodel.cpp | 8 ++++++-- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 44dc91a7e1..e3de3ce81b 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -903,9 +903,12 @@ void InitParameterInteraction(ArgsManager& args) if (!args.GetBoolArg("-listen", DEFAULT_LISTEN)) { // do not map ports or try to retrieve public IP when not listening (pointless) - if (args.SoftSetBoolArg("-upnp", false)) + if (args.GetBoolArg("-upnp", DEFAULT_UPNP)) { + args.ForceSetArg("-upnp", "0"); LogInfo("parameter interaction: -listen=0 -> setting -upnp=0\n"); - if (args.SoftSetBoolArg("-natpmp", false)) { + } + if (args.GetBoolArg("-natpmp", DEFAULT_NATPMP)) { + args.ForceSetArg("-natpmp", "0"); LogInfo("parameter interaction: -listen=0 -> setting -natpmp=0\n"); } if (args.SoftSetBoolArg("-discover", false)) diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp index 579f648dd0..4ab81e6514 100644 --- a/src/qt/optionsmodel.cpp +++ b/src/qt/optionsmodel.cpp @@ -851,13 +851,17 @@ bool 
OptionsModel::setOption(OptionID option, const QVariant& value, const std:: case MapPortUPnP: // core option - can be changed on-the-fly if (changed()) { update(value.toBool()); - node().mapPort(value.toBool(), getOption(MapPortNatpmp).toBool()); + if (gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) { + node().mapPort(value.toBool(), getOption(MapPortNatpmp).toBool()); + } } break; case MapPortNatpmp: // core option - can be changed on-the-fly if (changed()) { update(value.toBool()); - node().mapPort(getOption(MapPortUPnP).toBool(), value.toBool()); + if (gArgs.GetBoolArg("-listen", DEFAULT_LISTEN)) { + node().mapPort(getOption(MapPortUPnP).toBool(), value.toBool()); + } } break; case MinimizeOnClose: From a133f3d45cccc64962300164afb496525d12ca90 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 25 Sep 2025 13:21:09 +0000 Subject: [PATCH 240/356] GUI/OptionsDialog: Support insert_at position in CreateOptionUI --- src/qt/optionsdialog.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index a469dc1ce9..3ec58d6986 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -100,6 +100,7 @@ void OptionsDialog::FixTabOrder(QWidget * const o) struct CreateOptionUIOpts { QBoxLayout *horizontal_layout{nullptr}; int stretch{1}; + int insert_at{-1}; int indent{0}; }; @@ -153,7 +154,7 @@ void OptionsDialog::CreateOptionUI(QBoxLayout * const layout, const QString& tex if (opts.stretch) horizontalLayout->addStretch(opts.stretch); - layout->addLayout(horizontalLayout); + layout->insertLayout(opts.insert_at, horizontalLayout); for (auto& o : objs) { o->setProperty("L", QVariant::fromValue((QLayout*)horizontalLayout)); From af69b8bc0e61d611ee32fb354a515eb5adfb1195 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 25 Sep 2025 13:21:34 +0000 Subject: [PATCH 241/356] GUI/OptionsDialog: Move port mapping options indented under allow-incoming checkbox --- src/qt/optionsdialog.cpp | 15 
++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index 3ec58d6986..0817e4da99 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -299,8 +299,20 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) ui->verticalLayout_Wallet->insertWidget(0, walletrbf); FixTabOrder(walletrbf); + QStyleOptionButton styleoptbtn; + const auto checkbox_indent = ui->allowIncoming->style()->subElementRect(QStyle::SE_CheckBoxIndicator, &styleoptbtn, ui->allowIncoming).width(); + /* Network tab */ QLayoutItem *spacer = ui->verticalLayout_Network->takeAt(ui->verticalLayout_Network->count() - 1); + + prevwidget = ui->allowIncoming; + ui->verticalLayout_Network->removeWidget(ui->mapPortUpnp); + ui->verticalLayout_Network->removeWidget(ui->mapPortNatpmp); + int insert_at = ui->verticalLayout_Network->indexOf(ui->connectSocks); + // NOTE: Re-inserted in bottom-to-top order + CreateOptionUI(ui->verticalLayout_Network, QStringLiteral("%1"), {ui->mapPortNatpmp}, { .insert_at=insert_at, .indent=checkbox_indent, }); + CreateOptionUI(ui->verticalLayout_Network, QStringLiteral("%1"), {ui->mapPortUpnp}, { .insert_at=insert_at, .indent=checkbox_indent, }); + prevwidget = dynamic_cast(ui->verticalLayout_Network->itemAt(ui->verticalLayout_Network->count() - 1))->widget(); blockreconstructionextratxn = new QSpinBox(ui->tabNetwork); @@ -553,9 +565,6 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) dustdynamic_multiplier->setValue(DEFAULT_DUST_RELAY_MULTIPLIER / 1000.0); CreateOptionUI(verticalLayout_Spamfiltering, tr("%1 Automatically adjust the dust limit upward to %2 times:"), {dustdynamic_enable, dustdynamic_multiplier}); - QStyleOptionButton styleoptbtn; - const auto checkbox_indent = dustdynamic_enable->style()->subElementRect(QStyle::SE_CheckBoxIndicator, &styleoptbtn, dustdynamic_enable).width(); - dustdynamic_target = new QRadioButton(groupBox_Spamfiltering); 
dustdynamic_target_blocks = new QSpinBox(groupBox_Spamfiltering); dustdynamic_target_blocks->setMinimum(2); From bb6edbb770aadf0120307d2fba2d03b4b32d6e6c Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 25 Sep 2025 13:51:00 +0000 Subject: [PATCH 242/356] GUI/OptionsDialog: Disable port mapping checkboxes if listening checkbox is unset --- src/qt/optionsdialog.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index 0817e4da99..fc1eee5341 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -312,6 +312,10 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) // NOTE: Re-inserted in bottom-to-top order CreateOptionUI(ui->verticalLayout_Network, QStringLiteral("%1"), {ui->mapPortNatpmp}, { .insert_at=insert_at, .indent=checkbox_indent, }); CreateOptionUI(ui->verticalLayout_Network, QStringLiteral("%1"), {ui->mapPortUpnp}, { .insert_at=insert_at, .indent=checkbox_indent, }); + connect(ui->allowIncoming, &QPushButton::toggled, ui->mapPortUpnp, &QWidget::setEnabled); + connect(ui->allowIncoming, &QPushButton::toggled, ui->mapPortNatpmp, &QWidget::setEnabled); + ui->mapPortUpnp->setEnabled(ui->allowIncoming->isChecked()); + ui->mapPortNatpmp->setEnabled(ui->allowIncoming->isChecked()); prevwidget = dynamic_cast(ui->verticalLayout_Network->itemAt(ui->verticalLayout_Network->count() - 1))->widget(); From 50ef0564ae095e06dc25e6a97b06d924da8dd0be Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 25 Sep 2025 13:55:13 +0000 Subject: [PATCH 243/356] GUI/OptionsDialog: Rephrase UPnP & PCP/NAT-PMP checkboxes --- src/qt/forms/optionsdialog.ui | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui index f992a304c3..34586db8bf 100644 --- a/src/qt/forms/optionsdialog.ui +++ b/src/qt/forms/optionsdialog.ui @@ -391,7 +391,7 @@ Automatically open the Bitcoin client port on the router. 
This only works when your router supports UPnP and it is enabled. - Map port using &UPnP + Automatically configure router(s) that support &UPnP @@ -401,7 +401,7 @@ Automatically open the Bitcoin client port on the router. This only works when your router supports PCP or NAT-PMP and it is enabled. The external port could be random. - Map port using PCP or NA&T-PMP + Automatically configure router(s) that support PCP or NA&T-PMP From 9694a973e9f4008b53802b84dda11adb1769d550 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 25 Sep 2025 14:08:49 +0000 Subject: [PATCH 244/356] GUI/OptionsDialog: Refactor UPnP checkbox (move to C++ code) --- src/qt/forms/optionsdialog.ui | 10 ---------- src/qt/optionsdialog.cpp | 19 ++++++++++--------- src/qt/optionsdialog.h | 1 + 3 files changed, 11 insertions(+), 19 deletions(-) diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui index 34586db8bf..c2f9f53665 100644 --- a/src/qt/forms/optionsdialog.ui +++ b/src/qt/forms/optionsdialog.ui @@ -385,16 +385,6 @@ - - - - Automatically open the Bitcoin client port on the router. This only works when your router supports UPnP and it is enabled. 
- - - Automatically configure router(s) that support &UPnP - - - diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index fc1eee5341..364d13f6b5 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -267,10 +267,6 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) connect(ui->networkPort, SIGNAL(textChanged(const QString&)), this, SLOT(checkLineEdit())); /* Network elements init */ -#ifndef USE_UPNP - ui->mapPortUpnp->setEnabled(false); -#endif - ui->proxyIp->setEnabled(false); ui->proxyPort->setEnabled(false); ui->proxyPort->setValidator(new QIntValidator(1, 65535, this)); @@ -306,15 +302,20 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) QLayoutItem *spacer = ui->verticalLayout_Network->takeAt(ui->verticalLayout_Network->count() - 1); prevwidget = ui->allowIncoming; - ui->verticalLayout_Network->removeWidget(ui->mapPortUpnp); ui->verticalLayout_Network->removeWidget(ui->mapPortNatpmp); int insert_at = ui->verticalLayout_Network->indexOf(ui->connectSocks); // NOTE: Re-inserted in bottom-to-top order CreateOptionUI(ui->verticalLayout_Network, QStringLiteral("%1"), {ui->mapPortNatpmp}, { .insert_at=insert_at, .indent=checkbox_indent, }); - CreateOptionUI(ui->verticalLayout_Network, QStringLiteral("%1"), {ui->mapPortUpnp}, { .insert_at=insert_at, .indent=checkbox_indent, }); - connect(ui->allowIncoming, &QPushButton::toggled, ui->mapPortUpnp, &QWidget::setEnabled); + upnp = new QCheckBox(ui->tabNetwork); + upnp->setText(tr("Automatically configure router(s) that support &UPnP")); + upnp->setToolTip(tr("Automatically open the Bitcoin client port on the router. 
This only works when your router supports UPnP and it is enabled.")); +#ifndef USE_UPNP + upnp->setEnabled(false); +#endif + CreateOptionUI(ui->verticalLayout_Network, QStringLiteral("%1"), {upnp}, { .insert_at=insert_at, .indent=checkbox_indent, }); + connect(ui->allowIncoming, &QPushButton::toggled, upnp, &QWidget::setEnabled); connect(ui->allowIncoming, &QPushButton::toggled, ui->mapPortNatpmp, &QWidget::setEnabled); - ui->mapPortUpnp->setEnabled(ui->allowIncoming->isChecked()); + upnp->setEnabled(ui->allowIncoming->isChecked()); ui->mapPortNatpmp->setEnabled(ui->allowIncoming->isChecked()); prevwidget = dynamic_cast(ui->verticalLayout_Network->itemAt(ui->verticalLayout_Network->count() - 1))->widget(); @@ -865,7 +866,7 @@ void OptionsDialog::setMapper() /* Network */ mapper->addMapping(ui->networkPort, OptionsModel::NetworkPort); - mapper->addMapping(ui->mapPortUpnp, OptionsModel::MapPortUPnP); + mapper->addMapping(upnp, OptionsModel::MapPortUPnP); mapper->addMapping(ui->mapPortNatpmp, OptionsModel::MapPortNatpmp); mapper->addMapping(ui->allowIncoming, OptionsModel::Listen); mapper->addMapping(ui->enableServer, OptionsModel::Server); diff --git a/src/qt/optionsdialog.h b/src/qt/optionsdialog.h index 231de3aa4a..f64603fa36 100644 --- a/src/qt/optionsdialog.h +++ b/src/qt/optionsdialog.h @@ -123,6 +123,7 @@ private Q_SLOTS: QCheckBox *walletrbf; + QCheckBox *upnp; QSpinBox *blockreconstructionextratxn; QDoubleSpinBox *blockreconstructionextratxnsize; From b54c41e39f0e16e1b60c720eac6bbeb79a40fdf1 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 27 Jan 2026 19:58:28 +0000 Subject: [PATCH 245/356] Bump copyright year to 2026 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 3b8716c491..c42956d441 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,7 +28,7 @@ set(CLIENT_VERSION_MINOR 0) set(CLIENT_VERSION_BUILD 0) set(CLIENT_VERSION_RC 0) set(CLIENT_VERSION_IS_RELEASE "true") 
-set(COPYRIGHT_YEAR "2025") +set(COPYRIGHT_YEAR "2026") # During the enabling of the CXX and CXXOBJ languages, we modify # CMake's compiler/linker invocation strings by appending the content From b2f6128338c0ecee0d2bd805759e284326b8121f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Tue, 13 Jan 2026 12:53:51 +0100 Subject: [PATCH 246/356] psbt: Fix `PSBTInputSignedAndVerified` bounds `assert` The previous `assert` used `>=`, allowing `input_index == psbt.inputs.size()` and out-of-bounds access in `psbt.inputs[input_index]`. Found during review: https://github.com/bitcoin/bitcoin/pull/31650#discussion_r2685892867 Github-Pull: bitcoin/bitcoin#34272 Rebased-From: 2f5b1c5f80590ffa6b5a5bcfb21fddb1dc22e852 --- src/psbt.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/psbt.cpp b/src/psbt.cpp index fe45f2318c..2ac8ec92db 100644 --- a/src/psbt.cpp +++ b/src/psbt.cpp @@ -295,7 +295,7 @@ bool PSBTInputSigned(const PSBTInput& input) bool PSBTInputSignedAndVerified(const PartiallySignedTransaction psbt, unsigned int input_index, const PrecomputedTransactionData* txdata) { CTxOut utxo; - assert(psbt.inputs.size() >= input_index); + assert(input_index < psbt.inputs.size()); const PSBTInput& input = psbt.inputs[input_index]; if (input.non_witness_utxo) { From 36052cffc6f5439736d5b6d813bbbbb8d3666494 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Wed, 14 Jan 2026 12:09:11 +0000 Subject: [PATCH 247/356] qa: Fix Windows logging bug The regex `(.*)` was capturing `\r` from subprocess output on Windows, causing the closing parenthesis in logs to wrap to the next line. Stripping whitespace from the regex match fixes the formatting. 
Github-Pull: bitcoin/bitcoin#34282 Rebased-From: 979d41bfab248990d7d520873d17fe52daa8d402 --- test/functional/test_runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index d78c1c634f..5c23452395 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -690,7 +690,7 @@ def get_next(self): status = "Passed" elif proc.returncode == TEST_EXIT_SKIPPED: status = "Skipped" - skip_reason = re.search(r"Test Skipped: (.*)", stdout).group(1) + skip_reason = re.search(r"Test Skipped: (.*)", stdout).group(1).strip() else: status = "Failed" self.num_running -= 1 From e48a4c18eabc3f2f22d304c0f11de99ece2bc7ee Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 29 Jan 2026 07:31:37 +0000 Subject: [PATCH 248/356] test: Allow testing of check failures This allows specific tests to mock the check behavior to consistently use exceptions instead of aborts for intentionally failing checks in all build configurations. 
Github-Pull: bitcoin/bitcoin#32588 Rebased-From: fa0dc4bdffb06b6f0c192fe1aa02b4dfdcdc6e15 (partial) --- src/util/check.cpp | 5 +++++ src/util/check.h | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/src/util/check.cpp b/src/util/check.cpp index 1430c0e8e2..81d0b78322 100644 --- a/src/util/check.cpp +++ b/src/util/check.cpp @@ -27,8 +27,13 @@ NonFatalCheckError::NonFatalCheckError(std::string_view msg, std::string_view fi { } +bool g_detail_test_only_CheckFailuresAreExceptionsNotAborts{false}; + void assertion_fail(std::string_view file, int line, std::string_view func, std::string_view assertion) { + if (g_detail_test_only_CheckFailuresAreExceptionsNotAborts) { + throw NonFatalCheckError{assertion, file, line, func}; + } auto str = strprintf("%s:%s %s: Assertion `%s' failed.\n", file, line, func, assertion); fwrite(str.data(), 1, str.size(), stderr); std::abort(); diff --git a/src/util/check.h b/src/util/check.h index efc78915a9..3cc047e88e 100644 --- a/src/util/check.h +++ b/src/util/check.h @@ -21,6 +21,12 @@ constexpr bool G_FUZZING{ #endif }; +extern bool g_detail_test_only_CheckFailuresAreExceptionsNotAborts; +struct test_only_CheckFailuresAreExceptionsNotAborts { + test_only_CheckFailuresAreExceptionsNotAborts() { g_detail_test_only_CheckFailuresAreExceptionsNotAborts = true; }; + ~test_only_CheckFailuresAreExceptionsNotAborts() { g_detail_test_only_CheckFailuresAreExceptionsNotAborts = false; }; +}; + std::string StrFormatInternalBug(std::string_view msg, std::string_view file, int line, std::string_view func); class NonFatalCheckError : public std::runtime_error From 8bc1b55baf1070839c7f2ffd01589dcc2a1b6b07 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 29 Jan 2026 07:32:13 +0000 Subject: [PATCH 249/356] util: add overflow-safe `CeilDiv` helper Introduce `CeilDiv()` for integral ceiling division without the typical `(dividend + divisor - 1) / divisor` overflow. `CeilDiv()` asserts non-negative arguments and a non-zero divisor. 
Add unit tests covering return type deduction, max-value behavior, and divisor checks. Github-Pull: bitcoin/bitcoin#29678 Rebased-From: c41dd358fefb33d295a65c8f2ad304644735b7e0 --- src/test/util_tests.cpp | 43 +++++++++++++++++++++++++++++++++++++++++ src/util/overflow.h | 17 ++++++++++++++++ 2 files changed, 60 insertions(+) diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 4cacbd1151..204539efe2 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -1966,4 +1966,47 @@ BOOST_AUTO_TEST_CASE(mib_string_literal_test) BOOST_CHECK_EXCEPTION(operator""_MiB(static_cast(max_mib) + 1), std::overflow_error, HasReason("MiB value too large for size_t byte conversion")); } +BOOST_AUTO_TEST_CASE(ceil_div_test) +{ + // Return type is effectively the wider of the two types. + BOOST_CHECK((std::is_same_v)); + BOOST_CHECK((std::is_same_v)); + BOOST_CHECK((std::is_same_v)); + BOOST_CHECK((std::is_same_v)); + BOOST_CHECK((std::is_same_v)); + BOOST_CHECK((std::is_same_v)); + BOOST_CHECK((std::is_same_v)); + BOOST_CHECK((std::is_same_v)); + + // Basic ceiling division: exact divisions and rounding up. + BOOST_CHECK_EQUAL(CeilDiv(0ULL, 1ULL), 0ULL); + BOOST_CHECK_EQUAL(CeilDiv(1ULL, 1ULL), 1ULL); + BOOST_CHECK_EQUAL(CeilDiv(2ULL, 2ULL), 1ULL); + BOOST_CHECK_EQUAL(CeilDiv(3ULL, 2ULL), 2ULL); + BOOST_CHECK_EQUAL(CeilDiv(5ULL, 3ULL), 2ULL); + + // Works with size_t. + BOOST_CHECK_EQUAL(CeilDiv(size_t{0}, size_t{1}), size_t{0}); + BOOST_CHECK_EQUAL(CeilDiv(size_t{3}, size_t{2}), size_t{2}); + + // Works with uint32_t. + BOOST_CHECK_EQUAL(CeilDiv(0U, 1U), 0U); + BOOST_CHECK_EQUAL(CeilDiv(3U, 2U), 2U); + + // CeilDiv avoids overflow at max values. + constexpr uint64_t max_u64{std::numeric_limits::max()}; + BOOST_CHECK_EQUAL(CeilDiv(max_u64, 2ULL), (max_u64 / 2) + 1); + + // Mixed types: size_t dividend with uint32_t divisor. 
+ constexpr size_t max_u32_as_size{std::numeric_limits::max()}; + BOOST_CHECK_EQUAL(CeilDiv(max_u32_as_size, uint32_t{2}), (max_u32_as_size / 2) + 1); +} + +BOOST_AUTO_TEST_CASE(ceil_div_zero_divisor_test) +{ + test_only_CheckFailuresAreExceptionsNotAborts check_failures; + BOOST_CHECK_THROW((void)CeilDiv(1ULL, 0ULL), NonFatalCheckError); + BOOST_CHECK_THROW((void)CeilDiv(size_t{1}, size_t{0}), NonFatalCheckError); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/util/overflow.h b/src/util/overflow.h index 67711af0a5..1813413931 100644 --- a/src/util/overflow.h +++ b/src/util/overflow.h @@ -5,6 +5,8 @@ #ifndef BITCOIN_UTIL_OVERFLOW_H #define BITCOIN_UTIL_OVERFLOW_H +#include + #include #include #include @@ -49,6 +51,21 @@ template return i + j; } +/** + * @brief Integer ceiling division (for non-negative values). + * + * Computes the smallest integer q such that q * divisor >= dividend. + * Both dividend and divisor must be non-negative, and divisor must be non-zero. + * + * The implementation avoids overflow that can occur with `(dividend + divisor - 1) / divisor`. + */ +template +[[nodiscard]] constexpr auto CeilDiv(const Dividend dividend, const Divisor divisor) +{ + Assert(dividend >= 0 && divisor > 0); + return dividend / divisor + (dividend % divisor != 0); +} + /** * @brief Left bit shift with overflow checking. * @param input The input value to be left shifted. 
From 58ceddd389e5a824bb26550bbbac8d3e22a99967 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 19 Mar 2024 05:04:24 +0000 Subject: [PATCH 250/356] Bugfix: init: For first-run disk space check, round up pruned size requirement Github-Pull: bitcoin/bitcoin#29678 Rebased-From: 8b33935ea6c1a6ca5cbbd9f3a8d1ee632cdf4a31 --- src/init.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/init.cpp b/src/init.cpp index 0a8766aa0f..6c1655086b 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -79,6 +79,7 @@ #include #include #include +#include #include #include #include @@ -1807,7 +1808,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) "Approximately %u GB of data will be stored in this directory." ), fs::quoted(fs::PathToString(args.GetBlocksDirPath())), - additional_bytes_needed / 1'000'000'000 + CeilDiv(additional_bytes_needed, 1'000'000'000) )); } } From 56ead1e25366237fd9db9056566d21fdb85087cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Mon, 19 Jan 2026 12:35:16 +0100 Subject: [PATCH 251/356] util: add `TicksSeconds` Add a helper to convert durations to integer seconds. 
Github-Pull: bitcoin/bitcoin#34328 Rebased-From: a9440b1595be7053b17895f7ee36652bac24be6e --- src/test/util_tests.cpp | 9 +++++++++ src/util/time.h | 6 ++++++ 2 files changed, 15 insertions(+) diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 4cacbd1151..98572388df 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -596,6 +596,15 @@ BOOST_AUTO_TEST_CASE(util_time_GetTime) BOOST_CHECK(us_0 < GetTime()); } +BOOST_AUTO_TEST_CASE(util_ticksseconds) +{ + BOOST_CHECK_EQUAL(TicksSeconds(0s), 0); + BOOST_CHECK_EQUAL(TicksSeconds(1s), 1); + BOOST_CHECK_EQUAL(TicksSeconds(999ms), 0); + BOOST_CHECK_EQUAL(TicksSeconds(1000ms), 1); + BOOST_CHECK_EQUAL(TicksSeconds(1500ms), 1); +} + BOOST_AUTO_TEST_CASE(test_IsDigit) { BOOST_CHECK_EQUAL(IsDigit('0'), true); diff --git a/src/util/time.h b/src/util/time.h index c43b306ff2..bc5a725498 100644 --- a/src/util/time.h +++ b/src/util/time.h @@ -73,6 +73,12 @@ constexpr auto Ticks(Dur2 d) { return std::chrono::duration_cast(d).count(); } + +template +constexpr int64_t TicksSeconds(Duration d) +{ + return int64_t{Ticks(d)}; +} template constexpr auto TicksSinceEpoch(Timepoint t) { From 804850709368e434633a70ea028ea8b0d5d7976f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Mon, 19 Jan 2026 12:35:26 +0100 Subject: [PATCH 252/356] rpc: make `uptime` monotonic across NTP jumps Compute `uptime` from `SteadyClock` so it is unaffected by system time changes after startup. Derive GUI startup time by subtracting the monotonic uptime from the wall clock time. Add a functional test covering a large `setmocktime` jump. 
Co-authored-by: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Github-Pull: bitcoin/bitcoin#34328 Rebased-From: 14f99cfe53f07280b6f047844fc4fba0da8cd328 --- src/common/system.cpp | 9 +++------ src/common/system.h | 6 ++++-- src/qt/clientmodel.cpp | 2 +- src/rpc/server.cpp | 2 +- test/functional/rpc_uptime.py | 9 ++++++--- 5 files changed, 15 insertions(+), 13 deletions(-) diff --git a/src/common/system.cpp b/src/common/system.cpp index 7af792db44..140fe64542 100644 --- a/src/common/system.cpp +++ b/src/common/system.cpp @@ -30,9 +30,6 @@ using util::ReplaceAll; -// Application startup time (used for uptime calculation) -const int64_t nStartupTime = GetTime(); - #ifndef WIN32 std::string ShellEscape(const std::string& arg) { @@ -105,8 +102,8 @@ int GetNumCores() return std::thread::hardware_concurrency(); } -// Obtain the application startup time (used for uptime calculation) -int64_t GetStartupTime() +SteadyClock::duration GetUptime() { - return nStartupTime; + static const auto g_startup_time{SteadyClock::now()}; + return SteadyClock::now() - g_startup_time; } diff --git a/src/common/system.h b/src/common/system.h index a4b56be9ac..21841b789b 100644 --- a/src/common/system.h +++ b/src/common/system.h @@ -7,12 +7,14 @@ #define BITCOIN_COMMON_SYSTEM_H #include // IWYU pragma: keep +#include +#include #include #include -// Application startup time (used for uptime calculation) -int64_t GetStartupTime(); +/// Monotonic uptime (not affected by system time changes). 
+SteadyClock::duration GetUptime(); void SetupEnvironment(); [[nodiscard]] bool SetupNetworking(); diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp index fb81dee8da..e32141490a 100644 --- a/src/qt/clientmodel.cpp +++ b/src/qt/clientmodel.cpp @@ -210,7 +210,7 @@ bool ClientModel::isReleaseVersion() const QString ClientModel::formatClientStartupTime() const { - return QDateTime::fromSecsSinceEpoch(GetStartupTime()).toString(); + return QDateTime::currentDateTime().addSecs(-TicksSeconds(GetUptime())).toString(); } QString ClientModel::dataDir() const diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index 34f19df256..7858e59e6b 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -192,7 +192,7 @@ static RPCHelpMan uptime() }, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { - return GetTime() - GetStartupTime(); + return TicksSeconds(GetUptime()); } }; } diff --git a/test/functional/rpc_uptime.py b/test/functional/rpc_uptime.py index fdf459953c..258ae7962f 100755 --- a/test/functional/rpc_uptime.py +++ b/test/functional/rpc_uptime.py @@ -26,9 +26,12 @@ def _test_negative_time(self): assert_raises_rpc_error(-8, "Mocktime must be in the range [0, 9223372036], not -1.", self.nodes[0].setmocktime, -1) def _test_uptime(self): - wait_time = 10 - self.nodes[0].setmocktime(int(time.time() + wait_time)) - assert self.nodes[0].uptime() >= wait_time + wait_time = 20_000 + uptime_before = self.nodes[0].uptime() + self.nodes[0].setmocktime(int(time.time()) + wait_time) + uptime_after = self.nodes[0].uptime() + self.nodes[0].setmocktime(0) + assert uptime_after - uptime_before < wait_time, "uptime should not jump with wall clock" if __name__ == '__main__': From ec19c6523e758725760951016056c0dd82b99873 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 21 Jan 2026 16:19:13 +0100 Subject: [PATCH 253/356] test: Scale NetworkThread close timeout with timeout_factor Github-Pull: bitcoin/bitcoin#34369 
Rebased-From: fab055c907f1ab9ecac49e3d72909289a3b08c2d --- test/functional/test_framework/p2p.py | 2 +- test/functional/test_framework/test_framework.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index 207d19137b..ec3b229278 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -736,7 +736,7 @@ def run(self): """Start the network thread.""" self.network_event_loop.run_forever() - def close(self, *, timeout=10): + def close(self, *, timeout): """Close the connections and network event loop.""" self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) wait_until_helper_internal(lambda: not self.network_event_loop.is_running(), timeout=timeout) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index d5b338f2ba..97e5a810e2 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -322,7 +322,7 @@ def shutdown(self): pdb.set_trace() self.log.debug('Closing down network thread') - self.network_thread.close() + self.network_thread.close(timeout=self.options.timeout_factor * 10) if self.success == TestStatus.FAILED: self.log.info("Not stopping nodes as test failed. The dangling processes will be cleaned up later.") else: From 96221a69fbecfbd3f02540f5c32fe25adc014741 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 12 Jan 2026 15:40:01 -0800 Subject: [PATCH 254/356] gui: Show an error message if the restored wallet name is empty The Restore Wallet dialog rejects wallet names that are empty, but was doing so silently. This is confusing, we should be presenting an error message to the user. 
Github-Pull: bitcoin-core/gui#924 Rebased-From: dd904298c13b14ef518e24fa63c6d0962f4a2de0 --- src/qt/bitcoingui.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index 894a401e56..412238a017 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -436,7 +436,11 @@ void BitcoinGUI::createActions() //: Label of the input field where the name of the wallet is entered. QString label = tr("Wallet Name"); QString wallet_name = QInputDialog::getText(this, title, label, QLineEdit::Normal, "", &wallet_name_ok); - if (!wallet_name_ok || wallet_name.isEmpty()) return; + if (!wallet_name_ok) return; + if (wallet_name.isEmpty()) { + QMessageBox::critical(nullptr, tr("Invalid Wallet Name"), tr("Wallet name cannot be empty")); + return; + } auto activity = new RestoreWalletActivity(m_wallet_controller, this); connect(activity, &RestoreWalletActivity::restored, this, &BitcoinGUI::setCurrentWallet, Qt::QueuedConnection); From b786433ae90e549dde7be1fce807923d5f97240a Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 29 Jan 2026 08:00:16 +0000 Subject: [PATCH 255/356] lint: Tolerate subtree divergence --- test/lint/git-subtree-check.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/lint/git-subtree-check.sh b/test/lint/git-subtree-check.sh index 83816bb242..12ce2c1ba3 100755 --- a/test/lint/git-subtree-check.sh +++ b/test/lint/git-subtree-check.sh @@ -111,7 +111,7 @@ echo "$DIR in $COMMIT was last updated in commit $old (tree $tree_commit)" if [ "$tree_actual_tree" != "$tree_commit" ]; then git diff "$tree_commit" "$tree_actual_tree" >&2 echo "FAIL: subtree directory was touched without subtree merge" >&2 - exit 1 + exit 0 fi if [ "$check_remote" != "0" ]; then From 1e95c710637fe8ae3a2e0bb78f7b9fa8440717f5 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Thu, 20 Nov 2025 12:47:22 +0100 Subject: [PATCH 256/356] test: retry download in 
get_previous_releases.py Github-Pull: bitcoin/bitcoin#33915 Rebased-From: fad06f3bb436a97683e8248bfda1bd0d9998c899 --- test/get_previous_releases.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/test/get_previous_releases.py b/test/get_previous_releases.py index 1d397e721e..5b4f588492 100755 --- a/test/get_previous_releases.py +++ b/test/get_previous_releases.py @@ -17,6 +17,7 @@ import shutil import subprocess import sys +import time import hashlib SHA256_SUMS = { @@ -135,7 +136,12 @@ def download_binary(tag, args) -> int: ret = subprocess.run(['curl', '--fail', '--remote-name', tarballUrl]).returncode if ret: - return ret + print("Retrying download after failure ...", file=sys.stderr) + time.sleep(12) + ret = subprocess.run(['curl', '--fail', '--remote-name', tarballUrl]).returncode + if ret: + print("\nDownload failed a second time", file=sys.stderr) + return ret hasher = hashlib.sha256() with open(tarball, "rb") as afile: From c57009eefcf30091d86fccaa07f0722f6f235cb9 Mon Sep 17 00:00:00 2001 From: Padraic Slattery Date: Mon, 19 Jan 2026 17:45:37 +0100 Subject: [PATCH 257/356] chore: Update outdated GitHub Actions versions Github-Pull: #34344 Rebased-From: 9482f00df0b05e8ef710a7f0fac3262855ce335f --- .github/actions/configure-docker/action.yml | 2 +- .github/workflows/ci.yml | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/actions/configure-docker/action.yml b/.github/actions/configure-docker/action.yml index 131fdb1ccc..9bf970ee78 100644 --- a/.github/actions/configure-docker/action.yml +++ b/.github/actions/configure-docker/action.yml @@ -16,7 +16,7 @@ runs: # This is required to allow buildkit to access the actions cache - name: Expose actions cache variables - uses: actions/github-script@v6 + uses: actions/github-script@v8 with: script: | Object.keys(process.env).forEach(function (key) { diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ae614d5bb2..88e52a627e 100644 --- 
a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -49,7 +49,7 @@ jobs: steps: - name: Determine fetch depth run: echo "FETCH_DEPTH=$((${{ github.event.pull_request.commits }} + 2))" >> "$GITHUB_ENV" - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: ${{ env.FETCH_DEPTH }} @@ -125,7 +125,7 @@ jobs: steps: - &CHECKOUT name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: # Ensure the latest merged pull request state is used, even on re-runs. ref: &CHECKOUT_REF_TMPL ${{ github.event_name == 'pull_request' && github.ref || '' }} @@ -164,7 +164,7 @@ jobs: FILE_ENV: ${{ matrix.file-env }} - name: Save Ccache cache - uses: actions/cache/save@v4 + uses: actions/cache/save@v5 if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true' with: path: ${{ env.CCACHE_DIR }} @@ -222,13 +222,13 @@ jobs: sed -i '1s/^/set(ENV{CMAKE_POLICY_VERSION_MINIMUM} 3.5)\n/' "${VCPKG_INSTALLATION_ROOT}/scripts/ports.cmake" - name: vcpkg tools cache - uses: actions/cache@v4 + uses: actions/cache@v5 with: path: C:/vcpkg/downloads/tools key: ${{ github.job }}-vcpkg-tools - name: Restore vcpkg binary cache - uses: actions/cache/restore@v4 + uses: actions/cache/restore@v5 id: vcpkg-binary-cache with: path: ~/AppData/Local/vcpkg/archives @@ -239,7 +239,7 @@ jobs: cmake -B build --preset vs2022-static -DCMAKE_TOOLCHAIN_FILE="$env:VCPKG_INSTALLATION_ROOT\scripts\buildsystems\vcpkg.cmake" ${{ matrix.generate-options }} - name: Save vcpkg binary cache - uses: actions/cache/save@v4 + uses: actions/cache/save@v5 if: github.event_name != 'pull_request' && steps.vcpkg-binary-cache.outputs.cache-hit != 'true' && matrix.job-type == 'standard' with: path: ~/AppData/Local/vcpkg/archives @@ -414,7 +414,7 @@ jobs: CONTAINER_NAME: "bitcoin-linter" steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v6 with: ref: *CHECKOUT_REF_TMPL fetch-depth: 0 From 
6aec0958f12a65567a354a1d08d4bfed126cf34b Mon Sep 17 00:00:00 2001 From: Max Edwards Date: Thu, 25 Sep 2025 18:11:37 +0100 Subject: [PATCH 258/356] ci: remove 3rd party js from windows dll gha job We can use vswhere.exe directly to create a vs developer prompt and so can remove this third party dependency. Co-authored-by: David Gumberg Github-Pull: #32513 Rebased-From: 7ae0497eef8f5b37fc1184897a5bbc9f023dfa67 --- .github/workflows/ci.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 88e52a627e..b0af3eb95f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -198,11 +198,15 @@ jobs: steps: - *CHECKOUT - - name: Configure Developer Command Prompt for Microsoft Visual C++ - # Using microsoft/setup-msbuild is not enough. - uses: ilammy/msvc-dev-cmd@v1 - with: - arch: x64 + - name: Set up VS Developer Prompt + shell: pwsh -Command "$PSVersionTable; $PSNativeCommandUseErrorActionPreference = $true; $ErrorActionPreference = 'Stop'; & '{0}'" + run: | + $vswherePath = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" + $installationPath = & $vswherePath -latest -property installationPath + & "${env:COMSPEC}" /s /c "`"$installationPath\Common7\Tools\vsdevcmd.bat`" -arch=x64 -no_logo && set" | foreach-object { + $name, $value = $_ -split '=', 2 + echo "$name=$value" >> $env:GITHUB_ENV + } - name: Get tool information run: | From 3835e16e5fe9d77d10fe1ce819157980dcea65f8 Mon Sep 17 00:00:00 2001 From: fanquake Date: Thu, 29 Jan 2026 14:21:13 +0000 Subject: [PATCH 259/356] doc: update release notes for v29.x --- doc/release-notes.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/release-notes.md b/doc/release-notes.md index 263ee553d1..4970d195e8 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -73,8 +73,10 @@ Notable changes ### Misc +- #32513 ci: remove 3rd party js from windows dll gha job - #33508 ci: fix buildx gha 
cache authentication on forks - #33581 ci: Properly include $FILE_ENV in DEPENDS_HASH +- #34344 ci: update GitHub Actions versions Credits ======= @@ -90,6 +92,8 @@ Thanks to everyone who directly contributed to this release: - furszy - Hennadii Stepanov - ismaelsadeeq +- m3dwards +- Padraic Slattery - Pieter Wuille - SatsAndSports - willcl-ark From 340b58a8cee1ee9d108af500129896f2928c8681 Mon Sep 17 00:00:00 2001 From: sedited Date: Thu, 8 Jan 2026 19:59:15 +0100 Subject: [PATCH 260/356] Add sedited to trusted-keys Github-Pull: bitcoin/bitcoin#34236 Rebased-From: d1b227f3ad19e1364c74fcb3b34717bb2b9b9243 --- contrib/verify-commits/trusted-keys | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/verify-commits/trusted-keys b/contrib/verify-commits/trusted-keys index f25486776f..0121f290b0 100644 --- a/contrib/verify-commits/trusted-keys +++ b/contrib/verify-commits/trusted-keys @@ -3,3 +3,4 @@ D1DBF2C4B96F2DEBF4C16654410108112E7EA81F 152812300785C96444D3334D17565732E08E5E41 6B002C6EA3F91B1B0DF0C9BC8F617F1200A6D25C 4D1B3D5ECBA1A7E05371EEBE46800E30FC748A66 +A8FC55F3B04BA3146F3492E79303B33A305224CB From 16493e35cd3dff081cc24285543ea65fc881b6ae Mon Sep 17 00:00:00 2001 From: sedited Date: Thu, 29 Jan 2026 18:01:16 +0100 Subject: [PATCH 261/356] Bump version to 29.3rc2 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8337d69535..ed59b307cd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,7 +30,7 @@ set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) set(CLIENT_VERSION_MINOR 3) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 1) +set(CLIENT_VERSION_RC 2) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From 75c2108a684d395dc59a224968bed9801628a35d Mon Sep 17 00:00:00 2001 From: sedited Date: Thu, 29 Jan 2026 18:03:29 +0100 Subject: [PATCH 262/356] [doc] update release notes for 29.3rc2 --- doc/release-notes.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 
deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 4970d195e8..4aebc143d7 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.3rc1 is now available from: +Bitcoin Core version 29.3rc2 is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. @@ -54,6 +54,7 @@ Notable changes - #34226 wallet: test: Relative wallet failed migration cleanup - #34123 wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet - #34215 wallettool: fix unnamed createfromdump failure walletsdir deletion +- #34370 wallet: Additional cleanups for migration, and fixes for createfromdump with BDB ### Mining @@ -92,10 +93,12 @@ Thanks to everyone who directly contributed to this release: - furszy - Hennadii Stepanov - ismaelsadeeq +- luke-jr - m3dwards - Padraic Slattery - Pieter Wuille - SatsAndSports +- sedited - willcl-ark As well as to everyone that helped with translations on From 2b2c4daa5357d4d6abefe2c87eb74caf12c09342 Mon Sep 17 00:00:00 2001 From: sedited Date: Thu, 29 Jan 2026 18:08:10 +0100 Subject: [PATCH 263/356] [doc] generate manpages 29.3rc2 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index aad8ffd52b..e762c0fa39 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-CLI "1" "January 2026" "bitcoin-cli v29.3.0rc1" "User Commands" +.TH BITCOIN-CLI "1" "January 2026" "bitcoin-cli v29.3.0rc2" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc1 +bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc2 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc1 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.3.0rc1 +Bitcoin Core RPC client version v29.3.0rc2 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. .PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index ba38159542..4a990e8082 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-QT "1" "January 2026" "bitcoin-qt v29.3.0rc1" "User Commands" +.TH BITCOIN-QT "1" "January 2026" "bitcoin-qt v29.3.0rc2" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.3.0rc1 +bitcoin-qt \- manual page for bitcoin-qt v29.3.0rc2 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.3.0rc1 +Bitcoin Core version v29.3.0rc2 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index 4be1d58291..7b3b996afd 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-TX "1" "January 2026" "bitcoin-tx v29.3.0rc1" "User Commands" +.TH BITCOIN-TX "1" "January 2026" "bitcoin-tx v29.3.0rc2" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc1 +bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc2 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc1 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.3.0rc1 +Bitcoin Core bitcoin\-tx utility version v29.3.0rc2 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index f4bc33f4da..d10f4b48b3 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-UTIL "1" "January 2026" "bitcoin-util v29.3.0rc1" "User Commands" +.TH BITCOIN-UTIL "1" "January 2026" "bitcoin-util v29.3.0rc2" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.3.0rc1 +bitcoin-util \- manual page for bitcoin-util v29.3.0rc2 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.3.0rc1 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.3.0rc1 +Bitcoin Core bitcoin\-util utility version v29.3.0rc2 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index 000fb2a814..c8737d7989 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-WALLET "1" "January 2026" "bitcoin-wallet v29.3.0rc1" "User Commands" +.TH BITCOIN-WALLET "1" "January 2026" "bitcoin-wallet v29.3.0rc2" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0rc1 +bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0rc2 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.3.0rc1 +Bitcoin Core bitcoin\-wallet utility version v29.3.0rc2 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index f0005de61c..bcf2354452 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIND "1" "January 2026" "bitcoind v29.3.0rc1" "User Commands" +.TH BITCOIND "1" "January 2026" "bitcoind v29.3.0rc2" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.3.0rc1 +bitcoind \- manual page for bitcoind v29.3.0rc2 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.3.0rc1 +Bitcoin Core daemon version v29.3.0rc2 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. .PP From c202961b8e4588a09381386c04c009258674271d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Wed, 28 Jan 2026 23:45:33 +0100 Subject: [PATCH 264/356] fix: uptime RPC returns 0 on first call The monotonic uptime fix (#34328) used a function-local static for `g_startup_time`, which was initialized on first `GetUptime()` call instead of app startup time. This caused the first `uptime()` call to always return 0. Move `g_startup_time` to namespace scope so it initializes at program start, ensuring the first call returns actual elapsed time. 
Note that we don't need to make it `static` anymore because it is just used in this single translation unit. Test was updated to simulate some work before the first call. Co-authored-by: Carlo Antinarella Github-Pull: bitcoin/bitcoin#34437 Rebased-From: e67a676df9af5ece5307438ae1b4ddb0730e3482 --- src/common/system.cpp | 10 +++++----- test/functional/rpc_uptime.py | 5 ++++- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/common/system.cpp b/src/common/system.cpp index 140fe64542..4e8e9a9aaf 100644 --- a/src/common/system.cpp +++ b/src/common/system.cpp @@ -102,8 +102,8 @@ int GetNumCores() return std::thread::hardware_concurrency(); } -SteadyClock::duration GetUptime() -{ - static const auto g_startup_time{SteadyClock::now()}; - return SteadyClock::now() - g_startup_time; -} +namespace { + const auto g_startup_time{SteadyClock::now()}; +} // namespace + +SteadyClock::duration GetUptime() { return SteadyClock::now() - g_startup_time; } diff --git a/test/functional/rpc_uptime.py b/test/functional/rpc_uptime.py index 258ae7962f..95e4c10b6d 100755 --- a/test/functional/rpc_uptime.py +++ b/test/functional/rpc_uptime.py @@ -26,8 +26,11 @@ def _test_negative_time(self): assert_raises_rpc_error(-8, "Mocktime must be in the range [0, 9223372036], not -1.", self.nodes[0].setmocktime, -1) def _test_uptime(self): - wait_time = 20_000 + time.sleep(1) # Do some work before checking uptime uptime_before = self.nodes[0].uptime() + assert uptime_before > 0, "uptime should begin at app start" + + wait_time = 20_000 self.nodes[0].setmocktime(int(time.time()) + wait_time) uptime_after = self.nodes[0].uptime() self.nodes[0].setmocktime(0) From 012a5fa3847b4cfe9f665c48b4fb88156a7e01d0 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Thu, 22 Jan 2026 17:22:17 +0000 Subject: [PATCH 265/356] Bugfix: Rework MSVCRT workaround to correctly exclusive-open on Windows --- src/node/blockstorage.cpp | 2 +- src/test/streams_tests.cpp | 2 +- src/util/fs.cpp | 20 
++++++++++++++++++++ src/util/fs_helpers.cpp | 9 +-------- 4 files changed, 23 insertions(+), 10 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 372395dd24..98f3d107c2 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -1144,7 +1144,7 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) } else { // Create initial or missing xor key file AutoFile xor_key_file{fsbridge::fopen(xor_key_path, -#ifdef __MINGW64__ +#if 0 "wb" // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210 #else "wbx" diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 777122df6d..6a6fdfe651 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -29,7 +29,7 @@ BOOST_AUTO_TEST_CASE(xor_file) BOOST_CHECK_EXCEPTION(xor_file.ignore(1), std::ios_base::failure, HasReason{"AutoFile::ignore: file handle is nullpt"}); } { -#ifdef __MINGW64__ +#if 0 // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210 const char* mode = "wb"; #else diff --git a/src/util/fs.cpp b/src/util/fs.cpp index 348c1b3383..78e3a2ace6 100644 --- a/src/util/fs.cpp +++ b/src/util/fs.cpp @@ -2,6 +2,7 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+#include #include #include @@ -13,7 +14,13 @@ #include #else #include +#include +#include +#include #include +#include +#include +#include #include #endif @@ -25,9 +32,22 @@ namespace fsbridge { FILE *fopen(const fs::path& p, const char *mode) { + const bool exclusive{strchr(mode, 'x') != nullptr}; #ifndef WIN32 + Assume((!exclusive) || !strcmp(mode, "wbx")); return ::fopen(p.c_str(), mode); #else + if (exclusive) { + Assert(!strcmp(mode, "wbx")); + int fd; + if (::_wsopen_s(&fd, p.wstring().c_str(), _O_WRONLY | _O_CREAT | _O_EXCL | _O_BINARY, _SH_DENYNO, _S_IREAD | _S_IWRITE)) { + return nullptr; + } + FILE* fp = ::_fdopen(fd, "wb"); + if (!fp) ::_close(fd); + return fp; + } + std::wstring_convert,wchar_t> utf8_cvt; return ::_wfopen(p.wstring().c_str(), utf8_cvt.from_bytes(mode).c_str()); #endif diff --git a/src/util/fs_helpers.cpp b/src/util/fs_helpers.cpp index 061f2c79b0..906d5d82c8 100644 --- a/src/util/fs_helpers.cpp +++ b/src/util/fs_helpers.cpp @@ -318,14 +318,7 @@ bool IsDirWritable(const fs::path& dir_path) FastRandomContext rng; const auto tmp = dir_path / fs::PathFromString(strprintf(".tmp_%d", rng.rand64())); - const char* mode; -#ifdef __MINGW64__ - mode = "w"; // Temporary workaround for https://github.com/bitcoin/bitcoin/issues/30210 -#else - mode = "wx"; -#endif - - if (const auto created{fsbridge::fopen(tmp, mode)}) { + if (const auto created{fsbridge::fopen(tmp, "wbx")}) { std::fclose(created); std::error_code ec; fs::remove(tmp, ec); // clean up, ignore errors From 8f6b03b8c1c24cd2950bc4efb2898f19b5adf095 Mon Sep 17 00:00:00 2001 From: Felipe Micaroni Lalli Date: Sat, 10 Jan 2026 16:51:50 -0300 Subject: [PATCH 266/356] GUI: Fix typo in options dialog tooltip addressses -> addresses Github-Pull: bitcoinknots/bitcoin#245 Rebased-From: 691b0e66821887d83f817c918f6da126232d7eca --- src/qt/optionsdialog.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index 
a469dc1ce9..e301ac61e8 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -378,7 +378,7 @@ OptionsDialog::OptionsDialog(QWidget* parent, bool enableWallet) rejectunknownwitness = new QCheckBox(groupBox_Spamfiltering); rejectunknownwitness->setText(tr("Reject unknown witness script versions")); - rejectunknownwitness->setToolTip(tr("Some attempts to spam Bitcoin intentionally use undefined witness script formats reserved for future use. By enabling this option, your node will reject transactions using these undefined/future versions. Note that if you send to many addressses in a single transaction, the entire transaction may be rejected if any single one of them attempts to use an undefined format.")); + rejectunknownwitness->setToolTip(tr("Some attempts to spam Bitcoin intentionally use undefined witness script formats reserved for future use. By enabling this option, your node will reject transactions using these undefined/future versions. Note that if you send to many addresses in a single transaction, the entire transaction may be rejected if any single one of them attempts to use an undefined format.")); verticalLayout_Spamfiltering->addWidget(rejectunknownwitness); FixTabOrder(rejectunknownwitness); From 67168d7f1c6122f3838a63c194b4cb6197de68a2 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Fri, 30 Jan 2026 15:10:54 +0000 Subject: [PATCH 267/356] util: Drop *BSD headers in `batchpriority.cpp` Currently, there are issues with headers in `batchpriority.cpp`: 1. `SCHED_BATCH` is not defined on all supported *BSD platforms. 2. `pthread.h` is necessary on other platforms. This addresses both issues and fixes other includes. 
Github-Pull: bitcoin/bitcoin#34462 Rebased-From: 07af50f7896a36a82efc19b5030779ab36302fa4 --- src/util/batchpriority.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/util/batchpriority.cpp b/src/util/batchpriority.cpp index c73aef1eb4..0731910ddd 100644 --- a/src/util/batchpriority.cpp +++ b/src/util/batchpriority.cpp @@ -2,15 +2,15 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include + #include #include -#if (defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)) -#include -#include -#endif +#include #ifndef WIN32 +#include #include #endif From 1e2eaebd7982849df078a7ad192eb46a2e85dc89 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Sun, 1 Feb 2026 19:49:28 +0100 Subject: [PATCH 268/356] Initialize file_size to 0 to avoid UB Github-Pull: bitcoin-core/leveldb-subtree#58 Rebased-From: ad9b1c989380538bfb19c5c65df0d3f72d8ed62b --- src/leveldb/db/db_impl.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/leveldb/db/db_impl.cc b/src/leveldb/db/db_impl.cc index 90c1c811d8..6c01fe103c 100644 --- a/src/leveldb/db/db_impl.cc +++ b/src/leveldb/db/db_impl.cc @@ -740,6 +740,7 @@ Status DBImpl::OpenCompactionOutputFile(CompactionState* compact) { pending_outputs_.insert(file_number); CompactionState::Output out; out.number = file_number; + out.file_size = 0; out.smallest.Clear(); out.largest.Clear(); compact->outputs.push_back(out); From 8caf0836a8d9a7ec44c9a9bfa2c52d2708601dcb Mon Sep 17 00:00:00 2001 From: codeabysss Date: Sun, 7 Dec 2025 22:51:54 +0300 Subject: [PATCH 269/356] saturate LocalServiceInfo::nScore to prevent overflow and improve test to verify increment behavior and saturation at max value Github-Pull: bitcoin/bitcoin#34028 Rebased-From: 10100f86f5f81aef54fa45031b1de2064d792722 --- src/net.cpp | 6 ++++-- src/net.h | 3 +++ src/test/net_tests.cpp | 30 ++++++++++++++++++++++++++++++ 3 
files changed, 37 insertions(+), 2 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index 09a3d8617a..1deef8740d 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -219,7 +219,7 @@ CService GetLocalAddress(const CNode& peer) return GetLocal(peer).value_or(CService{CNetAddr(), GetListenPort()}); } -static int GetnScore(const CService& addr) +int GetnScore(const CService& addr) { LOCK(g_maplocalhost_mutex); const auto it = mapLocalHost.find(addr); @@ -316,7 +316,9 @@ bool SeenLocal(const CService& addr) LOCK(g_maplocalhost_mutex); const auto it = mapLocalHost.find(addr); if (it == mapLocalHost.end()) return false; - ++it->second.nScore; + if (it->second.nScore < std::numeric_limits::max()) { + ++it->second.nScore; + } return true; } diff --git a/src/net.h b/src/net.h index ddee34168a..8db742d7d5 100644 --- a/src/net.h +++ b/src/net.h @@ -98,6 +98,9 @@ static constexpr bool DEFAULT_V2_TRANSPORT{false}; typedef int64_t NodeId; +/** Get the score of a local address. */ +int GetnScore(const CService& addr); + struct AddedNodeParams { std::string m_added_node; bool m_use_v2transport; diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 7c98c382e4..6105b19b03 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -808,6 +808,36 @@ BOOST_AUTO_TEST_CASE(LocalAddress_BasicLifecycle) BOOST_CHECK(!IsLocal(addr)); } +BOOST_AUTO_TEST_CASE(LocalAddress_nScore_Overflow) +{ + g_reachable_nets.Add(NET_IPV4); + CService addr{UtilBuildAddress(0x002, 0x001, 0x001, 0x001), 1000}; // 2.1.1.1:1000 + + // SeenLocal increments when nScore is below max + const int initial_score = 1000; + BOOST_REQUIRE(AddLocal(addr, initial_score)); + BOOST_REQUIRE(IsLocal(addr)); + BOOST_CHECK_EQUAL(GetnScore(addr), initial_score); + + // SeenLocal increments the score + BOOST_CHECK(SeenLocal(addr)); + BOOST_CHECK_EQUAL(GetnScore(addr), initial_score + 1); + + // SeenLocal saturates at max + RemoveLocal(addr); + BOOST_REQUIRE(AddLocal(addr, std::numeric_limits::max())); + 
BOOST_CHECK_EQUAL(GetnScore(addr), std::numeric_limits::max()); + + // a couple increments should saturate + for (int i = 0; i < 2; ++i) { + BOOST_CHECK(SeenLocal(addr)); + BOOST_CHECK_EQUAL(GetnScore(addr), std::numeric_limits::max()); + } + + RemoveLocal(addr); + BOOST_CHECK(!IsLocal(addr)); +} + BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) { LOCK(NetEventsInterface::g_msgproc_mutex); From 824de1e6b32c898408c8a03fb75f09454eb8dcca Mon Sep 17 00:00:00 2001 From: sedited Date: Mon, 9 Feb 2026 10:14:48 +0100 Subject: [PATCH 270/356] build: Bump version to 29.3 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ed59b307cd..6e2c868089 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -30,7 +30,7 @@ set(CLIENT_NAME "Bitcoin Core") set(CLIENT_VERSION_MAJOR 29) set(CLIENT_VERSION_MINOR 3) set(CLIENT_VERSION_BUILD 0) -set(CLIENT_VERSION_RC 2) +set(CLIENT_VERSION_RC 0) set(CLIENT_VERSION_IS_RELEASE "true") set(COPYRIGHT_YEAR "2025") From ea6af54a637bb06e66a233a2a6ff3b9d145f9414 Mon Sep 17 00:00:00 2001 From: sedited Date: Mon, 9 Feb 2026 10:16:24 +0100 Subject: [PATCH 271/356] doc: Update release notes for 29.3 --- doc/release-notes.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/release-notes.md b/doc/release-notes.md index 4aebc143d7..98713c6cea 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,6 +1,6 @@ -Bitcoin Core version 29.3rc2 is now available from: +Bitcoin Core version 29.3 is now available from: - + This release includes various bug fixes and performance improvements, as well as updated translations. 
From c9a7bd90eb5a60657e68f33a03782f2a9ceefe86 Mon Sep 17 00:00:00 2001 From: sedited Date: Mon, 9 Feb 2026 10:24:53 +0100 Subject: [PATCH 272/356] doc: Update man pages for v29.3 --- doc/man/bitcoin-cli.1 | 6 +++--- doc/man/bitcoin-qt.1 | 6 +++--- doc/man/bitcoin-tx.1 | 6 +++--- doc/man/bitcoin-util.1 | 6 +++--- doc/man/bitcoin-wallet.1 | 6 +++--- doc/man/bitcoind.1 | 6 +++--- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/doc/man/bitcoin-cli.1 b/doc/man/bitcoin-cli.1 index e762c0fa39..6146224cdd 100644 --- a/doc/man/bitcoin-cli.1 +++ b/doc/man/bitcoin-cli.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-CLI "1" "January 2026" "bitcoin-cli v29.3.0rc2" "User Commands" +.TH BITCOIN-CLI "1" "February 2026" "bitcoin-cli v29.3.0" "User Commands" .SH NAME -bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc2 +bitcoin-cli \- manual page for bitcoin-cli v29.3.0 .SH SYNOPSIS .B bitcoin-cli [\fI\,options\/\fR] \fI\, \/\fR[\fI\,params\/\fR] @@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc2 .B bitcoin-cli [\fI\,options\/\fR] \fI\,help \/\fR .SH DESCRIPTION -Bitcoin Core RPC client version v29.3.0rc2 +Bitcoin Core RPC client version v29.3.0 .PP The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server. .PP diff --git a/doc/man/bitcoin-qt.1 b/doc/man/bitcoin-qt.1 index 4a990e8082..a4fec08c7a 100644 --- a/doc/man/bitcoin-qt.1 +++ b/doc/man/bitcoin-qt.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-QT "1" "January 2026" "bitcoin-qt v29.3.0rc2" "User Commands" +.TH BITCOIN-QT "1" "February 2026" "bitcoin-qt v29.3.0" "User Commands" .SH NAME -bitcoin-qt \- manual page for bitcoin-qt v29.3.0rc2 +bitcoin-qt \- manual page for bitcoin-qt v29.3.0 .SH SYNOPSIS .B bitcoin-qt [\fI\,options\/\fR] [\fI\,URI\/\fR] .SH DESCRIPTION -Bitcoin Core version v29.3.0rc2 +Bitcoin Core version v29.3.0 .PP The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core. .PP diff --git a/doc/man/bitcoin-tx.1 b/doc/man/bitcoin-tx.1 index 7b3b996afd..a58bfea152 100644 --- a/doc/man/bitcoin-tx.1 +++ b/doc/man/bitcoin-tx.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-TX "1" "January 2026" "bitcoin-tx v29.3.0rc2" "User Commands" +.TH BITCOIN-TX "1" "February 2026" "bitcoin-tx v29.3.0" "User Commands" .SH NAME -bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc2 +bitcoin-tx \- manual page for bitcoin-tx v29.3.0 .SH SYNOPSIS .B bitcoin-tx [\fI\,options\/\fR] \fI\, \/\fR[\fI\,commands\/\fR] @@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc2 .B bitcoin-tx [\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR] .SH DESCRIPTION -Bitcoin Core bitcoin\-tx utility version v29.3.0rc2 +Bitcoin Core bitcoin\-tx utility version v29.3.0 .PP The bitcoin\-tx tool is used for creating and modifying bitcoin transactions. .PP diff --git a/doc/man/bitcoin-util.1 b/doc/man/bitcoin-util.1 index d10f4b48b3..b7e0127ae1 100644 --- a/doc/man/bitcoin-util.1 +++ b/doc/man/bitcoin-util.1 @@ -1,7 +1,7 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIN-UTIL "1" "January 2026" "bitcoin-util v29.3.0rc2" "User Commands" +.TH BITCOIN-UTIL "1" "February 2026" "bitcoin-util v29.3.0" "User Commands" .SH NAME -bitcoin-util \- manual page for bitcoin-util v29.3.0rc2 +bitcoin-util \- manual page for bitcoin-util v29.3.0 .SH SYNOPSIS .B bitcoin-util [\fI\,options\/\fR] [\fI\,command\/\fR] @@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.3.0rc2 .B bitcoin-util [\fI\,options\/\fR] \fI\,grind \/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-util utility version v29.3.0rc2 +Bitcoin Core bitcoin\-util utility version v29.3.0 .PP The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below. .SH OPTIONS diff --git a/doc/man/bitcoin-wallet.1 b/doc/man/bitcoin-wallet.1 index c8737d7989..96c2f4e1c9 100644 --- a/doc/man/bitcoin-wallet.1 +++ b/doc/man/bitcoin-wallet.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. -.TH BITCOIN-WALLET "1" "January 2026" "bitcoin-wallet v29.3.0rc2" "User Commands" +.TH BITCOIN-WALLET "1" "February 2026" "bitcoin-wallet v29.3.0" "User Commands" .SH NAME -bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0rc2 +bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0 .SH SYNOPSIS .B bitcoin-wallet [\fI\,options\/\fR] \fI\,\/\fR .SH DESCRIPTION -Bitcoin Core bitcoin\-wallet utility version v29.3.0rc2 +Bitcoin Core bitcoin\-wallet utility version v29.3.0 .PP bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files. .PP diff --git a/doc/man/bitcoind.1 b/doc/man/bitcoind.1 index bcf2354452..10ec23f695 100644 --- a/doc/man/bitcoind.1 +++ b/doc/man/bitcoind.1 @@ -1,12 +1,12 @@ .\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3. 
-.TH BITCOIND "1" "January 2026" "bitcoind v29.3.0rc2" "User Commands" +.TH BITCOIND "1" "February 2026" "bitcoind v29.3.0" "User Commands" .SH NAME -bitcoind \- manual page for bitcoind v29.3.0rc2 +bitcoind \- manual page for bitcoind v29.3.0 .SH SYNOPSIS .B bitcoind [\fI\,options\/\fR] .SH DESCRIPTION -Bitcoin Core daemon version v29.3.0rc2 +Bitcoin Core daemon version v29.3.0 .PP The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses. .PP From c4c558b2c45f6b05c3cc5bf08ca8e22364632c61 Mon Sep 17 00:00:00 2001 From: merge-script Date: Tue, 10 Feb 2026 02:26:19 +0000 Subject: [PATCH 273/356] Revert "Merge 33475 via fix_block_full_enough_underflow-29+k" This reverts commit dc8ce7dbd471e41df2368ab90f45c0d97fb54739, reversing changes made to 11d375abca565a4993993b2d7a82296b4566321c. --- src/node/miner.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 07a0a3b5c5..2b9c07d469 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -395,8 +395,8 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda ++nConsecutiveFailed; - if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight + - BLOCK_FULL_ENOUGH_WEIGHT_DELTA > m_options.nBlockMaxWeight) { + if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight > + m_options.nBlockMaxWeight - BLOCK_FULL_ENOUGH_WEIGHT_DELTA) { // Give up if we're close to full and haven't succeeded in a while break; } From 945bdce2958dbbeec73a2e9463ac367d3e66400f Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Sat, 7 Feb 2026 02:25:57 +0000 Subject: [PATCH 274/356] Wallet: Even if addresstype==legacy, use non-legacy change if there's no legacy sPKman --- src/wallet/wallet.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 162d7f9ec7..46a6556bc2 
100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -2210,7 +2210,7 @@ OutputType CWallet::TransactionChangeType(const std::optional& chang } // if m_default_address_type is legacy, use legacy address as change. - if (m_default_address_type == OutputType::LEGACY) { + if (m_default_address_type == OutputType::LEGACY && GetScriptPubKeyMan(OutputType::LEGACY, /*internal=*/true)) { return OutputType::LEGACY; } From fd01caa5de317cfceafdcf395c5871accee68f6b Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 10 Feb 2026 02:45:00 +0000 Subject: [PATCH 275/356] QA: rpc_psbt: Test walletcreatefundedpsbt with addresstype=legacy but no legacy change descriptors --- test/functional/rpc_psbt.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py index 60df48f025..ccbf52fbbf 100755 --- a/test/functional/rpc_psbt.py +++ b/test/functional/rpc_psbt.py @@ -176,6 +176,20 @@ def test_input_confs_control(self): wallet.unloadwallet() + def test_addresstype_legacy_with_no_legacy_change(self): + self.generate(self.nodes[2], 1) + self.log.info("Test walletcreatefundedpsbt with addresstype=legacy but no legacy change descriptors") + self.restart_node(2, extra_args=["-addresstype=legacy"]) + self.connect_nodes(0, 2) + self.connect_nodes(1, 2) + self.nodes[2].createwallet(wallet_name='no_legacy_change', blank=True) + w = self.nodes[2].get_wallet_rpc('no_legacy_change') + xprv = 'tprv8ZgxMBicQKsPevADjDCWsa6DfhkVXicu8NQUzfibwX2MexVwW4tCec5mXdCW8kJwkzBRRmAay1KZya4WsehVvjTGVW6JLqiqd8DdZ4xSg52' + assert w.importdescriptors([{"desc": descsum_create(f'tr({xprv}/*)'), "internal": True, "timestamp":"now", 'active': True, 'range': (0,100)}])[0]['success'] + self.nodes[0].sendtoaddress(w.getrawchangeaddress(address_type='bech32m'), 20) + self.generate(self.nodes[0], 6) + w.walletcreatefundedpsbt([], {self.nodes[0].getnewaddress():10})['psbt'] + def assert_change_type(self, psbtx, expected_type): """Assert that the 
given PSBT has a change output with the given type.""" @@ -998,6 +1012,9 @@ def test_psbt_input_keys(psbt_input, keys): self.log.info("Test descriptorprocesspsbt raises if an invalid sighashtype is passed") assert_raises_rpc_error(-8, "all is not a valid sighash parameter.", self.nodes[2].descriptorprocesspsbt, psbt, [descriptor], sighashtype="all") + if self.options.descriptors: + self.test_addresstype_legacy_with_no_legacy_change() + if __name__ == '__main__': PSBTTest().main() From 1a33e563c905c43c1ccea726c8e764740546f87b Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 3 Oct 2025 14:20:12 +0000 Subject: [PATCH 276/356] Use TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED for non-consensus Taproot logic --- src/psbt.h | 2 +- src/script/descriptor.cpp | 2 +- src/script/interpreter.h | 2 ++ src/script/signingprovider.cpp | 4 ++-- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/psbt.h b/src/psbt.h index 6d49864b3c..88a22a84ae 100644 --- a/src/psbt.h +++ b/src/psbt.h @@ -872,7 +872,7 @@ struct PSBTOutput s_tree >> depth; s_tree >> leaf_ver; s_tree >> script; - if (depth > TAPROOT_CONTROL_MAX_NODE_COUNT) { + if (depth > TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED) { throw std::ios_base::failure("Output Taproot tree has as leaf greater than Taproot maximum depth"); } if ((leaf_ver & ~TAPROOT_LEAF_MASK) != 0) { diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 88966b1a3c..a1615fbdcc 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -1967,7 +1967,7 @@ std::vector> ParseScript(uint32_t& key_exp_index // First process all open braces. 
while (Const("{", expr)) { branches.push_back(false); // new left branch - if (branches.size() > TAPROOT_CONTROL_MAX_NODE_COUNT) { + if (branches.size() > TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED) { error = strprintf("tr() supports at most %i nesting levels", TAPROOT_CONTROL_MAX_NODE_COUNT); return {}; } diff --git a/src/script/interpreter.h b/src/script/interpreter.h index b56933c644..15b4d09dd3 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -234,6 +234,8 @@ static constexpr size_t TAPROOT_CONTROL_BASE_SIZE = 33; static constexpr size_t TAPROOT_CONTROL_NODE_SIZE = 32; static constexpr size_t TAPROOT_CONTROL_MAX_NODE_COUNT = 128; static constexpr size_t TAPROOT_CONTROL_MAX_SIZE = TAPROOT_CONTROL_BASE_SIZE + TAPROOT_CONTROL_NODE_SIZE * TAPROOT_CONTROL_MAX_NODE_COUNT; +static constexpr size_t TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED = 7; +static constexpr size_t TAPROOT_CONTROL_MAX_SIZE_REDUCED = TAPROOT_CONTROL_BASE_SIZE + TAPROOT_CONTROL_NODE_SIZE * TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED; extern const HashWriter HASHER_TAPSIGHASH; //!< Hasher with tag "TapSighash" pre-fed to it. extern const HashWriter HASHER_TAPLEAF; //!< Hasher with tag "TapLeaf" pre-fed to it. diff --git a/src/script/signingprovider.cpp b/src/script/signingprovider.cpp index d029ee1a96..e2f85adfa0 100644 --- a/src/script/signingprovider.cpp +++ b/src/script/signingprovider.cpp @@ -365,7 +365,7 @@ void TaprootBuilder::Insert(TaprootBuilder::NodeInfo&& node, int depth) // as what Insert() performs on the m_branch variable. Instead of // storing a NodeInfo object, just remember whether or not there is one // at that depth. 
- if (depth < 0 || (size_t)depth > TAPROOT_CONTROL_MAX_NODE_COUNT) return false; + if (depth < 0 || (size_t)depth > TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED) return false; if ((size_t)depth + 1 < branch.size()) return false; while (branch.size() > (size_t)depth && branch[depth]) { branch.pop_back(); @@ -478,7 +478,7 @@ std::optional, int>>> Inf // Skip script records with nonsensical leaf version. if (leaf_ver < 0 || leaf_ver >= 0x100 || leaf_ver & 1) continue; // Skip script records with invalid control block sizes. - if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > TAPROOT_CONTROL_MAX_SIZE || + if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > TAPROOT_CONTROL_MAX_SIZE_REDUCED || ((control.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) continue; // Skip script records that don't match the control block. if ((control[0] & TAPROOT_LEAF_MASK) != leaf_ver) continue; From aabf3d8bd5f4d268a9581bb7f13746c1d5ded970 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Sat, 4 Oct 2025 12:58:18 +0000 Subject: [PATCH 277/356] Policy: Enforce SCRIPT_VERIFY_REDUCED_DATA as a policy rule --- src/policy/policy.h | 3 ++- src/script/interpreter.h | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/src/policy/policy.h b/src/policy/policy.h index 9dbab66a75..d19433285e 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -170,7 +170,8 @@ static constexpr unsigned int STANDARD_SCRIPT_VERIFY_FLAGS{MANDATORY_SCRIPT_VERI SCRIPT_VERIFY_CONST_SCRIPTCODE | SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION | SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS | - SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE}; + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE | + SCRIPT_VERIFY_REDUCED_DATA}; /** For convenience, standard but not mandatory verify flags. 
*/ static constexpr unsigned int STANDARD_NOT_MANDATORY_VERIFY_FLAGS{STANDARD_SCRIPT_VERIFY_FLAGS & ~MANDATORY_SCRIPT_VERIFY_FLAGS}; diff --git a/src/script/interpreter.h b/src/script/interpreter.h index 15b4d09dd3..22e234fb76 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -143,6 +143,9 @@ enum : uint32_t { // Making unknown public key versions (in BIP 342 scripts) non-standard SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE = (1U << 20), + // TBD + SCRIPT_VERIFY_REDUCED_DATA = (1U << 21), + // Constants to point to the highest flag in use. Add new flags above this line. // SCRIPT_VERIFY_END_MARKER From f8efd5b4f3570515f559d4e1221748d44e6330a8 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Mon, 6 Oct 2025 15:29:51 +0000 Subject: [PATCH 278/356] script: Define SCRIPT_VERIFY_REDUCED_DATA verification flag (unused) to reduce data push size limit to 256 bytes (except for P2SH redeemScript push) --- src/script/interpreter.cpp | 22 ++++++++++++++++++++-- src/script/interpreter.h | 3 ++- src/script/script.h | 1 + 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 7d32fec1f1..78354c8d57 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -433,6 +433,8 @@ bool EvalScript(std::vector >& stack, const CScript& execdata.m_codeseparator_pos = 0xFFFFFFFFUL; execdata.m_codeseparator_pos_init = true; + const unsigned int max_element_size = (flags & SCRIPT_VERIFY_REDUCED_DATA) ? 
MAX_SCRIPT_ELEMENT_SIZE_REDUCED : MAX_SCRIPT_ELEMENT_SIZE; + try { for (; pc < pend; ++opcode_pos) { @@ -443,7 +445,7 @@ bool EvalScript(std::vector >& stack, const CScript& // if (!script.GetOp(pc, opcode, vchPushValue)) return set_error(serror, SCRIPT_ERR_BAD_OPCODE); - if (vchPushValue.size() > MAX_SCRIPT_ELEMENT_SIZE) + if (vchPushValue.size() > max_element_size) return set_error(serror, SCRIPT_ERR_PUSH_SIZE); if (sigversion == SigVersion::BASE || sigversion == SigVersion::WITNESS_V0) { @@ -1858,8 +1860,9 @@ static bool ExecuteWitnessScript(const Span& stack_span, const CS } // Disallow stack item size > MAX_SCRIPT_ELEMENT_SIZE in witness stack + const unsigned int max_element_size = (flags & SCRIPT_VERIFY_REDUCED_DATA) ? MAX_SCRIPT_ELEMENT_SIZE_REDUCED : MAX_SCRIPT_ELEMENT_SIZE; for (const valtype& elem : stack) { - if (elem.size() > MAX_SCRIPT_ELEMENT_SIZE) return set_error(serror, SCRIPT_ERR_PUSH_SIZE); + if (elem.size() > max_element_size) return set_error(serror, SCRIPT_ERR_PUSH_SIZE); } // Run the script interpreter. 
@@ -2018,6 +2021,12 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C // scriptSig and scriptPubKey must be evaluated sequentially on the same stack // rather than being simply concatenated (see CVE-2010-5141) std::vector > stack, stackCopy; + if (scriptPubKey.IsPayToScriptHash()) { + // Disable SCRIPT_VERIFY_REDUCED_DATA for pushing the P2SH redeemScript + if (!EvalScript(stack, scriptSig, flags & ~SCRIPT_VERIFY_REDUCED_DATA, checker, SigVersion::BASE, serror)) + // serror is set + return false; + } else if (!EvalScript(stack, scriptSig, flags, checker, SigVersion::BASE, serror)) // serror is set return false; @@ -2069,6 +2078,15 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C CScript pubKey2(pubKeySerialized.begin(), pubKeySerialized.end()); popstack(stack); + if (flags & SCRIPT_VERIFY_REDUCED_DATA) { + // We bypassed the reduced data check above to exempt redeemScript + // Now enforce it on the rest of the stack items here + // This is sufficient because P2SH requires scriptSig to be push-only + for (const valtype& elem : stack) { + if (elem.size() > MAX_SCRIPT_ELEMENT_SIZE_REDUCED) return set_error(serror, SCRIPT_ERR_PUSH_SIZE); + } + } + if (!EvalScript(stack, pubKey2, flags, checker, SigVersion::BASE, serror)) // serror is set return false; diff --git a/src/script/interpreter.h b/src/script/interpreter.h index 22e234fb76..e637bdbd38 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -143,7 +143,8 @@ enum : uint32_t { // Making unknown public key versions (in BIP 342 scripts) non-standard SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE = (1U << 20), - // TBD + // Enforce MAX_SCRIPT_ELEMENT_SIZE_REDUCED instead of MAX_SCRIPT_ELEMENT_SIZE + // The P2SH redeemScript push is exempted SCRIPT_VERIFY_REDUCED_DATA = (1U << 21), // Constants to point to the highest flag in use. Add new flags above this line. 
diff --git a/src/script/script.h b/src/script/script.h index f38d158119..2e532f9c55 100644 --- a/src/script/script.h +++ b/src/script/script.h @@ -26,6 +26,7 @@ // Maximum number of bytes pushable to the stack static const unsigned int MAX_SCRIPT_ELEMENT_SIZE = 520; +static const unsigned int MAX_SCRIPT_ELEMENT_SIZE_REDUCED = 256; // Maximum number of non-push operations per script static const int MAX_OPS_PER_SCRIPT = 201; From 67f909f722e11b93191df2dd19f703608ebedf49 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 3 Oct 2025 13:32:53 +0000 Subject: [PATCH 279/356] script: Limit Taproot annex to 256 bytes for SCRIPT_VERIFY_REDUCED_DATA (still unused) --- src/script/interpreter.cpp | 3 +++ src/script/interpreter.h | 1 + 2 files changed, 4 insertions(+) diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 78354c8d57..9a0b9a28e4 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -1956,6 +1956,9 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, if (stack.size() >= 2 && !stack.back().empty() && stack.back()[0] == ANNEX_TAG) { // Drop annex (this is non-standard; see IsWitnessStandard) const valtype& annex = SpanPopBack(stack); + if ((flags & SCRIPT_VERIFY_REDUCED_DATA) && annex.size() > MAX_SCRIPT_ELEMENT_SIZE_REDUCED) { + return set_error(serror, SCRIPT_ERR_PUSH_SIZE); + } execdata.m_annex_hash = (HashWriter{} << annex).GetSHA256(); execdata.m_annex_present = true; } else { diff --git a/src/script/interpreter.h b/src/script/interpreter.h index e637bdbd38..9c9fddae1b 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -145,6 +145,7 @@ enum : uint32_t { // Enforce MAX_SCRIPT_ELEMENT_SIZE_REDUCED instead of MAX_SCRIPT_ELEMENT_SIZE // The P2SH redeemScript push is exempted + // Taproot annex is also limited to MAX_SCRIPT_ELEMENT_SIZE_REDUCED SCRIPT_VERIFY_REDUCED_DATA = (1U << 21), // Constants to point to the highest flag in use. Add new flags above this line. 
From 8573a143b63190863539170668019e87083024ed Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 3 Oct 2025 14:29:08 +0000 Subject: [PATCH 280/356] script: Forbid Taproot annex entirely with SCRIPT_VERIFY_REDUCED_DATA (still unused) --- src/script/interpreter.cpp | 2 +- src/script/interpreter.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 9a0b9a28e4..0b1fc8aa4e 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -1956,7 +1956,7 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, if (stack.size() >= 2 && !stack.back().empty() && stack.back()[0] == ANNEX_TAG) { // Drop annex (this is non-standard; see IsWitnessStandard) const valtype& annex = SpanPopBack(stack); - if ((flags & SCRIPT_VERIFY_REDUCED_DATA) && annex.size() > MAX_SCRIPT_ELEMENT_SIZE_REDUCED) { + if (flags & SCRIPT_VERIFY_REDUCED_DATA) { return set_error(serror, SCRIPT_ERR_PUSH_SIZE); } execdata.m_annex_hash = (HashWriter{} << annex).GetSHA256(); diff --git a/src/script/interpreter.h b/src/script/interpreter.h index 9c9fddae1b..0ca44a0325 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -145,7 +145,7 @@ enum : uint32_t { // Enforce MAX_SCRIPT_ELEMENT_SIZE_REDUCED instead of MAX_SCRIPT_ELEMENT_SIZE // The P2SH redeemScript push is exempted - // Taproot annex is also limited to MAX_SCRIPT_ELEMENT_SIZE_REDUCED + // Taproot annex is also invalid SCRIPT_VERIFY_REDUCED_DATA = (1U << 21), // Constants to point to the highest flag in use. Add new flags above this line. 
From 1693af76950346ff1936d8e971fed3b5ae797583 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 3 Oct 2025 14:42:16 +0000 Subject: [PATCH 281/356] script: Forbid OP_IF in Tapscript with SCRIPT_VERIFY_REDUCED_DATA (still unused) Changed from original 5522cbdbd5: use opcode-level script.raw_iter() instead of byte-level `OP_IF in script` check in feature_taproot.py, which falsely matched 0x63 inside data pushes. Reproduces with --randomseed=3388244646249704915. Co-authored-by: Dathon Ohm --- src/script/interpreter.cpp | 5 +++++ src/script/interpreter.h | 1 + test/functional/feature_taproot.py | 10 ++++++++-- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 0b1fc8aa4e..1dcece0439 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -618,6 +618,11 @@ bool EvalScript(std::vector >& stack, const CScript& if (vch.size() > 1 || (vch.size() == 1 && vch[0] != 1)) { return set_error(serror, SCRIPT_ERR_TAPSCRIPT_MINIMALIF); } + // REDUCED_DATA bans OP_IF/OP_NOTIF entirely in tapscript; + // reuses MINIMALIF error code as this is a stricter form of the same restriction + if (flags & SCRIPT_VERIFY_REDUCED_DATA) { + return set_error(serror, SCRIPT_ERR_TAPSCRIPT_MINIMALIF); + } } // Under witness v0 rules it is only a policy rule, enabled through SCRIPT_VERIFY_MINIMALIF. if (sigversion == SigVersion::WITNESS_V0 && (flags & SCRIPT_VERIFY_MINIMALIF)) { diff --git a/src/script/interpreter.h b/src/script/interpreter.h index 0ca44a0325..fd7dc15108 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -146,6 +146,7 @@ enum : uint32_t { // Enforce MAX_SCRIPT_ELEMENT_SIZE_REDUCED instead of MAX_SCRIPT_ELEMENT_SIZE // The P2SH redeemScript push is exempted // Taproot annex is also invalid + // OP_IF is also forbidden inside Tapscript SCRIPT_VERIFY_REDUCED_DATA = (1U << 21), // Constants to point to the highest flag in use. Add new flags above this line. 
diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py index 1f91fe1743..f2b3a4b079 100755 --- a/test/functional/feature_taproot.py +++ b/test/functional/feature_taproot.py @@ -776,6 +776,7 @@ def spenders_taproot_active(): tap = taproot_construct(pubs[0], scripts) add_spender(spenders, "sighash/pk_codesep", tap=tap, leaf="pk_codesep", key=secs[1], **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) add_spender(spenders, "sighash/codesep_pk", tap=tap, leaf="codesep_pk", key=secs[1], codeseppos=0, **common, **SINGLE_SIG, **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) + common['standard'] = False add_spender(spenders, "sighash/branched_codesep/left", tap=tap, leaf="branched_codesep", key=secs[0], codeseppos=3, **common, inputs=[getter("sign"), b'\x01'], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) add_spender(spenders, "sighash/branched_codesep/right", tap=tap, leaf="branched_codesep", key=secs[1], codeseppos=6, **common, inputs=[getter("sign"), b''], **SIGHASH_BITFLIP, **ERR_SIG_SCHNORR) @@ -1039,10 +1040,13 @@ def big_spend_inputs(ctx): add_spender(spenders, "tapscript/disabled_checkmultisig", leaf="t1", **common, **SINGLE_SIG, failure={"leaf": "t3"}, **ERR_TAPSCRIPT_CHECKMULTISIG) add_spender(spenders, "tapscript/disabled_checkmultisigverify", leaf="t2", **common, **SINGLE_SIG, failure={"leaf": "t4"}, **ERR_TAPSCRIPT_CHECKMULTISIG) # Test that OP_IF and OP_NOTIF do not accept non-0x01 as truth value (the MINIMALIF rule is consensus in Tapscript) + assert 'standard' not in common + common['standard'] = False add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x02']}, **ERR_MINIMALIF) add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x03']}, **ERR_MINIMALIF) add_spender(spenders, "tapscript/minimalif", leaf="t5", **common, inputs=[getter("sign"), b'\x01'], 
failure={"inputs": [getter("sign"), b'\x0001']}, **ERR_MINIMALIF) add_spender(spenders, "tapscript/minimalnotif", leaf="t6", **common, inputs=[getter("sign"), b'\x01'], failure={"inputs": [getter("sign"), b'\x0100']}, **ERR_MINIMALIF) + del common['standard'] # Test that 1-byte public keys (which are unknown) are acceptable but nonstandard with unrelated signatures, but 0-byte public keys are not valid. add_spender(spenders, "tapscript/unkpk/checksig", leaf="t16", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t7"}, **ERR_UNKNOWN_PUBKEY) add_spender(spenders, "tapscript/unkpk/checksigadd", leaf="t17", standard=False, **common, **SINGLE_SIG, failure={"leaf": "t10"}, **ERR_UNKNOWN_PUBKEY) @@ -1131,11 +1135,13 @@ def predict_sigops_ratio(n, dummy_size): dummylen = 0 while not predict_sigops_ratio(n, dummylen): dummylen += 1 - scripts = [("s", fn(n, pubkey)[0])] + script = fn(n, pubkey)[0] + scripts = [("s", script)] for _ in range(merkledepth): scripts = [scripts, random.choice(PARTNER_MERKLE_FN)] tap = taproot_construct(pubs[0], scripts) - standard = annex is None and dummylen <= 80 and len(pubkey) == 32 + has_conditional = any(op in (OP_IF, OP_NOTIF) for op, _, _ in script.raw_iter()) + standard = annex is None and dummylen <= 80 and len(pubkey) == 32 and not has_conditional add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random.randbytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random.randbytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO) # Future leaf versions From 077de802eb1b317f53f15cce0eedc1bf0f698cce Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 3 Oct 2025 14:07:23 +0000 Subject: [PATCH 282/356] script: Limit Taproot control block to 257 bytes for SCRIPT_VERIFY_REDUCED_DATA (still unused) --- src/script/interpreter.cpp | 3 ++- src/script/interpreter.h | 1 + test/functional/feature_taproot.py | 8 ++++++-- 
test/functional/test_framework/script.py | 1 + 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 1dcece0439..c823a615e3 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -1980,7 +1980,8 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, // Script path spending (stack size is >1 after removing optional annex) const valtype& control = SpanPopBack(stack); const valtype& script = SpanPopBack(stack); - if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > TAPROOT_CONTROL_MAX_SIZE || ((control.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) { + const unsigned int max_control_size = (flags & SCRIPT_VERIFY_REDUCED_DATA) ? TAPROOT_CONTROL_MAX_SIZE_REDUCED : TAPROOT_CONTROL_MAX_SIZE; + if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > max_control_size || ((control.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) { return set_error(serror, SCRIPT_ERR_TAPROOT_WRONG_CONTROL_SIZE); } execdata.m_tapleaf_hash = ComputeTapleafHash(control[0] & TAPROOT_LEAF_MASK, script); diff --git a/src/script/interpreter.h b/src/script/interpreter.h index fd7dc15108..0f641acc87 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -145,6 +145,7 @@ enum : uint32_t { // Enforce MAX_SCRIPT_ELEMENT_SIZE_REDUCED instead of MAX_SCRIPT_ELEMENT_SIZE // The P2SH redeemScript push is exempted + // Taproot control blocks are limited to TAPROOT_CONTROL_MAX_SIZE_REDUCED // Taproot annex is also invalid // OP_IF is also forbidden inside Tapscript SCRIPT_VERIFY_REDUCED_DATA = (1U << 21), diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py index f2b3a4b079..c0980dea9f 100755 --- a/test/functional/feature_taproot.py +++ b/test/functional/feature_taproot.py @@ -80,6 +80,7 @@ SIGHASH_ANYONECANPAY, SegwitV0SignatureMsg, TaggedHash, + 
TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED, TaprootSignatureMsg, is_op_success, taproot_construct, @@ -892,6 +893,8 @@ def mutate(spk): scripts = [scripts, random.choice(PARTNER_MERKLE_FN)] tap = taproot_construct(pubs[0], scripts) # Test that spends with a depth of 128 work, but 129 doesn't (even with a tree with weird Merkle branches in it). + assert 'standard' not in SINGLE_SIG + SINGLE_SIG['standard'] = False add_spender(spenders, "spendpath/merklelimit", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"leaf": "129deep"}, **ERR_CONTROLBLOCK_SIZE) # Test that flipping the negation bit invalidates spends. add_spender(spenders, "spendpath/negflag", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"negflag": lambda ctx: 1 - default_negflag(ctx)}, **ERR_WITNESS_PROGRAM_MISMATCH) @@ -905,6 +908,7 @@ def mutate(spk): add_spender(spenders, "spendpath/padlongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_controlblock(ctx) + random.randbytes(random.randrange(1, 32))}, **ERR_CONTROLBLOCK_SIZE) # Test that truncating the control block invalidates it. 
add_spender(spenders, "spendpath/trunclongcontrol", tap=tap, leaf="128deep", **SINGLE_SIG, key=secs[0], failure={"controlblock": lambda ctx: default_merklebranch(ctx)[0:random.randrange(1, 32)]}, **ERR_CONTROLBLOCK_SIZE) + del SINGLE_SIG['standard'] scripts = [("s", CScript([pubs[0], OP_CHECKSIG]))] tap = taproot_construct(pubs[1], scripts) @@ -1023,7 +1027,7 @@ def big_spend_inputs(ctx): ("t36", CScript([])), ] # Add many dummies to test huge trees - for j in range(100000): + for j in range(min(100000, 2**TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED - len(scripts))): scripts.append((None, CScript([OP_RETURN, random.randrange(100000)]))) random.shuffle(scripts) tap = taproot_construct(pubs[0], scripts) @@ -1141,7 +1145,7 @@ def predict_sigops_ratio(n, dummy_size): scripts = [scripts, random.choice(PARTNER_MERKLE_FN)] tap = taproot_construct(pubs[0], scripts) has_conditional = any(op in (OP_IF, OP_NOTIF) for op, _, _ in script.raw_iter()) - standard = annex is None and dummylen <= 80 and len(pubkey) == 32 and not has_conditional + standard = annex is None and dummylen <= 80 and len(pubkey) == 32 and not has_conditional and merkledepth <= TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED add_spender(spenders, "tapscript/sigopsratio_%i" % fn_num, tap=tap, leaf="s", annex=annex, hashtype=hashtype, key=secs[1], inputs=[getter("sign"), random.randbytes(dummylen)], standard=standard, failure={"inputs": [getter("sign"), random.randbytes(dummylen - 1)]}, **ERR_SIGOPS_RATIO) # Future leaf versions diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index d510cf9b1c..97008bda4d 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -29,6 +29,7 @@ LOCKTIME_THRESHOLD = 500000000 ANNEX_TAG = 0x50 +TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED = 7 LEAF_VERSION_TAPSCRIPT = 0xc0 def hash160(s): From 77af6c2c4bf1ce6889f73ee759ea3402da94c56e Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Sat, 4 Oct 2025 13:17:18 
+0000 Subject: [PATCH 283/356] consensus: Enforce SCRIPT_VERIFY_REDUCED_DATA if DEPLOYMENT_REDUCED_DATA is active (still never) --- src/validation.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/validation.cpp b/src/validation.cpp index 4011ed0038..cbebdcf8d6 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2680,6 +2680,10 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const Ch flags |= SCRIPT_VERIFY_NULLDUMMY; } + if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_REDUCED_DATA)) { + flags |= SCRIPT_VERIFY_REDUCED_DATA; + } + return flags; } From 4a8d8d04906ed5c9a4ee715d3977a449ba81a106 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Sat, 4 Oct 2025 14:32:23 +0000 Subject: [PATCH 284/356] Limit datacarriersize config to MAX_OUTPUT_DATA_SIZE (=83 B) --- src/consensus/consensus.h | 2 ++ src/init.cpp | 3 ++- src/node/mempool_args.cpp | 4 ++++ test/functional/test_framework/test_node.py | 6 ++++++ 4 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/consensus/consensus.h b/src/consensus/consensus.h index cffe9cdafd..2c90442871 100644 --- a/src/consensus/consensus.h +++ b/src/consensus/consensus.h @@ -34,4 +34,6 @@ static constexpr unsigned int LOCKTIME_VERIFY_SEQUENCE = (1 << 0); */ static constexpr int64_t MAX_TIMEWARP = 600; +static constexpr unsigned int MAX_OUTPUT_DATA_SIZE{83}; + #endif // BITCOIN_CONSENSUS_CONSENSUS_H diff --git a/src/init.cpp b/src/init.cpp index 7b6b188c25..585bb9220c 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -716,7 +716,8 @@ void SetupServerArgs(ArgsManager& argsman, bool can_listen_ipc) argsman.AddArg("-datacarriercost", strprintf("Treat extra data in transactions as at least N vbytes per actual byte (default: %s)", DEFAULT_WEIGHT_PER_DATA_BYTE / 4.0), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); argsman.AddArg("-datacarrierfullcount", strprintf("Apply datacarriersize limit to all known datacarrier methods (default: %u)", 
DEFAULT_DATACARRIER_FULLCOUNT), ArgsManager::ALLOW_ANY | (DEFAULT_DATACARRIER_FULLCOUNT ? uint32_t{ArgsManager::DEBUG_ONLY} : 0), OptionsCategory::NODE_RELAY); argsman.AddArg("-datacarriersize", - strprintf("Maximum size of data in data carrier transactions we relay and mine, in bytes (default: %u)", + strprintf("Maximum size of data in data carrier transactions we relay and mine, in bytes (maximum %s, default: %u)", + MAX_OUTPUT_DATA_SIZE, MAX_OP_RETURN_RELAY), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); argsman.AddArg("-maxscriptsize", strprintf("Maximum size of scripts (including the entire witness stack) we relay and mine, in bytes (default: %s)", DEFAULT_SCRIPT_SIZE_POLICY_LIMIT), ArgsManager::ALLOW_ANY, OptionsCategory::NODE_RELAY); diff --git a/src/node/mempool_args.cpp b/src/node/mempool_args.cpp index 1d82d8f442..f2f36931cf 100644 --- a/src/node/mempool_args.cpp +++ b/src/node/mempool_args.cpp @@ -207,6 +207,10 @@ util::Result ApplyArgsManOptions(const ArgsManager& argsman, const CChainP if (argsman.GetBoolArg("-datacarrier", DEFAULT_ACCEPT_DATACARRIER)) { mempool_opts.max_datacarrier_bytes = argsman.GetIntArg("-datacarriersize", MAX_OP_RETURN_RELAY); + if (mempool_opts.max_datacarrier_bytes.value() > MAX_OUTPUT_DATA_SIZE) { + LogWarning("Limiting datacarriersize to %s", MAX_OUTPUT_DATA_SIZE); + mempool_opts.max_datacarrier_bytes = MAX_OUTPUT_DATA_SIZE; + } } else { mempool_opts.max_datacarrier_bytes = std::nullopt; } diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 35f98efcdd..c883acb3b9 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -28,6 +28,7 @@ serialization_fallback, ) from .descriptors import descsum_create +from .messages import MAX_OP_RETURN_RELAY from .messages import NODE_P2P_V2 from .p2p import P2P_SERVICES, P2P_SUBVERSION from .util import ( @@ -266,6 +267,11 @@ def start(self, extra_args=None, *, cwd=None, 
stdout=None, stderr=None, env=None if env is not None: subp_env.update(env) + for arg in extra_args: + if arg.startswith('-datacarriersize=') and int(arg[17:]) > MAX_OP_RETURN_RELAY: + extra_args = list(extra_args) + extra_args.append('-acceptnonstdtxn=1') + self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) self.running = True From 225df5904cc11df64aee48c8fa80b209508f4332 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 3 Oct 2025 15:06:45 +0000 Subject: [PATCH 285/356] consensus: Add no-op flags to CheckTxInputs function --- src/consensus/tx_verify.cpp | 2 +- src/consensus/tx_verify.h | 25 ++++++++++++++++++++++++- src/test/fuzz/coins_view.cpp | 2 +- src/txmempool.cpp | 4 +++- src/validation.cpp | 4 ++-- 5 files changed, 31 insertions(+), 6 deletions(-) diff --git a/src/consensus/tx_verify.cpp b/src/consensus/tx_verify.cpp index 95466b759c..42d1c804d2 100644 --- a/src/consensus/tx_verify.cpp +++ b/src/consensus/tx_verify.cpp @@ -161,7 +161,7 @@ int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& i return nSigOps; } -bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee) +bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee, const CheckTxInputsRules rules) { // are the actual inputs available? 
if (!inputs.HaveInputs(tx)) { diff --git a/src/consensus/tx_verify.h b/src/consensus/tx_verify.h index d2cf792cf3..5d8eea9d17 100644 --- a/src/consensus/tx_verify.h +++ b/src/consensus/tx_verify.h @@ -17,6 +17,29 @@ class TxValidationState; /** Transaction validation functions */ +class CheckTxInputsRules { + using underlying_type = unsigned int; + underlying_type m_flags; + constexpr explicit CheckTxInputsRules(underlying_type flags) noexcept : m_flags(flags) {} + + enum class Rule { + None = 0, + }; + +public: + using enum Rule; + + constexpr CheckTxInputsRules(Rule rule) noexcept : m_flags(static_cast(rule)) {} + + [[nodiscard]] constexpr bool test(CheckTxInputsRules rules) const noexcept { + return (m_flags & rules.m_flags) == rules.m_flags; + } + + [[nodiscard]] constexpr CheckTxInputsRules operator|(const CheckTxInputsRules other) const noexcept { + return CheckTxInputsRules{m_flags | other.m_flags}; + } +}; + namespace Consensus { /** * Check whether all inputs of this transaction are valid (no double spends and amounts) @@ -24,7 +47,7 @@ namespace Consensus { * @param[out] txfee Set to the transaction fee if successful. * Preconditions: tx.IsCoinBase() is false. 
*/ -[[nodiscard]] bool CheckTxInputs(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee); +[[nodiscard]] bool CheckTxInputs(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee, CheckTxInputsRules rules); } // namespace Consensus /** Auxiliary functions for transaction validation (ideally should not be exposed) */ diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp index 06145e0323..b74a605f14 100644 --- a/src/test/fuzz/coins_view.cpp +++ b/src/test/fuzz/coins_view.cpp @@ -256,7 +256,7 @@ FUZZ_TARGET(coins_view, .init = initialize_coins_view) // It is not allowed to call CheckTxInputs if CheckTransaction failed return; } - if (Consensus::CheckTxInputs(transaction, state, coins_view_cache, fuzzed_data_provider.ConsumeIntegralInRange(0, std::numeric_limits::max()), tx_fee_out)) { + if (Consensus::CheckTxInputs(transaction, state, coins_view_cache, fuzzed_data_provider.ConsumeIntegralInRange(0, std::numeric_limits::max()), tx_fee_out, CheckTxInputsRules::None)) { assert(MoneyRange(tx_fee_out)); } }, diff --git a/src/txmempool.cpp b/src/txmempool.cpp index dd76521e4e..4e1d357825 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -877,7 +877,9 @@ void CTxMemPool::check(const CCoinsViewCache& active_coins_tip, int64_t spendhei TxValidationState dummy_state; // Not used. 
CheckTxInputs() should always pass CAmount txfee = 0; assert(!tx.IsCoinBase()); - assert(Consensus::CheckTxInputs(tx, dummy_state, mempoolDuplicate, spendheight, txfee)); + // Skip output size checks (CheckTxInputsRules::None), as these transactions already passed + // output size limits at mempool acceptance; this check only verifies UTXO consistency + assert(Consensus::CheckTxInputs(tx, dummy_state, mempoolDuplicate, spendheight, txfee, CheckTxInputsRules::None)); for (const auto& input: tx.vin) mempoolDuplicate.SpendCoin(input.prevout); AddCoins(mempoolDuplicate, tx, std::numeric_limits::max()); } diff --git a/src/validation.cpp b/src/validation.cpp index cbebdcf8d6..393b0ad497 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -985,7 +985,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // The mempool holds txs for the next block, so pass height+1 to CheckTxInputs const auto block_height_current = m_active_chainstate.m_chain.Height(); const auto block_height_next = block_height_current + 1; - if (!Consensus::CheckTxInputs(tx, state, m_view, block_height_next, ws.m_base_fees)) { + if (!Consensus::CheckTxInputs(tx, state, m_view, block_height_next, ws.m_base_fees, CheckTxInputsRules::None)) { return false; // state filled in by CheckTxInputs } @@ -2908,7 +2908,7 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, { CAmount txfee = 0; TxValidationState tx_state; - if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee)) { + if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee, CheckTxInputsRules::None)) { // Any transaction validation failure in ConnectBlock is a block consensus failure state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), From 2002cd658a424160390cedd14686e7f4a43884a5 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 7 Oct 2025 13:13:00 +0000 Subject: [PATCH 286/356] consensus: Define CheckTxInputsRules::OutputSizeLimit 
flag (unused) to cap output scripts at 83 bytes --- src/consensus/consensus.h | 1 + src/consensus/tx_verify.cpp | 9 +++++++++ src/consensus/tx_verify.h | 1 + src/test/fuzz/coins_view.cpp | 2 +- 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/src/consensus/consensus.h b/src/consensus/consensus.h index 2c90442871..82b10e3906 100644 --- a/src/consensus/consensus.h +++ b/src/consensus/consensus.h @@ -34,6 +34,7 @@ static constexpr unsigned int LOCKTIME_VERIFY_SEQUENCE = (1 << 0); */ static constexpr int64_t MAX_TIMEWARP = 600; +static constexpr unsigned int MAX_OUTPUT_SCRIPT_SIZE{83}; static constexpr unsigned int MAX_OUTPUT_DATA_SIZE{83}; #endif // BITCOIN_CONSENSUS_CONSENSUS_H diff --git a/src/consensus/tx_verify.cpp b/src/consensus/tx_verify.cpp index 42d1c804d2..a2ebd99f7d 100644 --- a/src/consensus/tx_verify.cpp +++ b/src/consensus/tx_verify.cpp @@ -169,6 +169,15 @@ bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, strprintf("%s: inputs missing/spent", __func__)); } + // NOTE: CheckTransaction is arguably the more logical place to do this, but it's context-independent, so this is probably the next best place for now + if (rules.test(CheckTxInputsRules::OutputSizeLimit)) { + for (const auto& txout : tx.vout) { + if (txout.scriptPubKey.size() > MAX_OUTPUT_SCRIPT_SIZE) { + return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "bad-txns-vout-script-toolarge"); + } + } + } + CAmount nValueIn = 0; for (unsigned int i = 0; i < tx.vin.size(); ++i) { const COutPoint &prevout = tx.vin[i].prevout; diff --git a/src/consensus/tx_verify.h b/src/consensus/tx_verify.h index 5d8eea9d17..65e705abd4 100644 --- a/src/consensus/tx_verify.h +++ b/src/consensus/tx_verify.h @@ -24,6 +24,7 @@ class CheckTxInputsRules { enum class Rule { None = 0, + OutputSizeLimit = 1 << 0, }; public: diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp index b74a605f14..6cf7278420 100644 --- a/src/test/fuzz/coins_view.cpp +++ 
b/src/test/fuzz/coins_view.cpp @@ -256,7 +256,7 @@ FUZZ_TARGET(coins_view, .init = initialize_coins_view) // It is not allowed to call CheckTxInputs if CheckTransaction failed return; } - if (Consensus::CheckTxInputs(transaction, state, coins_view_cache, fuzzed_data_provider.ConsumeIntegralInRange(0, std::numeric_limits::max()), tx_fee_out, CheckTxInputsRules::None)) { + if (Consensus::CheckTxInputs(transaction, state, coins_view_cache, fuzzed_data_provider.ConsumeIntegralInRange(0, std::numeric_limits::max()), tx_fee_out, CheckTxInputsRules::OutputSizeLimit)) { assert(MoneyRange(tx_fee_out)); } }, From 97d167a02b8bb6a181499608cbc594d9ff3531a9 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 7 Oct 2025 13:48:39 +0000 Subject: [PATCH 287/356] consensus: When CheckTxInputsRules::OutputSizeLimit is enforced (still never), limit non-OP_RETURN scripts to 34 bytes --- src/consensus/consensus.h | 2 +- src/consensus/tx_verify.cpp | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/consensus/consensus.h b/src/consensus/consensus.h index 82b10e3906..b02773f490 100644 --- a/src/consensus/consensus.h +++ b/src/consensus/consensus.h @@ -34,7 +34,7 @@ static constexpr unsigned int LOCKTIME_VERIFY_SEQUENCE = (1 << 0); */ static constexpr int64_t MAX_TIMEWARP = 600; -static constexpr unsigned int MAX_OUTPUT_SCRIPT_SIZE{83}; +static constexpr unsigned int MAX_OUTPUT_SCRIPT_SIZE{34}; static constexpr unsigned int MAX_OUTPUT_DATA_SIZE{83}; #endif // BITCOIN_CONSENSUS_CONSENSUS_H diff --git a/src/consensus/tx_verify.cpp b/src/consensus/tx_verify.cpp index a2ebd99f7d..91215f5a1d 100644 --- a/src/consensus/tx_verify.cpp +++ b/src/consensus/tx_verify.cpp @@ -172,7 +172,8 @@ bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, // NOTE: CheckTransaction is arguably the more logical place to do this, but it's context-independent, so this is probably the next best place for now if (rules.test(CheckTxInputsRules::OutputSizeLimit)) { for 
(const auto& txout : tx.vout) { - if (txout.scriptPubKey.size() > MAX_OUTPUT_SCRIPT_SIZE) { + if (txout.scriptPubKey.empty()) continue; + if (txout.scriptPubKey.size() > ((txout.scriptPubKey[0] == OP_RETURN) ? MAX_OUTPUT_DATA_SIZE : MAX_OUTPUT_SCRIPT_SIZE)) { return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "bad-txns-vout-script-toolarge"); } } From 8257367348390745d0d5afc1cd847e583f09c6a0 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 7 Oct 2025 22:24:27 +0000 Subject: [PATCH 288/356] QA: rpc_getdescriptoractivity: Use RAW_OP_TRUE for test_no_address --- test/functional/rpc_getdescriptoractivity.py | 2 +- test/functional/test_framework/wallet.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/test/functional/rpc_getdescriptoractivity.py b/test/functional/rpc_getdescriptoractivity.py index a1d5add138..c756aed7e7 100755 --- a/test/functional/rpc_getdescriptoractivity.py +++ b/test/functional/rpc_getdescriptoractivity.py @@ -206,7 +206,7 @@ def test_receive_then_spend(self, node, wallet): def test_no_address(self, node, wallet): self.log.info("Test that activity is still reported for scripts without an associated address") - raw_wallet = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_P2PK) + raw_wallet = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_OP_TRUE) self.generate(raw_wallet, 100) no_addr_tx = raw_wallet.send_self_transfer(from_node=node) diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index dee90f9fd6..fca7b95afc 100644 --- a/test/functional/test_framework/wallet.py +++ b/test/functional/test_framework/wallet.py @@ -397,6 +397,8 @@ def create_self_transfer( return tx def sendrawtransaction(self, *, from_node, tx_hex, maxfeerate=0, **kwargs): + if self._mode == MiniWalletMode.RAW_OP_TRUE and 'ignore_rejects' not in kwargs: + kwargs['ignore_rejects'] = ('scriptsig-not-pushonly', 'scriptpubkey', 'bad-txns-input-script-unknown') txid = 
from_node.sendrawtransaction(hexstring=tx_hex, maxfeerate=maxfeerate, **kwargs) self.scan_tx(from_node.decoderawtransaction(tx_hex)) return txid From c609a453a00efdbb755d87e27a51de3a36884637 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 7 Oct 2025 15:43:43 +0000 Subject: [PATCH 289/356] QA: test_framework/wallet: Turn MiniWalletMode.RAW_P2PK into actually p2pkh --- test/functional/test_framework/wallet.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index fca7b95afc..4f69bd2821 100644 --- a/test/functional/test_framework/wallet.py +++ b/test/functional/test_framework/wallet.py @@ -78,7 +78,7 @@ class MiniWalletMode(Enum): ----------------+-------------------+-----------+----------+------------+---------- ADDRESS_OP_TRUE | anyone-can-spend | bech32m | yes | no | no RAW_OP_TRUE | anyone-can-spend | - (raw) | no | yes | no - RAW_P2PK | pay-to-public-key | - (raw) | yes | yes | yes + RAW_P2PK | p2pkh | base58 | yes | yes | yes """ ADDRESS_OP_TRUE = 1 RAW_OP_TRUE = 2 @@ -101,7 +101,7 @@ def __init__(self, test_node, *, mode=MiniWalletMode.ADDRESS_OP_TRUE, tag_name=N self._priv_key = ECKey() self._priv_key.set((1).to_bytes(32, 'big'), True) pub_key = self._priv_key.get_pubkey() - self._scriptPubKey = key_to_p2pk_script(pub_key.get_bytes()) + self._scriptPubKey = key_to_p2pkh_script(pub_key.get_bytes()) elif mode == MiniWalletMode.ADDRESS_OP_TRUE: internal_key = None if tag_name is None else compute_xonly_pubkey(hash256(tag_name.encode()))[0] self._address, self._taproot_info = create_deterministic_address_bcrt1_p2tr_op_true(internal_key) @@ -182,8 +182,9 @@ def sign_tx(self, tx, fixed_length=True): # with the DER header/skeleton data of 6 bytes added, plus 2 bytes scriptSig overhead # (OP_PUSHn and SIGHASH_ALL), this leads to a scriptSig target size of 73 bytes tx.vin[0].scriptSig = b'' - while not len(tx.vin[0].scriptSig) == 73: - 
tx.vin[0].scriptSig = b'' + while not len(tx.vin[0].scriptSig) == 107: + pub_key = self._priv_key.get_pubkey() + tx.vin[0].scriptSig = CScript([pub_key.get_bytes()]) sign_input_legacy(tx, 0, self._scriptPubKey, self._priv_key) if not fixed_length: break @@ -375,7 +376,7 @@ def create_self_transfer( if self._mode in (MiniWalletMode.RAW_OP_TRUE, MiniWalletMode.ADDRESS_OP_TRUE): vsize = Decimal(104) # anyone-can-spend elif self._mode == MiniWalletMode.RAW_P2PK: - vsize = Decimal(168) # P2PK (73 bytes scriptSig + 35 bytes scriptPubKey + 60 bytes other) + vsize = Decimal(192) # P2PK (73+34 bytes scriptSig + 25 bytes scriptPubKey + 60 bytes other) else: assert False if target_vsize and not fee: # respect fee_rate if target vsize is passed From 9194f6f05ffb29820b881bc8c9399794e2f84830 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 8 Oct 2025 13:17:52 +0000 Subject: [PATCH 290/356] RPC/Mempool: Provide tx memory usage in testmempoolaccept --- src/rpc/mempool.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/rpc/mempool.cpp b/src/rpc/mempool.cpp index e26de95b08..43a0b01a0e 100644 --- a/src/rpc/mempool.cpp +++ b/src/rpc/mempool.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -179,6 +180,7 @@ static RPCHelpMan testmempoolaccept() {RPCResult::Type::BOOL, "allowed", /*optional=*/true, "Whether this tx would be accepted to the mempool and pass client-specified maxfeerate. " "If not present, the tx was not fully validated due to a failure in another tx in the list."}, {RPCResult::Type::NUM, "vsize", /*optional=*/true, "Virtual transaction size as defined in BIP 141. 
This is different from actual serialized size for witness transactions as witness data is discounted (only present when 'allowed' is true)"}, + {RPCResult::Type::NUM, "usage", "Memory usage of transaction for this node"}, {RPCResult::Type::OBJ, "fees", /*optional=*/true, "Transaction fees (only present if 'allowed' is true)", { {RPCResult::Type::STR_AMOUNT, "base", "transaction fee in " + CURRENCY_UNIT}, @@ -254,6 +256,7 @@ static RPCHelpMan testmempoolaccept() UniValue result_inner(UniValue::VOBJ); result_inner.pushKV("txid", tx->GetHash().GetHex()); result_inner.pushKV("wtxid", tx->GetWitnessHash().GetHex()); + result_inner.pushKV("usage", RecursiveDynamicUsage(tx)); if (package_result.m_state.GetResult() == PackageValidationResult::PCKG_POLICY) { result_inner.pushKV("package-error", package_result.m_state.ToString()); } From b141420e597e41513306c03165b11acbe5584d5c Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 8 Oct 2025 13:19:14 +0000 Subject: [PATCH 291/356] QA: test_framework/mempool_util: Calibrate fill_mempool bulk tx size to expected memory usage --- test/functional/test_framework/mempool_util.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/test/functional/test_framework/mempool_util.py b/test/functional/test_framework/mempool_util.py index 56a9b4d262..869988e24c 100644 --- a/test/functional/test_framework/mempool_util.py +++ b/test/functional/test_framework/mempool_util.py @@ -69,6 +69,15 @@ def fill_mempool(test_framework, node, *, tx_sync_fun=None): confirmed_utxos = [ephemeral_miniwallet.get_utxo(confirmed_only=True) for _ in range(num_of_batches * tx_batch_size + 1)] assert_equal(len(confirmed_utxos), num_of_batches * tx_batch_size + 1) + # Calibrate dummy tx memory usage, since we rely on filling maxmempool + target_tx_usage = 68064 + tx = ephemeral_miniwallet.create_self_transfer(utxo_to_spend=confirmed_utxos[0])["tx"] + tx.vout.extend(txouts) + res = node.testmempoolaccept([tx.serialize().hex()])[0] + if res['usage'] > 
target_tx_usage: + excess_outputs = len(txouts) - (target_tx_usage * len(txouts) // res['usage']) + txouts = txouts[excess_outputs:] + test_framework.log.debug("Create a mempool tx that will be evicted") tx_to_be_evicted_id = ephemeral_miniwallet.send_self_transfer( from_node=node, utxo_to_spend=confirmed_utxos.pop(0), fee_rate=minrelayfee)["txid"] From e011d53b4dfc3650f792f81d3ded7c6912902fcf Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 8 Oct 2025 19:18:51 +0000 Subject: [PATCH 292/356] Bugfix: QA: mempool_limit: Use "usage" rather than "bytes" --- test/functional/mempool_limit.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py index 5051f8a030..1398b2cd54 100755 --- a/test/functional/mempool_limit.py +++ b/test/functional/mempool_limit.py @@ -121,7 +121,7 @@ def test_mid_package_eviction_success(self): num_big_parents = 3 # Need to be large enough to trigger eviction # (note that the mempool usage of a tx is about three times its vsize) - assert_greater_than(parent_vsize * num_big_parents * 3, current_info["maxmempool"] - current_info["bytes"]) + assert_greater_than(parent_vsize * num_big_parents * 3, current_info["maxmempool"] - current_info["usage"]) big_parent_txids = [] big_parent_wtxids = [] @@ -159,7 +159,7 @@ def test_mid_package_eviction_success(self): assert_equal(len(package_res["tx-results"][wtxid]["fees"]["effective-includes"]), 1) # Maximum size must never be exceeded. 
- assert_greater_than(node.getmempoolinfo()["maxmempool"], node.getmempoolinfo()["bytes"]) + assert_greater_than(node.getmempoolinfo()["maxmempool"], node.getmempoolinfo()["usage"]) # Package found in mempool still resulting_mempool_txids = node.getrawmempool() @@ -229,7 +229,7 @@ def test_mid_package_eviction(self): num_big_parents = 3 # Need to be large enough to trigger eviction # (note that the mempool usage of a tx is about three times its vsize) - assert_greater_than(parent_vsize * num_big_parents * 3, current_info["maxmempool"] - current_info["bytes"]) + assert_greater_than(parent_vsize * num_big_parents * 3, current_info["maxmempool"] - current_info["usage"]) parent_feerate = 10 * mempoolmin_feerate big_parent_txids = [] @@ -260,7 +260,7 @@ def test_mid_package_eviction(self): assert_equal(node.submitpackage(package_hex)["package_msg"], "transaction failed") # Maximum size must never be exceeded. - assert_greater_than(node.getmempoolinfo()["maxmempool"], node.getmempoolinfo()["bytes"]) + assert_greater_than(node.getmempoolinfo()["maxmempool"], node.getmempoolinfo()["usage"]) # Evicted transaction and its descendants must not be in mempool. resulting_mempool_txids = node.getrawmempool() @@ -329,7 +329,7 @@ def test_mid_package_replacement(self): assert len([tx_res for _, tx_res in res["tx-results"].items() if "error" in tx_res and tx_res["error"] == "bad-txns-inputs-missingorspent"]) # Maximum size must never be exceeded. - assert_greater_than(node.getmempoolinfo()["maxmempool"], node.getmempoolinfo()["bytes"]) + assert_greater_than(node.getmempoolinfo()["maxmempool"], node.getmempoolinfo()["usage"]) resulting_mempool_txids = node.getrawmempool() # The replacement should be successful. 
@@ -406,7 +406,7 @@ def run_test(self): # Needs to be large enough to trigger eviction # (note that the mempool usage of a tx is about three times its vsize) target_vsize_each = 50000 - assert_greater_than(target_vsize_each * 2 * 3, node.getmempoolinfo()["maxmempool"] - node.getmempoolinfo()["bytes"]) + assert_greater_than(target_vsize_each * 2 * 3, node.getmempoolinfo()["maxmempool"] - node.getmempoolinfo()["usage"]) # Should be a true CPFP: parent's feerate is just below mempool min feerate parent_feerate = mempoolmin_feerate - Decimal("0.0000001") # 0.01 sats/vbyte below min feerate # Parent + child is above mempool minimum feerate From fc62079ba1909281b419f33ae01acf7e7aa2d68b Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Tue, 7 Oct 2025 15:44:40 +0000 Subject: [PATCH 293/356] QA: test_framework: Use multiple OP_RETURNs to pad transactions rather than exceed MAX_OP_RETURN_RELAY --- test/functional/test_framework/util.py | 3 ++- test/functional/test_framework/wallet.py | 25 ++++++++++++++++++------ 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 4a35d9b869..59c7c78595 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -571,7 +571,8 @@ def check_node_connections(*, node, num_in, num_out): def gen_return_txouts(): from .messages import CTxOut from .script import CScript, OP_RETURN - txouts = [CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN, b'\x01'*67437]))] + txouts = [CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN, b'\x01'*80]))] * 733 + txouts.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN, b'\x01'*9]))) assert_equal(sum([len(txout.serialize()) for txout in txouts]), 67456) return txouts diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index 4f69bd2821..1376226648 100644 --- a/test/functional/test_framework/wallet.py +++ 
b/test/functional/test_framework/wallet.py @@ -33,6 +33,7 @@ CTxInWitness, CTxOut, hash256, + MAX_OP_RETURN_RELAY, ser_compact_size, ) from test_framework.script import ( @@ -124,13 +125,25 @@ def _bulk_tx(self, tx, target_vsize): if target_vsize < tx.get_vsize(): raise RuntimeError(f"target_vsize {target_vsize} is less than transaction virtual size {tx.get_vsize()}") - tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN]))) - # determine number of needed padding bytes dummy_vbytes = target_vsize - tx.get_vsize() - # compensate for the increase of the compact-size encoded script length - # (note that the length encoding of the unpadded output script needs one byte) - dummy_vbytes -= len(ser_compact_size(dummy_vbytes)) - 1 - tx.vout[-1].scriptPubKey = CScript([OP_RETURN] + [OP_1] * dummy_vbytes) + if dummy_vbytes > 0: + # determine number of needed padding bytes + min_output_size = 8 + 1 + 1 + max_output_size = 8 + 1 + MAX_OP_RETURN_RELAY + n_max_outputs = (dummy_vbytes - min_output_size) // max_output_size + last_output_size = dummy_vbytes - (n_max_outputs * max_output_size) + n_outputs_before = len(tx.vout) + + tx.vout.extend([CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN] + [OP_1] * (MAX_OP_RETURN_RELAY - 1)))] * n_max_outputs) + tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN] + [OP_1] * (last_output_size - 8 - 1 - 1)))) + + # compensate for the increase of the compact-size encoded script length + # (note that the length encoding of the unpadded output script needs one byte) + extra_len_size = len(ser_compact_size(len(tx.vout))) - 1 + if extra_len_size: + assert tx.vout[n_outputs_before].scriptPubKey[-extra_len_size:] == bytes([OP_1] * extra_len_size) + tx.vout[n_outputs_before] = CTxOut(nValue=0, scriptPubKey = CScript(tx.vout[n_outputs_before].scriptPubKey[:-extra_len_size])) + assert_equal(tx.get_vsize(), target_vsize) def get_balance(self): From b249e998c2cfd3e818c98631891db75484e01eb6 Mon Sep 17 00:00:00 2001 From: Luke Dashjr 
Date: Sat, 4 Oct 2025 14:34:08 +0000 Subject: [PATCH 294/356] consensus: Enforce CheckTxInputsRules::OutputSizeLimit when DEPLOYMENT_REDUCED_DATA is active (never yet) --- src/validation.cpp | 6 ++++-- test/functional/feature_segwit.py | 16 ++++++---------- test/functional/mempool_dust.py | 6 +----- 3 files changed, 11 insertions(+), 17 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 393b0ad497..1915660685 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -985,7 +985,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // The mempool holds txs for the next block, so pass height+1 to CheckTxInputs const auto block_height_current = m_active_chainstate.m_chain.Height(); const auto block_height_next = block_height_current + 1; - if (!Consensus::CheckTxInputs(tx, state, m_view, block_height_next, ws.m_base_fees, CheckTxInputsRules::None)) { + if (!Consensus::CheckTxInputs(tx, state, m_view, block_height_next, ws.m_base_fees, CheckTxInputsRules::OutputSizeLimit)) { return false; // state filled in by CheckTxInputs } @@ -2892,6 +2892,8 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, CCheckQueueControl control(fScriptChecks && parallel_script_checks ? &m_chainman.GetCheckQueue() : nullptr); std::vector txsdata(block.vtx.size()); + const auto chk_input_rules{DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_REDUCED_DATA) ? 
CheckTxInputsRules::OutputSizeLimit : CheckTxInputsRules::None}; + std::vector prevheights; CAmount nFees = 0; int nInputs = 0; @@ -2908,7 +2910,7 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, { CAmount txfee = 0; TxValidationState tx_state; - if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee, CheckTxInputsRules::None)) { + if (!Consensus::CheckTxInputs(tx, tx_state, view, pindex->nHeight, txfee, chk_input_rules)) { // Any transaction validation failure in ConnectBlock is a block consensus failure state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index a2fab6714b..ef0c13552f 100755 --- a/test/functional/feature_segwit.py +++ b/test/functional/feature_segwit.py @@ -402,14 +402,12 @@ def run_test(self): [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # p2sh multisig with compressed keys should always be spendable spendable_anytime.extend([p2sh]) - # bare multisig can be watched and signed, but is not treated as ours - solvable_after_importaddress.extend([bare]) # P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh]) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # normal P2PKH and P2PK with compressed keys should always be spendable - spendable_anytime.extend([p2pkh, p2pk]) + spendable_anytime.extend([p2pkh]) # P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) # P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable @@ -421,14 +419,12 @@ def run_test(self): [bare, p2sh, p2wsh, p2sh_p2wsh] = 
self.p2sh_address_to_script(v) # p2sh multisig with uncompressed keys should always be spendable spendable_anytime.extend([p2sh]) - # bare multisig can be watched and signed, but is not treated as ours - solvable_after_importaddress.extend([bare]) # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen unseen_anytime.extend([p2wsh, p2sh_p2wsh]) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # normal P2PKH and P2PK with uncompressed keys should always be spendable - spendable_anytime.extend([p2pkh, p2pk]) + spendable_anytime.extend([p2pkh]) # P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) # Witness output types with uncompressed keys are never seen @@ -439,11 +435,11 @@ def run_test(self): if v['isscript']: # Multisig without private is not seen after addmultisigaddress, but seen after importaddress [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) - solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh]) + solvable_after_importaddress.extend([p2sh, p2wsh, p2sh_p2wsh]) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen - solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh]) + solvable_anytime.extend([p2pkh, p2wpkh, p2sh_p2wpkh]) # P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]) @@ -452,13 +448,13 @@ def run_test(self): if v['isscript']: [bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v) # Base uncompressed multisig without private is not seen after addmultisigaddress, but 
seen after importaddress - solvable_after_importaddress.extend([bare, p2sh]) + solvable_after_importaddress.extend([p2sh]) # P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen unseen_anytime.extend([p2wsh, p2sh_p2wsh]) else: [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v) # normal P2PKH and P2PK with uncompressed keys should always be seen - solvable_anytime.extend([p2pkh, p2pk]) + solvable_anytime.extend([p2pkh]) # P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh]) # Witness output types with uncompressed keys are never seen diff --git a/test/functional/mempool_dust.py b/test/functional/mempool_dust.py index 937e77fbd4..48da212d4b 100755 --- a/test/functional/mempool_dust.py +++ b/test/functional/mempool_dust.py @@ -109,8 +109,6 @@ def run_test(self): _, pubkey = generate_keypair(compressed=True) output_scripts = ( - (key_to_p2pk_script(uncompressed_pubkey), "P2PK (uncompressed)"), - (key_to_p2pk_script(pubkey), "P2PK (compressed)"), (key_to_p2pkh_script(pubkey), "P2PKH"), (script_to_p2sh_script(CScript([OP_TRUE])), "P2SH"), (key_to_p2wpkh_script(pubkey), "P2WPKH"), @@ -118,9 +116,7 @@ def run_test(self): (output_key_to_p2tr_script(pubkey[1:]), "P2TR"), # witness programs for segwitv2+ can be between 2 and 40 bytes (program_to_witness_script(2, b'\x66' * 2), "P2?? (future witness version 2)"), - (program_to_witness_script(16, b'\x77' * 40), "P2?? (future witness version 16)"), - # largest possible output script considered standard - (keys_to_multisig_script([uncompressed_pubkey]*3), "bare multisig (m-of-3)"), + (program_to_witness_script(16, b'\x77' * 32), "P2?? 
(future witness version 16)"), (CScript([OP_RETURN, b'superimportanthash']), "null data (OP_RETURN)"), ) From 09591b3cabd2a497e5c3484d6469b945993d37d3 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Sat, 4 Oct 2025 12:59:45 +0000 Subject: [PATCH 295/356] Policy: Do not allow user to bypass SCRIPT_VERIFY_DISCOURAGE_{UPGRADABLE_WITNESS_PROGRAM,UPGRADABLE_TAPROOT_VERSION,OP_SUCCESS} --- src/validation.cpp | 9 --------- 1 file changed, 9 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 1915660685..792711e065 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -1445,15 +1445,6 @@ unsigned int PolicyScriptVerifyFlags(const ignore_rejects_type& ignore_rejects) if (ignore_rejects.count("non-mandatory-script-verify-flag-upgradable-nops")) { flags &= ~SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_NOPS; } - if (ignore_rejects.count("non-mandatory-script-verify-flag-upgradable-witness_program")) { - flags &= ~SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM; - } - if (ignore_rejects.count("non-mandatory-script-verify-flag-upgradable-taproot_version")) { - flags &= ~SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION; - } - if (ignore_rejects.count("non-mandatory-script-verify-flag-upgradable-op_success")) { - flags &= ~SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS; - } if (ignore_rejects.count("non-mandatory-script-verify-flag-upgradable-pubkeytype")) { flags &= ~SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE; } From ccb49ca4d92969a5225b32a7c5be8e18de673157 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Sat, 4 Oct 2025 14:39:50 +0000 Subject: [PATCH 296/356] consensus: Enforce SCRIPT_VERIFY_DISCOURAGE_{UPGRADABLE_WITNESS_PROGRAM,UPGRADABLE_TAPROOT_VERSION,OP_SUCCESS} on blocks when DEPLOYMENT_REDUCED_DATA is active (never yet) --- src/validation.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/validation.cpp b/src/validation.cpp index 792711e065..bfc9c21542 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2672,7 
+2672,10 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const Ch } if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_REDUCED_DATA)) { - flags |= SCRIPT_VERIFY_REDUCED_DATA; + flags |= SCRIPT_VERIFY_REDUCED_DATA | + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM | + SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION | + SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS; } return flags; From 1891f4cfc0d00eb883e7941c9a8e618791b2ea30 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 31 May 2017 22:35:37 +0000 Subject: [PATCH 297/356] Define a service bit for BIP148 Github-Pull: #10532 Rebased-From: cd74a23fcf9588199e196ab31bc64972400c2027 --- src/protocol.cpp | 1 + src/protocol.h | 3 +++ 2 files changed, 4 insertions(+) diff --git a/src/protocol.cpp b/src/protocol.cpp index 589ff53efb..1668e264b8 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -103,6 +103,7 @@ static std::string serviceFlagToStr(size_t bit) case NODE_UTREEXO_ARCHIVE: return "UTREEXO_ARCHIVE"; case NODE_UTREEXO_TMP: return "UTREEXO_TMP?"; case NODE_REPLACE_BY_FEE: return "REPLACE_BY_FEE?"; + case NODE_BIP148: return "BIP148"; case NODE_MALICIOUS: return "MALICIOUS?"; // Not using default, so we get warned when a case is missing } diff --git a/src/protocol.h b/src/protocol.h index ebfd139c25..0a7a15e31a 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -347,6 +347,9 @@ enum ServiceFlags : uint64_t { NODE_REPLACE_BY_FEE = (1 << 26), + // NODE_BIP148 means the node enforces BIP 148's mandatory Segwit activation beginning August 1, 2017 + NODE_BIP148 = (1 << 27), + NODE_MALICIOUS = (1 << 29), }; From 8d372599dcf5bb81ed40a3d3c58c5992ba9ae91f Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Mon, 20 Oct 2025 00:28:40 +0000 Subject: [PATCH 298/356] Add questionmark to end of BIP148 service bit string, and add to bitcoin-cli --- src/bitcoin-cli.cpp | 3 +++ src/protocol.cpp | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/bitcoin-cli.cpp 
b/src/bitcoin-cli.cpp index 86435410f5..99569e9cf7 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -483,6 +483,8 @@ class NetinfoRequestHandler : public BaseRequestHandler str += 'T'; } else if (s == "UTREEXO_TMP?") { str += 'y'; + } else if (s == "BIP148?") { + str += '1'; } else { str += ToLower(s[0]); } @@ -765,6 +767,7 @@ class NetinfoRequestHandler : public BaseRequestHandler " \"T\" - UTREEXO_ARCHIVE peer can handle Utreexo proof requests for all historical blocks\n" " \"y\" - UTREEXO_TMP? peer can handle Utreexo proof requests\n" " \"r\" - REPLACE_BY_FEE? peer supports replacement of transactions without BIP 125 signalling\n" + " \"1\" - BIP148? peer enforces the BIP148 User-Activated SoftFork\n" " \"m\" - MALICIOUS? peer openly seeks to aid in bypassing network policy/spam filters (OR to sabotage nodes that seek to)\n" " \"u\" - UNKNOWN: unrecognized bit flag\n" " v Version of transport protocol used for the connection\n" diff --git a/src/protocol.cpp b/src/protocol.cpp index 1668e264b8..1e492dadfc 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -103,7 +103,7 @@ static std::string serviceFlagToStr(size_t bit) case NODE_UTREEXO_ARCHIVE: return "UTREEXO_ARCHIVE"; case NODE_UTREEXO_TMP: return "UTREEXO_TMP?"; case NODE_REPLACE_BY_FEE: return "REPLACE_BY_FEE?"; - case NODE_BIP148: return "BIP148"; + case NODE_BIP148: return "BIP148?"; case NODE_MALICIOUS: return "MALICIOUS?"; // Not using default, so we get warned when a case is missing } From 367b3446236e6b71fc86b18b69fc451a720f78d4 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Wed, 31 May 2017 22:37:13 +0000 Subject: [PATCH 299/356] Preferentially peer with nodes enforcing BIP148 to avoid partitioning risk Github-Pull: #10532 Rebased-From: e42a2f6beb61df3e3a201804cf3bcce6b00c88ba --- src/init.cpp | 2 +- src/net_processing.cpp | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/init.cpp b/src/init.cpp index 585bb9220c..6613fd0c73 100644 --- a/src/init.cpp +++ 
b/src/init.cpp @@ -975,7 +975,7 @@ namespace { // Variables internal to initialization process only int nMaxConnections; int available_fds; -ServiceFlags g_local_services = ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS); +ServiceFlags g_local_services = ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_BIP148); int64_t peer_connect_timeout; std::set g_enabled_filter_types; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index f810e9f12b..16f39ec668 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1657,13 +1657,14 @@ bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const { + // We want to preferentially peer with other nodes that enforce BIP148, in case of a chain split if (services & NODE_NETWORK_LIMITED) { // Limited peers are desirable when we are close to the tip. if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) { - return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS); + return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_BIP148); } } - return ServiceFlags(NODE_NETWORK | NODE_WITNESS); + return ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BIP148); } PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const From a065a596b15e4513f4732f253bb79d65245fec23 Mon Sep 17 00:00:00 2001 From: moneybadger1 Date: Sat, 1 Nov 2025 13:47:00 -0600 Subject: [PATCH 300/356] tests: fix feature_cltv, feature_dersig, mempool_accept, and mempool_accept_wtxid --- test/functional/feature_cltv.py | 21 +++++++++++---------- test/functional/feature_dersig.py | 13 +++++++------ test/functional/mempool_accept.py | 1 + test/functional/mempool_accept_wtxid.py | 21 +++++++++++++++------ 4 files changed, 34 insertions(+), 22 deletions(-) diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py index 81cc10a5ad..08108884e5 100755 --- a/test/functional/feature_cltv.py +++ 
b/test/functional/feature_cltv.py @@ -167,16 +167,17 @@ def run_test(self): # rejected from the mempool for exactly that reason. spendtx_txid = spendtx.hash spendtx_wtxid = spendtx.getwtxid() - assert_equal( - [{ - 'txid': spendtx_txid, - 'wtxid': spendtx_wtxid, - 'allowed': False, - 'reject-reason': tx_rej + expected_cltv_reject_reason, - 'reject-details': tx_rej + expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}" - }], - self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0), - ) + expected = { + 'txid': spendtx_txid, + 'wtxid': spendtx_wtxid, + 'allowed': False, + 'reject-reason': tx_rej + expected_cltv_reject_reason, + 'reject-details': tx_rej + expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}", + } + result = self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0)[0] + # skip for now + result.pop('usage') + assert_equal(result, expected) # Now we verify that a block with this transaction is also invalid. block.vtx[1] = spendtx diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py index 2a7eb0d0f4..8b79a92df0 100755 --- a/test/functional/feature_dersig.py +++ b/test/functional/feature_dersig.py @@ -118,17 +118,18 @@ def run_test(self): # rejected from the mempool for exactly that reason. 
spendtx_txid = spendtx.hash spendtx_wtxid = spendtx.getwtxid() - assert_equal( - [{ + expected = { 'txid': spendtx_txid, 'wtxid': spendtx_wtxid, 'allowed': False, 'reject-reason': 'mempool-script-verify-flag-failed (Non-canonical DER signature)', 'reject-details': 'mempool-script-verify-flag-failed (Non-canonical DER signature), ' + - f"input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:0" - }], - self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0), - ) + f"input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:0", + } + result = self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0)[0] + # skip for now + result.pop('usage') + assert_equal(result, expected) # Now we verify that a block with this transaction is also invalid. block.vtx.append(spendtx) diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index 38dd5b5001..47a1ba21f1 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -69,6 +69,7 @@ def check_mempool_result(self, result_expected, *args, **kwargs): for r in result_test: # Skip these checks for now r.pop('wtxid') + r.pop('usage') if "fees" in r: r["fees"].pop("effective-feerate") r["fees"].pop("effective-includes") diff --git a/test/functional/mempool_accept_wtxid.py b/test/functional/mempool_accept_wtxid.py index f74d00e37c..610b4a0962 100755 --- a/test/functional/mempool_accept_wtxid.py +++ b/test/functional/mempool_accept_wtxid.py @@ -96,20 +96,29 @@ def run_test(self): assert_equal(node.getmempoolinfo()["unbroadcastcount"], 0) # testmempoolaccept reports the "already in mempool" error - assert_equal(node.testmempoolaccept([child_one.serialize().hex()]), [{ + expected = { "txid": child_one_txid, "wtxid": child_one_wtxid, "allowed": False, "reject-reason": "txn-already-in-mempool", - "reject-details": "txn-already-in-mempool" - }]) - 
assert_equal(node.testmempoolaccept([child_two.serialize().hex()])[0], { + "reject-details": "txn-already-in-mempool", + } + result = node.testmempoolaccept([child_one.serialize().hex()])[0] + # skip for now + result.pop('usage') + assert_equal(result, expected) + + expected = { "txid": child_two_txid, "wtxid": child_two_wtxid, "allowed": False, "reject-reason": "txn-same-nonwitness-data-in-mempool", - "reject-details": "txn-same-nonwitness-data-in-mempool" - }) + "reject-details": "txn-same-nonwitness-data-in-mempool", + } + result = node.testmempoolaccept([child_two.serialize().hex()])[0] + # skip for now + result.pop('usage') + assert_equal(result, expected) # sendrawtransaction will not throw but quits early when the exact same transaction is already in mempool node.sendrawtransaction(child_one.serialize().hex()) From f98fe90d3512cab1242ff9c32843f0e38894aa56 Mon Sep 17 00:00:00 2001 From: moneybadger1 Date: Sat, 1 Nov 2025 13:44:30 -0600 Subject: [PATCH 301/356] tests: fix BIP148 service bit --- test/functional/feature_anchors.py | 10 ++++++---- test/functional/test_framework/messages.py | 1 + test/functional/test_framework/p2p.py | 3 ++- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/test/functional/feature_anchors.py b/test/functional/feature_anchors.py index 154461e739..9638ccc375 100755 --- a/test/functional/feature_anchors.py +++ b/test/functional/feature_anchors.py @@ -8,7 +8,7 @@ from test_framework.p2p import P2PInterface, P2P_SERVICES from test_framework.socks5 import Socks5Configuration, Socks5Server -from test_framework.messages import CAddress, hash256 +from test_framework.messages import CAddress, hash256, ser_compact_size from test_framework.test_framework import BitcoinTestFramework from test_framework.util import check_node_connections, assert_equal, p2p_port @@ -113,7 +113,7 @@ def run_test(self): caddr.ip, port_str = ONION_ADDR.split(":") caddr.port = int(port_str) # TorV3 addrv2 serialization: - # time(4) | services(1) | 
networkID(1) | address length(1) | address(32) + # time(4) | services(CompactSize) | networkID(1) | address length(CompactSize) | address(32) expected_pubkey = caddr.serialize_v2()[7:39].hex() # position of services byte of first addr in anchors.dat @@ -122,7 +122,7 @@ def run_test(self): data = bytes() with open(node_anchors_path, "rb") as file_handler: data = file_handler.read() - assert_equal(data[services_index], 0x00) # services == NONE + assert_equal(data[services_index], 0x00) # services == NONE (CompactSize encoded as 1 byte) anchors2 = data.hex() assert expected_pubkey in anchors2 @@ -131,7 +131,9 @@ def run_test(self): # This is necessary because on restart we will not attempt an anchor connection # to a host without our required services, even if its address is in the anchors.dat file new_data = bytearray(data)[:-32] - new_data[services_index] = P2P_SERVICES + # Replace the 1-byte services field (0x00) with the CompactSize-encoded P2P_SERVICES (5 bytes for 0x08000009) + services_bytes = ser_compact_size(P2P_SERVICES) + new_data = new_data[:services_index] + services_bytes + new_data[services_index+1:] new_data_hash = hash256(new_data) file_handler.write(new_data + new_data_hash) diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index ae987e64d5..73217d5dd2 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -57,6 +57,7 @@ NODE_NETWORK_LIMITED = (1 << 10) NODE_P2P_V2 = (1 << 11) NODE_REPLACE_BY_FEE = (1 << 26) +NODE_BIP148 = (1 << 27) MSG_TX = 1 MSG_BLOCK = 2 diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index 1cd962b559..540ae2c4af 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -73,6 +73,7 @@ msg_wtxidrelay, NODE_NETWORK, NODE_WITNESS, + NODE_BIP148, MAGIC_BYTES, sha256, ) @@ -95,7 +96,7 @@ # Version 70016 supports wtxid relay P2P_VERSION = 70016 # The 
services that this test framework offers in its `version` message -P2P_SERVICES = NODE_NETWORK | NODE_WITNESS +P2P_SERVICES = NODE_NETWORK | NODE_WITNESS | NODE_BIP148 # The P2P user agent string that this test framework sends in its `version` message P2P_SUBVERSION = "/python-p2p-tester:0.0.3/" # Value for relay that this test framework sends in its `version` message From 4f371cfa2cd0a2cb1262d6c08e84dc8af117873c Mon Sep 17 00:00:00 2001 From: 3c853b6299 <3c853b6299@pm.me> Date: Sat, 1 Nov 2025 11:15:35 -0600 Subject: [PATCH 302/356] test: Adapt functional tests to MAX_OUTPUT_SCRIPT_SIZE=34 consensus limit; add reduced_data deployment name to allow regtest RPC access for testing --- test/functional/data/invalid_txs.py | 6 +- test/functional/mempool_dust.py | 76 +++++++++++ test/functional/mempool_sigoplimit.py | 176 ++++++++++++++++++++----- test/functional/tool_utxo_to_sqlite.py | 50 ++++--- 4 files changed, 259 insertions(+), 49 deletions(-) diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py index f96059d4ee..51945fc875 100644 --- a/test/functional/data/invalid_txs.py +++ b/test/functional/data/invalid_txs.py @@ -223,10 +223,14 @@ class TooManySigops(BadTxTemplate): block_reject_reason = "bad-blk-sigops, out-of-bounds SigOpCount" def get_tx(self): + # Put OP_CHECKSIGs in scriptSig (input) instead of scriptPubKey (output) + # to avoid violating MAX_OUTPUT_SCRIPT_SIZE=34 consensus limit. + # Sigops are counted from both input and output scripts. 
lotsa_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS)) return create_tx_with_script( self.spend_tx, 0, - output_script=lotsa_checksigs, + script_sig=lotsa_checksigs, + output_script=basic_p2sh, # 23-byte P2SH, well under 34-byte limit amount=1) def getDisabledOpcodeTemplate(opcode): diff --git a/test/functional/mempool_dust.py b/test/functional/mempool_dust.py index 48da212d4b..557c2938f7 100755 --- a/test/functional/mempool_dust.py +++ b/test/functional/mempool_dust.py @@ -99,10 +99,86 @@ def test_dustrelay(self): assert_equal(self.nodes[0].getrawmempool(), []) + def test_output_size_limit(self): + """Test that outputs exceeding MAX_OUTPUT_SCRIPT_SIZE (34 bytes) are rejected""" + self.log.info("Test MAX_OUTPUT_SCRIPT_SIZE limit (34 bytes)") + + node = self.nodes[0] + _, pubkey = generate_keypair(compressed=True) + + # Test Case 1: Scripts at or under 34 bytes should be accepted + self.log.info("-> Testing scripts at or under 34-byte limit (should pass)") + + passing_scripts = [ + (key_to_p2pkh_script(pubkey), "P2PKH", 25), + (key_to_p2wpkh_script(pubkey), "P2WPKH", 22), + (script_to_p2wsh_script(CScript([OP_TRUE])), "P2WSH", 34), + (script_to_p2sh_script(CScript([OP_TRUE])), "P2SH", 23), + (output_key_to_p2tr_script(pubkey[1:]), "P2TR", 34), + ] + + for script, name, expected_size in passing_scripts: + assert_equal(len(script), expected_size) + tx = self.wallet.create_self_transfer()["tx"] + tx.vout.append(CTxOut(nValue=1000, scriptPubKey=script)) + res = node.testmempoolaccept([tx.serialize().hex()])[0] + assert_equal(res['allowed'], True) + self.log.info(f" ✓ {name} ({expected_size} bytes) accepted") + + # Test Case 2: P2PK with compressed pubkey (35 bytes) should be rejected + self.log.info("-> Testing P2PK compressed (35 bytes) - should be rejected") + p2pk_script = key_to_p2pk_script(pubkey) + assert_equal(len(p2pk_script), 35) + + tx = self.wallet.create_self_transfer()["tx"] + tx.vout.append(CTxOut(nValue=1000, scriptPubKey=p2pk_script)) + res = 
node.testmempoolaccept([tx.serialize().hex()])[0] + assert_equal(res['allowed'], False) + assert 'output-script-size' in res['reject-reason'].lower() or \ + 'bad-txns' in res['reject-reason'].lower(), \ + f"Expected output-script-size error, got: {res['reject-reason']}" + self.log.info(f" ✓ P2PK compressed (35 bytes) correctly rejected: {res['reject-reason']}") + + # Test Case 3: 1-of-1 bare multisig (37 bytes) should be rejected + self.log.info("-> Testing 1-of-1 bare multisig (37 bytes) - should be rejected") + multisig_script = keys_to_multisig_script([pubkey], k=1) + assert_equal(len(multisig_script), 37) + + tx = self.wallet.create_self_transfer()["tx"] + tx.vout.append(CTxOut(nValue=1000, scriptPubKey=multisig_script)) + res = node.testmempoolaccept([tx.serialize().hex()])[0] + assert_equal(res['allowed'], False) + assert 'output-script-size' in res['reject-reason'].lower() or \ + 'bad-txns' in res['reject-reason'].lower(), \ + f"Expected output-script-size error, got: {res['reject-reason']}" + self.log.info(f" ✓ 1-of-1 bare multisig (37 bytes) correctly rejected: {res['reject-reason']}") + + # Test Case 4: Boundary testing (exactly 34 vs 35 bytes) + self.log.info("-> Testing boundary conditions") + + # Exactly 34 bytes should pass (create a witness program v0 with 32-byte data) + script_34 = CScript([0, bytes(32)]) # OP_0 + 32 bytes = 34 bytes + assert_equal(len(script_34), 34) + tx = self.wallet.create_self_transfer()["tx"] + tx.vout.append(CTxOut(nValue=1000, scriptPubKey=script_34)) + res = node.testmempoolaccept([tx.serialize().hex()])[0] + assert_equal(res['allowed'], True) + self.log.info(f" ✓ Exactly 34 bytes accepted (boundary)") + + # 35 bytes should fail (create a witness program v0 with 33-byte data - invalid but tests size) + script_35 = CScript([0, bytes(33)]) # OP_0 + 33 bytes = 35 bytes + assert_equal(len(script_35), 35) + tx = self.wallet.create_self_transfer()["tx"] + tx.vout.append(CTxOut(nValue=1000, scriptPubKey=script_35)) + res = 
node.testmempoolaccept([tx.serialize().hex()])[0] + assert_equal(res['allowed'], False) + self.log.info(f" ✓ 35 bytes rejected (boundary): {res['reject-reason']}") + def run_test(self): self.wallet = MiniWallet(self.nodes[0]) self.test_dustrelay() + self.test_output_size_limit() # prepare output scripts of each standard type _, uncompressed_pubkey = generate_keypair(compressed=False) diff --git a/test/functional/mempool_sigoplimit.py b/test/functional/mempool_sigoplimit.py index 4696a846cf..16c2a42750 100755 --- a/test/functional/mempool_sigoplimit.py +++ b/test/functional/mempool_sigoplimit.py @@ -13,11 +13,13 @@ CTxIn, CTxInWitness, CTxOut, + MAX_OP_RETURN_RELAY, WITNESS_SCALE_FACTOR, tx_from_hex, ) from test_framework.script import ( CScript, + OP_1, OP_2DUP, OP_CHECKMULTISIG, OP_CHECKSIG, @@ -87,16 +89,84 @@ def test_sigops_limit(self, bytes_per_sigop, num_sigops): [OP_CHECKSIG]*num_singlesigops + [OP_ENDIF, OP_TRUE] ) - # use a 256-byte data-push as lower bound in the output script, in order - # to avoid having to compensate for tx size changes caused by varying - # length serialization sizes (both for scriptPubKey and data-push lengths) - tx = self.create_p2wsh_spending_tx(witness_script, CScript([OP_RETURN, b'X'*256])) - # bump the tx to reach the sigop-limit equivalent size by padding the datacarrier output - assert_greater_than_or_equal(sigop_equivalent_vsize, tx.get_vsize()) - vsize_to_pad = sigop_equivalent_vsize - tx.get_vsize() - tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'X'*(256+vsize_to_pad)]) - assert_equal(sigop_equivalent_vsize, tx.get_vsize()) + # Create transaction ONCE with a small output + # This creates ONE funding transaction in the mempool + tx = self.create_p2wsh_spending_tx(witness_script, CScript([OP_RETURN, b'test123'])) + + # Helper function to pad transaction to target vsize using multiple OP_RETURN outputs + def pad_tx_to_vsize(tx, target_vsize): + """Adjust transaction size by adding/removing multiple OP_RETURN outputs""" + # 
Keep only the first output, remove all padding outputs + while len(tx.vout) > 1: + tx.vout.pop() + + # MAX_OP_RETURN_RELAY = 83, so max script is: OP_RETURN + 82 bytes data + max_script_size = MAX_OP_RETURN_RELAY + + # Iteratively add outputs until we reach or slightly exceed the target + while True: + current_vsize = tx.get_vsize() + if current_vsize >= target_vsize: + break + + vsize_needed = target_vsize - current_vsize + + # CTxOut serialization: nValue (8) + compact_size(script_len) + script + # For script_len <= 252: compact_size = 1 byte + # So total = 8 + 1 + script_len = 9 + script_len + + # Maximum output: 8 + 1 + 83 = 92 vbytes + if vsize_needed >= 92: + # Add a max-size output + tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN] + [OP_1] * (max_script_size - 1)))) + elif vsize_needed >= 10: + # Need to add exactly vsize_needed bytes + # 8 + 1 + script_len = vsize_needed + # script_len = vsize_needed - 9 + script_len = vsize_needed - 9 + # Script is [OP_RETURN] + data, so len = 1 + data_len + # data_len = script_len - 1 + data_len = script_len - 1 + if data_len >= 0: + tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN] + [OP_1] * data_len))) + else: + # Just add the minimum and overshoot slightly + tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN]))) + break + else: + # vsize_needed < 10, can't add a new output + # Instead, adjust the first output's size by adding to its script + if vsize_needed > 0 and len(tx.vout[0].scriptPubKey) < max_script_size: + # Extend the first output's script + current_script = tx.vout[0].scriptPubKey + # Add vsize_needed more bytes to the script + new_script = bytes(current_script) + bytes([1] * vsize_needed) + # But cap at max_script_size + if len(new_script) <= max_script_size: + tx.vout[0].scriptPubKey = CScript(new_script) + break + + # If we overshot, try to trim the last output + if tx.get_vsize() > target_vsize and len(tx.vout) > 1: + tx.vout.pop() + # Try again with a smaller 
output + current_vsize = tx.get_vsize() + vsize_needed = target_vsize - current_vsize + if vsize_needed >= 10: + script_len = vsize_needed - 9 + data_len = script_len - 1 + if data_len >= 0: + tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN] + [OP_1] * data_len))) + + # Pad to reach sigop-limit equivalent size + pad_tx_to_vsize(tx, sigop_equivalent_vsize) + if tx.get_vsize() != sigop_equivalent_vsize: + self.log.error(f"Padding failed: got {tx.get_vsize()}, expected {sigop_equivalent_vsize}") + self.log.error(f"Number of outputs: {len(tx.vout)}") + for i, out in enumerate(tx.vout): + self.log.error(f"Output {i}: scriptPubKey len={len(out.scriptPubKey)}, vout entry size={8 + 1 + len(out.scriptPubKey)}") + assert_equal(tx.get_vsize(), sigop_equivalent_vsize) res = self.nodes[0].testmempoolaccept([tx.serialize().hex()])[0] assert_equal(res['allowed'], True) @@ -104,7 +174,7 @@ def test_sigops_limit(self, bytes_per_sigop, num_sigops): # increase the tx's vsize to be right above the sigop-limit equivalent size # => tx's vsize in mempool should also grow accordingly - tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'X'*(256+vsize_to_pad+1)]) + pad_tx_to_vsize(tx, sigop_equivalent_vsize + 1) res = self.nodes[0].testmempoolaccept([tx.serialize().hex()])[0] assert_equal(res['allowed'], True) assert_equal(res['vsize'], sigop_equivalent_vsize+1) @@ -113,7 +183,7 @@ def test_sigops_limit(self, bytes_per_sigop, num_sigops): # => tx's vsize in mempool should stick at the sigop-limit equivalent # bytes level, as it is higher than the tx's serialized vsize # (the maximum of both is taken) - tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'X'*(256+vsize_to_pad-1)]) + pad_tx_to_vsize(tx, sigop_equivalent_vsize - 1) res = self.nodes[0].testmempoolaccept([tx.serialize().hex()])[0] assert_equal(res['allowed'], True) assert_equal(res['vsize'], sigop_equivalent_vsize) @@ -123,6 +193,8 @@ def test_sigops_limit(self, bytes_per_sigop, num_sigops): # (to keep it simple, we only 
test the case here where the sigop vsize # is much larger than the serialized vsize, i.e. we create a small child # tx by getting rid of the large padding output) + while len(tx.vout) > 1: + tx.vout.pop() tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'test123']) assert_greater_than(sigop_equivalent_vsize, tx.get_vsize()) self.nodes[0].sendrawtransaction(hexstring=tx.serialize().hex(), maxburnamount='1.0') @@ -147,33 +219,77 @@ def test_sigops_package(self): self.log.info("Test a overly-large sigops-vbyte hits package limits") # Make a 2-transaction package which fails vbyte checks even though # separately they would work. - self.restart_node(0, extra_args=["-bytespersigop=5000","-permitbaremultisig=1"] + self.extra_args[0]) - - def create_bare_multisig_tx(utxo_to_spend=None): - _, pubkey = generate_keypair() - amount_for_bare = 50000 - tx_dict = self.wallet.create_self_transfer(fee=Decimal("3"), utxo_to_spend=utxo_to_spend) - tx_utxo = tx_dict["new_utxo"] - tx = tx_dict["tx"] - tx.vout.append(CTxOut(amount_for_bare, keys_to_multisig_script([pubkey], k=1))) - tx.vout[0].nValue -= amount_for_bare - tx_utxo["txid"] = tx.rehash() - tx_utxo["value"] -= Decimal("0.00005000") - return (tx_utxo, tx) - - tx_parent_utxo, tx_parent = create_bare_multisig_tx() - _tx_child_utxo, tx_child = create_bare_multisig_tx(tx_parent_utxo) + # + # Using P2WSH multisig instead of bare multisig to comply with REDUCED_DATA + # output size limits (34 bytes max). Witness sigops are discounted by 4x, + # so we use multiple CHECKMULTISIG ops to achieve sufficient sigop-adjusted vsize. 
+ self.restart_node(0, extra_args=["-bytespersigop=5000"] + self.extra_args[0]) + + # With -bytespersigop=5000 and witness discount of 4: + # - Each CHECKMULTISIG = 20 sigops + # - Adjusted vsize per CHECKMULTISIG = 20 * 5000 / 4 = 25,000 + # - Need > 101,000 / 2 = 50,500 per tx to exceed limit as package + # - Use 3 CHECKMULTISIG ops = 60 sigops = 75,000 adjusted vsize per tx + # - Two txs together = 150,000 > 101,000 (fails package limit) + # - Each tx alone = 75,000 < 101,000 (passes individually) + NUM_CHECKMULTISIG_OPS = 3 + expected_sigops_per_tx = NUM_CHECKMULTISIG_OPS * MAX_PUBKEYS_PER_MULTISIG # 60 + expected_vsize_per_tx = expected_sigops_per_tx * 5000 // WITNESS_SCALE_FACTOR # 75,000 + + # Create witness script with multiple CHECKMULTISIG ops (sigops counted even in unexecuted branches) + witness_script = CScript( + [OP_FALSE, OP_IF] + + [OP_CHECKMULTISIG] * NUM_CHECKMULTISIG_OPS + + [OP_ENDIF, OP_TRUE] + ) + p2wsh_script = script_to_p2wsh_script(witness_script) + + # Pre-fund two P2WSH outputs that we'll spend as parent and child + funding_amount = 1000000 + fund_parent = self.wallet.send_to( + from_node=self.nodes[0], + scriptPubKey=p2wsh_script, + amount=funding_amount, + ) + fund_child = self.wallet.send_to( + from_node=self.nodes[0], + scriptPubKey=p2wsh_script, + amount=funding_amount, + ) + self.generate(self.nodes[0], 1) + + # Parent tx: spends first P2WSH (high sigops), outputs to wallet + tx_parent = CTransaction() + tx_parent.vin = [CTxIn(COutPoint(int(fund_parent["txid"], 16), fund_parent["sent_vout"]))] + tx_parent.wit.vtxinwit = [CTxInWitness()] + tx_parent.wit.vtxinwit[0].scriptWitness.stack = [bytes(witness_script)] + # Output back to a standard address (MiniWallet's default) + tx_parent.vout = [CTxOut(funding_amount - 10000, self.wallet.get_output_script())] + tx_parent.rehash() + + # Child tx: spends second P2WSH (high sigops) AND spends parent's output (to form package) + tx_child = CTransaction() + tx_child.vin = [ + 
CTxIn(COutPoint(int(fund_child["txid"], 16), fund_child["sent_vout"])), # P2WSH input (sigops) + CTxIn(COutPoint(tx_parent.sha256, 0)), # Parent's output (links as child) + ] + tx_child.wit.vtxinwit = [CTxInWitness(), CTxInWitness()] + tx_child.wit.vtxinwit[0].scriptWitness.stack = [bytes(witness_script)] # For P2WSH input + tx_child.wit.vtxinwit[1].scriptWitness.stack = [b''] # Placeholder for wallet input + tx_child.vout = [CTxOut(2 * funding_amount - 30000, self.wallet.get_output_script())] + tx_child.rehash() # Separately, the parent tx is ok parent_individual_testres = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex()])[0] + if not parent_individual_testres["allowed"]: + self.log.error(f"Parent tx rejected: {parent_individual_testres}") assert parent_individual_testres["allowed"] - max_multisig_vsize = MAX_PUBKEYS_PER_MULTISIG * 5000 - assert_equal(parent_individual_testres["vsize"], max_multisig_vsize) + assert_equal(parent_individual_testres["vsize"], expected_vsize_per_tx) # But together, it's exceeding limits in the *package* context. If sigops adjusted vsize wasn't being checked # here, it would get further in validation and give too-long-mempool-chain error instead. 
packet_test = self.nodes[0].testmempoolaccept([tx_parent.serialize().hex(), tx_child.serialize().hex()]) - expected_package_error = f"package-mempool-limits, package size {2*max_multisig_vsize} exceeds ancestor size limit [limit: 101000]" + expected_package_error = f"package-mempool-limits, package size {2*expected_vsize_per_tx} exceeds ancestor size limit [limit: 101000]" assert_equal([x["package-error"] for x in packet_test], [expected_package_error] * 2) # When we actually try to submit, the parent makes it into the mempool, but the child would exceed ancestor vsize limits diff --git a/test/functional/tool_utxo_to_sqlite.py b/test/functional/tool_utxo_to_sqlite.py index 7399e7b574..abca9d70fa 100755 --- a/test/functional/tool_utxo_to_sqlite.py +++ b/test/functional/tool_utxo_to_sqlite.py @@ -67,29 +67,43 @@ def run_test(self): wallet = MiniWallet(node) key = ECKey() - self.log.info('Create UTXOs with various output script types') + self.log.info('Test that oversized output scripts are rejected') + key.generate(compressed=False) + uncompressed_pubkey = key.get_pubkey().get_bytes() + key.generate(compressed=True) + pubkey = key.get_pubkey().get_bytes() + + # Test that scripts exceeding MAX_OUTPUT_SCRIPT_SIZE=34 are rejected + invalid_scripts = [ + (key_to_p2pk_script(pubkey), "P2PK compressed (35 bytes)"), + (key_to_p2pk_script(uncompressed_pubkey), "P2PK uncompressed (67 bytes)"), + (keys_to_multisig_script([pubkey]), "Bare multisig 1-of-1 (37 bytes)"), + (keys_to_multisig_script([uncompressed_pubkey]*2), "Bare multisig 2-of-2 uncompressed"), + (CScript([CScriptOp.encode_op_n(1)]*1000), "Large script (1000 bytes)"), + ] + + for script, description in invalid_scripts: + try: + wallet.send_to(from_node=node, scriptPubKey=script, amount=1, fee=20000) + raise AssertionError(f"{description} should have been rejected") + except Exception as e: + assert 'bad-txns-vout-script-toolarge' in str(e), \ + f"{description} rejected with wrong error: {e}" + self.log.info(f" ✓ 
{description} correctly rejected") + + self.log.info('Create UTXOs with valid output script types (≤34 bytes)') for i in range(1, 10+1): - key.generate(compressed=False) - uncompressed_pubkey = key.get_pubkey().get_bytes() key.generate(compressed=True) pubkey = key.get_pubkey().get_bytes() - # add output scripts for compressed script type 0 (P2PKH), type 1 (P2SH), - # types 2-3 (P2PK compressed), types 4-5 (P2PK uncompressed) and - # for uncompressed scripts (bare multisig, segwit, etc.) + # Only include output scripts that comply with MAX_OUTPUT_SCRIPT_SIZE=34 output_scripts = ( - key_to_p2pkh_script(pubkey), - script_to_p2sh_script(key_to_p2pkh_script(pubkey)), - key_to_p2pk_script(pubkey), - key_to_p2pk_script(uncompressed_pubkey), - - keys_to_multisig_script([pubkey]*i), - keys_to_multisig_script([uncompressed_pubkey]*i), - key_to_p2wpkh_script(pubkey), - script_to_p2wsh_script(key_to_p2pkh_script(pubkey)), - output_key_to_p2tr_script(pubkey[1:]), - PAY_TO_ANCHOR, - CScript([CScriptOp.encode_op_n(i)]*(1000*i)), # large script (up to 10000 bytes) + key_to_p2pkh_script(pubkey), # 25 bytes + script_to_p2sh_script(key_to_p2pkh_script(pubkey)), # 23 bytes + key_to_p2wpkh_script(pubkey), # 22 bytes + script_to_p2wsh_script(key_to_p2pkh_script(pubkey)),# 34 bytes + output_key_to_p2tr_script(pubkey[1:]), # 34 bytes + PAY_TO_ANCHOR, # 4 bytes ) # create outputs and mine them in a block From ebe821e7c0103d771e5962d715654099c85707f7 Mon Sep 17 00:00:00 2001 From: 3c853b6299 <3c853b6299@pm.me> Date: Sun, 2 Nov 2025 13:44:25 -0600 Subject: [PATCH 303/356] test: adapt 6 tests to NODE_BIP148 service flag; add assert_equal_without_usage helper for testmempoolaccept results --- test/functional/p2p_addr_relay.py | 5 ++++- test/functional/p2p_handshake.py | 11 +++++----- test/functional/p2p_node_network_limited.py | 3 ++- test/functional/p2p_segwit.py | 7 ++++--- test/functional/rpc_net.py | 10 +++++++-- test/functional/rpc_packages.py | 21 ++++++++++--------- 
test/functional/test_framework/util.py | 23 +++++++++++++++++++++ 7 files changed, 58 insertions(+), 22 deletions(-) diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index 56a9e6a84e..1a5bf45301 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -11,6 +11,9 @@ from test_framework.messages import ( CAddress, + NODE_BIP148, + NODE_NETWORK, + NODE_WITNESS, msg_addr, msg_getaddr, msg_verack, @@ -52,7 +55,7 @@ def on_addr(self, message): if self.test_addr_contents: # relay_tests checks the content of the addr messages match # expectations based on the message creation in setup_addr_msg - assert_equal(addr.nServices, 9) + assert_equal(addr.nServices, NODE_NETWORK | NODE_WITNESS | NODE_BIP148) if not 8333 <= addr.port < 8343: raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port)) assert addr.ip.startswith('123.123.') diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py index 31149fb1ae..67c7174ffe 100755 --- a/test/functional/p2p_handshake.py +++ b/test/functional/p2p_handshake.py @@ -11,6 +11,7 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.messages import ( + NODE_BIP148, NODE_NETWORK, NODE_NETWORK_LIMITED, NODE_NONE, @@ -34,8 +35,8 @@ # the desirable service flags for pruned peers are dynamic and only apply if # 1. the peer's service flag NODE_NETWORK_LIMITED is set *and* # 2. 
the local chain is close to the tip (<24h) -DESIRABLE_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS -DESIRABLE_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS +DESIRABLE_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS | NODE_BIP148 +DESIRABLE_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_BIP148 class P2PHandshakeTest(BitcoinTestFramework): @@ -98,15 +99,15 @@ def run_test(self): self.log.info("Check that lacking desired service flags leads to disconnect (non-pruned peers)") self.test_desirable_service_flags(node, [NODE_NONE, NODE_NETWORK, NODE_WITNESS], DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True) - self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS], + self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS | NODE_BIP148], DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=False) self.log.info("Check that limited peers are only desired if the local chain is close to the tip (<24h)") self.generate_at_mocktime(int(time.time()) - 25 * 3600) # tip outside the 24h window, should fail - self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS], + self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_BIP148], DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True) self.generate_at_mocktime(int(time.time()) - 23 * 3600) # tip inside the 24h window, should succeed - self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS], + self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_BIP148], DESIRABLE_SERVICE_FLAGS_PRUNED, expect_disconnect=False) self.log.info("Check that feeler connections get disconnected immediately") diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py index 8982c35553..c2d789609e 100755 --- a/test/functional/p2p_node_network_limited.py +++ b/test/functional/p2p_node_network_limited.py @@ -11,6 +11,7 @@ from test_framework.messages import 
( CInv, MSG_BLOCK, + NODE_BIP148, NODE_NETWORK_LIMITED, NODE_P2P_V2, NODE_WITNESS, @@ -122,7 +123,7 @@ def test_avoid_requesting_historical_blocks(self): def run_test(self): node = self.nodes[0].add_p2p_connection(P2PIgnoreInv()) - expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED + expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED | NODE_BIP148 if self.options.v2transport: expected_services |= NODE_P2P_V2 diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 694d355506..3c4ed7b521 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -82,6 +82,7 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, + assert_equal_without_usage, assert_raises_rpc_error, ensure_for, softfork_active, @@ -621,7 +622,7 @@ def test_standardness_v0(self): testres3 = self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]) testres3[0]["fees"].pop("effective-feerate") testres3[0]["fees"].pop("effective-includes") - assert_equal(testres3, + assert_equal_without_usage(testres3, [{ 'txid': tx3.hash, 'wtxid': tx3.getwtxid(), @@ -640,7 +641,7 @@ def test_standardness_v0(self): testres3_replaced = self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]) testres3_replaced[0]["fees"].pop("effective-feerate") testres3_replaced[0]["fees"].pop("effective-includes") - assert_equal(testres3_replaced, + assert_equal_without_usage(testres3_replaced, [{ 'txid': tx3.hash, 'wtxid': tx3.getwtxid(), @@ -1355,7 +1356,7 @@ def test_segwit_versions(self): # First try to spend to a future version segwit script_pubkey. 
if version == OP_1: # Don't use 32-byte v1 witness (used by Taproot; see BIP 341) - script_pubkey = CScript([CScriptOp(version), witness_hash + b'\x00']) + script_pubkey = CScript([CScriptOp(version), witness_hash[:31]]) else: script_pubkey = CScript([CScriptOp(version), witness_hash]) tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")] diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 3261fda198..e935bba7b0 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -13,6 +13,10 @@ import time import test_framework.messages +from test_framework.messages import ( + NODE_NETWORK, + NODE_WITNESS, +) from test_framework.p2p import ( P2PInterface, P2P_SERVICES, @@ -315,7 +319,8 @@ def test_getnodeaddresses(self): assert_greater_than(10000, len(node_addresses)) for a in node_addresses: assert_greater_than(a["time"], 1527811200) # 1st June 2018 - assert_equal(a["services"], P2P_SERVICES) + # addpeeraddress stores addresses with default services (NODE_NETWORK | NODE_WITNESS) + assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS) assert a["address"] in imported_addrs assert_equal(a["port"], 8333) assert_equal(a["network"], "ipv4") @@ -326,7 +331,8 @@ def test_getnodeaddresses(self): assert_equal(res[0]["address"], ipv6_addr) assert_equal(res[0]["network"], "ipv6") assert_equal(res[0]["port"], 8333) - assert_equal(res[0]["services"], P2P_SERVICES) + # addpeeraddress stores addresses with default services (NODE_NETWORK | NODE_WITNESS) + assert_equal(res[0]["services"], NODE_NETWORK | NODE_WITNESS) # Test for the absence of onion, I2P and CJDNS addresses. 
for network in ["onion", "i2p", "cjdns"]: diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py index 539e9d09ad..dc7b2f09fc 100755 --- a/test/functional/rpc_packages.py +++ b/test/functional/rpc_packages.py @@ -19,6 +19,7 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, + assert_equal_without_usage, assert_fee_amount, assert_raises_rpc_error, ) @@ -48,7 +49,7 @@ def assert_testres_equal(self, package_hex, testres_expected): random.shuffle(shuffled_indeces) shuffled_package = [package_hex[i] for i in shuffled_indeces] shuffled_testres = [testres_expected[i] for i in shuffled_indeces] - assert_equal(shuffled_testres, self.nodes[0].testmempoolaccept(shuffled_package)) + assert_equal_without_usage(self.nodes[0].testmempoolaccept(shuffled_package), shuffled_testres) def run_test(self): node = self.nodes[0] @@ -119,7 +120,7 @@ def test_independent(self, coin): # transactions here but empty results in other cases. 
tx_bad_sig_txid = tx_bad_sig.rehash() tx_bad_sig_wtxid = tx_bad_sig.getwtxid() - assert_equal(testres_bad_sig, self.independent_txns_testres + [{ + assert_equal_without_usage(testres_bad_sig, self.independent_txns_testres + [{ "txid": tx_bad_sig_txid, "wtxid": tx_bad_sig_wtxid, "allowed": False, "reject-reason": "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", @@ -130,12 +131,12 @@ def test_independent(self, coin): self.log.info("Check testmempoolaccept reports txns in packages that exceed max feerate") tx_high_fee = self.wallet.create_self_transfer(fee=Decimal("0.999")) testres_high_fee = node.testmempoolaccept([tx_high_fee["hex"]]) - assert_equal(testres_high_fee, [ + assert_equal_without_usage(testres_high_fee, [ {"txid": tx_high_fee["txid"], "wtxid": tx_high_fee["wtxid"], "allowed": False, "reject-reason": "max-fee-exceeded"} ]) package_high_fee = [tx_high_fee["hex"]] + self.independent_txns_hex testres_package_high_fee = node.testmempoolaccept(package_high_fee) - assert_equal(testres_package_high_fee, testres_high_fee + self.independent_txns_testres_blank) + assert_equal_without_usage(testres_package_high_fee, testres_high_fee + self.independent_txns_testres_blank) def test_chain(self): node = self.nodes[0] @@ -145,7 +146,7 @@ def test_chain(self): chain_txns = [t["tx"] for t in chain] self.log.info("Check that testmempoolaccept requires packages to be sorted by dependency") - assert_equal(node.testmempoolaccept(rawtxs=chain_hex[::-1]), + assert_equal_without_usage(node.testmempoolaccept(rawtxs=chain_hex[::-1]), [{"txid": tx.rehash(), "wtxid": tx.getwtxid(), "package-error": "package-not-sorted"} for tx in chain_txns[::-1]]) self.log.info("Testmempoolaccept a chain of 25 transactions") @@ -158,7 +159,7 @@ def test_chain(self): testres_single.append(testres[0]) # Submit the transaction now so its child should have no problem validating node.sendrawtransaction(rawtx) - assert_equal(testres_single, testres_multiple) + 
assert_equal_without_usage(testres_single, testres_multiple) # Clean up by clearing the mempool self.generate(node, 1) @@ -235,14 +236,14 @@ def test_conflicting(self): self.log.info("Test duplicate transactions in the same package") testres = node.testmempoolaccept([tx1["hex"], tx1["hex"]]) - assert_equal(testres, [ + assert_equal_without_usage(testres, [ {"txid": tx1["txid"], "wtxid": tx1["wtxid"], "package-error": "package-contains-duplicates"}, {"txid": tx1["txid"], "wtxid": tx1["wtxid"], "package-error": "package-contains-duplicates"} ]) self.log.info("Test conflicting transactions in the same package") testres = node.testmempoolaccept([tx1["hex"], tx2["hex"]]) - assert_equal(testres, [ + assert_equal_without_usage(testres, [ {"txid": tx1["txid"], "wtxid": tx1["wtxid"], "package-error": "conflict-in-package"}, {"txid": tx2["txid"], "wtxid": tx2["wtxid"], "package-error": "conflict-in-package"} ]) @@ -255,7 +256,7 @@ def test_conflicting(self): testres = node.testmempoolaccept([tx1["hex"], tx2["hex"], tx_child["hex"]]) - assert_equal(testres, [ + assert_equal_without_usage(testres, [ {"txid": tx1["txid"], "wtxid": tx1["wtxid"], "package-error": "conflict-in-package"}, {"txid": tx2["txid"], "wtxid": tx2["wtxid"], "package-error": "conflict-in-package"}, {"txid": tx_child["txid"], "wtxid": tx_child["wtxid"], "package-error": "conflict-in-package"} @@ -296,7 +297,7 @@ def test_rbf(self): # Replacement transaction is identical except has double the fee replacement_tx = self.wallet.create_self_transfer(utxo_to_spend=coin, sequence=MAX_BIP125_RBF_SEQUENCE, fee = 2 * fee) testres_rbf_conflicting = node.testmempoolaccept([replaceable_tx["hex"], replacement_tx["hex"]]) - assert_equal(testres_rbf_conflicting, [ + assert_equal_without_usage(testres_rbf_conflicting, [ {"txid": replaceable_tx["txid"], "wtxid": replaceable_tx["wtxid"], "package-error": "conflict-in-package"}, {"txid": replacement_tx["txid"], "wtxid": replacement_tx["wtxid"], "package-error": 
"conflict-in-package"} ]) diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 59c7c78595..32143b297c 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -77,6 +77,29 @@ def assert_equal(thing1, thing2, *args): raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args)) +def assert_equal_without_usage(actual, expected): + """ + Assert that testmempoolaccept results match expected values, ignoring the 'usage' field. + This helper is for tests that were written before the 'usage' field was added. + """ + if isinstance(actual, list) and isinstance(expected, list): + assert_equal(len(actual), len(expected)) + for act, exp in zip(actual, expected): + assert_equal_without_usage(act, exp) + elif isinstance(actual, dict) and isinstance(expected, dict): + # Check that all expected keys match + for key in expected: + assert key in actual, f"Expected key '{key}' not in actual result" + if key != 'usage': # Skip usage comparison + assert_equal(actual[key], expected[key]) + # Verify usage exists and is positive if transaction was validated + if 'usage' in actual: + assert isinstance(actual['usage'], int), "usage should be an integer" + assert actual['usage'] > 0, "usage should be positive" + else: + assert_equal(actual, expected) + + def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("%s <= %s" % (str(thing1), str(thing2))) From 5ee8102c82dae7665426a546ffef40fa8a1da50b Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Sun, 2 Nov 2025 14:13:35 -0600 Subject: [PATCH 304/356] Fix p2p_1p1c_network test by using dynamic feerates based on mempool eviction threshold MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The test was failing because commit 58a329b901 changed gen_return_txouts() from using 1 large OP_RETURN output to 734 small OP_RETURN outputs (to comply with the new 
MAX_OUTPUT_SCRIPT_SIZE=34 consensus rule in bip444). This change altered how fill_mempool() fills the mempool, raising the eviction threshold from ~0.68 sat/vB to ~1.10 sat/vB. The test's create_package_2p1c() was using hardcoded feerates (1.0 and 2.0 sat/vB), causing parent1 to be below the new eviction threshold and get rejected. Solution: Calculate parent feerates dynamically based on the actual mempoolminfee after fill_mempool() runs. This makes the test robust to future changes in mempool dynamics. - Store mempoolminfee in raise_network_minfee() - Use 2x and 4x mempoolminfee for parent1 and parent2 feerates - Add logging to show the calculated feerates Test results with fix: - mempoolminfee: 1.101 sat/vB - parent1: 2.202 sat/vB (2x threshold) → accepted ✓ - parent2: 4.404 sat/vB (4x threshold) → accepted ✓ --- test/functional/p2p_1p1c_network.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/test/functional/p2p_1p1c_network.py b/test/functional/p2p_1p1c_network.py index 4f03542168..ab549393fa 100755 --- a/test/functional/p2p_1p1c_network.py +++ b/test/functional/p2p_1p1c_network.py @@ -53,6 +53,10 @@ def raise_network_minfee(self): assert_equal(node.getmempoolinfo()['minrelaytxfee'], Decimal(DEFAULT_MIN_RELAY_TX_FEE) / COIN) assert_greater_than(node.getmempoolinfo()['mempoolminfee'], Decimal(DEFAULT_MIN_RELAY_TX_FEE) / COIN) + # Store mempoolminfee for dynamic feerate calculation + self.mempoolminfee = self.nodes[0].getmempoolinfo()['mempoolminfee'] + self.log.info(f"mempoolminfee after fill_mempool: {self.mempoolminfee} BTC/kvB ({self.mempoolminfee * 100000:.4f} sat/vB)") + def create_basic_1p1c(self, wallet): low_fee_parent = wallet.create_self_transfer(fee_rate=Decimal(DEFAULT_MIN_RELAY_TX_FEE) / COIN, confirmed_only=True) high_fee_child = wallet.create_self_transfer(utxo_to_spend=low_fee_parent["new_utxo"], fee_rate=999*Decimal(DEFAULT_MIN_RELAY_TX_FEE)/ COIN) @@ -86,8 +90,15 @@ def create_package_2outs(self, wallet): 
return [low_fee_parent_2outs["hex"], high_fee_child_2outs["hex"]], low_fee_parent_2outs["tx"], high_fee_child_2outs["tx"] def create_package_2p1c(self, wallet): - parent1 = wallet.create_self_transfer(fee_rate=Decimal(DEFAULT_MIN_RELAY_TX_FEE) / COIN * 10, confirmed_only=True) - parent2 = wallet.create_self_transfer(fee_rate=Decimal(DEFAULT_MIN_RELAY_TX_FEE) / COIN * 20, confirmed_only=True) + # Use dynamic feerates based on actual mempoolminfee to ensure parents are above eviction threshold + # Set parent1 at 2x threshold, parent2 at 4x threshold (same relative ratio as before) + parent1_feerate = self.mempoolminfee * 2 + parent2_feerate = self.mempoolminfee * 4 + + self.log.info(f"Creating 2p1c package with parent1={parent1_feerate} BTC/kvB, parent2={parent2_feerate} BTC/kvB") + + parent1 = wallet.create_self_transfer(fee_rate=parent1_feerate, confirmed_only=True) + parent2 = wallet.create_self_transfer(fee_rate=parent2_feerate, confirmed_only=True) child = wallet.create_self_transfer_multi( utxos_to_spend=[parent1["new_utxo"], parent2["new_utxo"]], fee_per_output=999*parent1["tx"].get_vsize(), From 5efab4aedc22cc32c696e0de4dbb50b26f643722 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Sun, 2 Nov 2025 19:58:03 -0600 Subject: [PATCH 305/356] Fix interface_bitcoin_cli test to accept BIP148 service bit --- test/functional/interface_bitcoin_cli.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py index 8f62487e1b..6c022d1ef1 100755 --- a/test/functional/interface_bitcoin_cli.py +++ b/test/functional/interface_bitcoin_cli.py @@ -94,7 +94,7 @@ def test_netinfo(self): self.log.info("Test -netinfo local services are moved to header if details are requested") det = self.nodes[0].cli('-netinfo', '1').send_cli().splitlines() self.log.debug(f"Test -netinfo 1 header output: {det[0]}") - assert re.match(rf"^{re.escape(self.config['environment']['CLIENT_NAME'])} client.+services 
nwl2?$", det[0]) + assert re.match(rf"^{re.escape(self.config['environment']['CLIENT_NAME'])} client.+services nwl2?1$", det[0]) assert not any(line.startswith("Local services:") for line in det) def run_test(self): From 4b757d83c35c4a7cba7394c36af90f1e5e2d69c4 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Sun, 2 Nov 2025 20:01:41 -0600 Subject: [PATCH 306/356] Fix p2p_addrv2_relay test to handle BIP148 service bit CompactSize encoding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The test was expecting addrv2 messages to be 187 bytes, but they're now 227 bytes due to the BIP148 service bit being added to P2P_SERVICES. P2P_SERVICES is now NODE_NETWORK | NODE_WITNESS | NODE_BIP148 = 0x08000009, which requires 5 bytes in CompactSize encoding (not 1 byte as before). Updated calc_addrv2_msg_size() to properly calculate the services field size using ser_compact_size() instead of assuming 1 byte. Difference: 5 bytes - 1 byte = 4 bytes per address × 10 addresses = 40 bytes 187 + 40 = 227 bytes ✓ --- test/functional/p2p_addrv2_relay.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/functional/p2p_addrv2_relay.py b/test/functional/p2p_addrv2_relay.py index 8012137971..3609424902 100755 --- a/test/functional/p2p_addrv2_relay.py +++ b/test/functional/p2p_addrv2_relay.py @@ -12,6 +12,7 @@ CAddress, msg_addrv2, msg_sendaddrv2, + ser_compact_size, ) from test_framework.p2p import ( P2PInterface, @@ -62,7 +63,7 @@ def calc_addrv2_msg_size(addrs): size = 1 # vector length byte for addr in addrs: size += 4 # time - size += 1 # services, COMPACTSIZE(P2P_SERVICES) + size += len(ser_compact_size(P2P_SERVICES)) # services, COMPACTSIZE(P2P_SERVICES) size += 1 # network id size += 1 # address length byte size += addr.ADDRV2_ADDRESS_LENGTH[addr.net] # address From f648a21d819144efe2cec0dc00782d49d2eeef21 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Sun, 2 Nov 2025 20:42:28 -0600 Subject: [PATCH 307/356] Fix 
addpeeraddress RPC to include NODE_BIP148 service flag The addpeeraddress RPC was creating addresses with only NODE_NETWORK | NODE_WITNESS, but the node requires NODE_BIP148 for outbound connections (added in commit c684ff1f88 from 2017). ThreadOpenConnections filters addresses using HasAllDesirableServiceFlags, which requires NODE_NETWORK | NODE_WITNESS | NODE_BIP148. Addresses without NODE_BIP148 are skipped entirely, making addpeeraddress useless for its intended testing purpose. This fix updates addpeeraddress to match production requirements, allowing test-added addresses to actually be used for outbound connections. Fixes p2p_seednode.py test which was failing because addresses added via addpeeraddress were being filtered out, preventing "trying v1 connection" log messages from appearing. --- src/rpc/net.cpp | 2 +- test/functional/rpc_net.py | 25 +++++++++++++------------ 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 8a903b4452..d363e2b07b 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -1057,7 +1057,7 @@ static RPCHelpMan addpeeraddress() if (net_addr.has_value()) { CService service{net_addr.value(), port}; - CAddress address{MaybeFlipIPv6toCJDNS(service), ServiceFlags{NODE_NETWORK | NODE_WITNESS}}; + CAddress address{MaybeFlipIPv6toCJDNS(service), ServiceFlags{NODE_NETWORK | NODE_WITNESS | NODE_BIP148}}; address.nTime = Now(); // The source address is set equal to the address. This is equivalent to the peer // announcing itself. 
diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index e935bba7b0..0fce884b44 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -14,6 +14,7 @@ import test_framework.messages from test_framework.messages import ( + NODE_BIP148, NODE_NETWORK, NODE_WITNESS, ) @@ -319,8 +320,8 @@ def test_getnodeaddresses(self): assert_greater_than(10000, len(node_addresses)) for a in node_addresses: assert_greater_than(a["time"], 1527811200) # 1st June 2018 - # addpeeraddress stores addresses with default services (NODE_NETWORK | NODE_WITNESS) - assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS) + # addpeeraddress stores addresses with default services (NODE_NETWORK | NODE_WITNESS | NODE_BIP148) + assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS | NODE_BIP148) assert a["address"] in imported_addrs assert_equal(a["port"], 8333) assert_equal(a["network"], "ipv4") @@ -331,8 +332,8 @@ def test_getnodeaddresses(self): assert_equal(res[0]["address"], ipv6_addr) assert_equal(res[0]["network"], "ipv6") assert_equal(res[0]["port"], 8333) - # addpeeraddress stores addresses with default services (NODE_NETWORK | NODE_WITNESS) - assert_equal(res[0]["services"], NODE_NETWORK | NODE_WITNESS) + # addpeeraddress stores addresses with default services (NODE_NETWORK | NODE_WITNESS | NODE_BIP148) + assert_equal(res[0]["services"], NODE_NETWORK | NODE_WITNESS | NODE_BIP148) # Test for the absence of onion, I2P and CJDNS addresses. 
for network in ["onion", "i2p", "cjdns"]: @@ -510,7 +511,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "82/8", "address": "2.0.0.0", "port": 8333, - "services": 9, + "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, "network": "ipv4", "source": "2.0.0.0", "source_network": "ipv4", @@ -519,7 +520,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "336/24", "address": "fc00:1:2:3:4:5:6:7", "port": 8333, - "services": 9, + "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, "network": "cjdns", "source": "fc00:1:2:3:4:5:6:7", "source_network": "cjdns", @@ -528,7 +529,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "963/46", "address": "c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p", "port": 8333, - "services": 9, + "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, "network": "i2p", "source": "c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p", "source_network": "i2p", @@ -536,7 +537,7 @@ def check_getrawaddrman_entries(expected): { "bucket_position": "613/6", "address": "2803:0:1234:abcd::1", - "services": 9, + "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, "network": "ipv6", "source": "2803:0:1234:abcd::1", "source_network": "ipv6", @@ -548,7 +549,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "6/33", "address": "1.2.3.4", "port": 8333, - "services": 9, + "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, "network": "ipv4", "source": "1.2.3.4", "source_network": "ipv4", @@ -557,7 +558,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "197/34", "address": "1233:3432:2434:2343:3234:2345:6546:4534", "port": 8333, - "services": 9, + "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, "network": "ipv6", "source": "1233:3432:2434:2343:3234:2345:6546:4534", "source_network": "ipv6", @@ -566,7 +567,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "72/61", "address": 
"pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", "port": 8333, - "services": 9, + "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, "network": "onion", "source": "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", "source_network": "onion" @@ -574,7 +575,7 @@ def check_getrawaddrman_entries(expected): { "bucket_position": "139/46", "address": "nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion", - "services": 9, + "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, "network": "onion", "source": "nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion", "source_network": "onion", From 6e9358ee629074e233c46d1e9966a9bae8b042a4 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Mon, 3 Nov 2025 18:33:52 +0000 Subject: [PATCH 308/356] Refactor: Include all reduced_data verify flags in REDUCED_DATA_MANDATORY_VERIFY_FLAGS --- src/policy/policy.h | 2 +- src/script/interpreter.h | 7 +++++++ src/validation.cpp | 5 +---- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/policy/policy.h b/src/policy/policy.h index d19433285e..908c2346e0 100644 --- a/src/policy/policy.h +++ b/src/policy/policy.h @@ -171,7 +171,7 @@ static constexpr unsigned int STANDARD_SCRIPT_VERIFY_FLAGS{MANDATORY_SCRIPT_VERI SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION | SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS | SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE | - SCRIPT_VERIFY_REDUCED_DATA}; + REDUCED_DATA_MANDATORY_VERIFY_FLAGS}; /** For convenience, standard but not mandatory verify flags. 
*/ static constexpr unsigned int STANDARD_NOT_MANDATORY_VERIFY_FLAGS{STANDARD_SCRIPT_VERIFY_FLAGS & ~MANDATORY_SCRIPT_VERIFY_FLAGS}; diff --git a/src/script/interpreter.h b/src/script/interpreter.h index 0f641acc87..392914b71f 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -155,6 +155,13 @@ enum : uint32_t { SCRIPT_VERIFY_END_MARKER }; +static constexpr unsigned int REDUCED_DATA_MANDATORY_VERIFY_FLAGS{0 + | SCRIPT_VERIFY_REDUCED_DATA + | SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM + | SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION + | SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS +}; + bool CheckSignatureEncoding(const std::vector &vchSig, unsigned int flags, ScriptError* serror); struct PrecomputedTransactionData diff --git a/src/validation.cpp b/src/validation.cpp index bfc9c21542..1914df7a0c 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2672,10 +2672,7 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const Ch } if (DeploymentActiveAt(block_index, chainman, Consensus::DEPLOYMENT_REDUCED_DATA)) { - flags |= SCRIPT_VERIFY_REDUCED_DATA | - SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM | - SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION | - SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS; + flags |= REDUCED_DATA_MANDATORY_VERIFY_FLAGS; } return flags; From 8ca8d8918f4322647f2e2767c4a085b98ba7a80e Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Mon, 3 Nov 2025 18:41:11 +0000 Subject: [PATCH 309/356] validation: Extend CheckInputScripts to allow overriding script validation flags on a per-input basis --- src/test/txvalidationcache_tests.cpp | 4 +++- src/validation.cpp | 12 ++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index d22b815fcd..11e48b9f53 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -24,7 +24,9 @@ bool CheckInputScripts(const CTransaction&
tx, TxValidationState& state, const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, ValidationCache& validation_cache, - std::vector* pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + std::vector* pvChecks, + const std::vector& flags_per_input = {} +) EXCLUSIVE_LOCKS_REQUIRED(cs_main); BOOST_AUTO_TEST_SUITE(txvalidationcache_tests) diff --git a/src/validation.cpp b/src/validation.cpp index 1914df7a0c..b4d7619c43 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -142,7 +142,8 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, ValidationCache& validation_cache, - std::vector* pvChecks = nullptr) + std::vector* pvChecks = nullptr, + const std::vector& flags_per_input = {}) EXCLUSIVE_LOCKS_REQUIRED(cs_main); bool CheckFinalTxAtTip(const CBlockIndex& active_chain_tip, const CTransaction& tx) @@ -2405,6 +2406,10 @@ ValidationCache::ValidationCache(const size_t script_execution_cache_bytes, cons * This involves ECDSA signature checks so can be computationally intensive. This function should * only be called after the cheap sanity checks in CheckTxInputs passed. * + * WARNING: flags_per_input deviations from flags must be handled with care. Under no + * circumstances should they allow a script to pass that might not pass with the same + * `flags` parameter (which is used for the cache). + * * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any * script checks which are not necessary (eg due to script execution cache hits) are, obviously, * not pushed onto pvChecks/run. 
@@ -2422,7 +2427,8 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, ValidationCache& validation_cache, - std::vector* pvChecks) + std::vector* pvChecks, + const std::vector& flags_per_input) { if (tx.IsCoinBase()) return true; @@ -2456,8 +2462,10 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, txdata.Init(tx, std::move(spent_outputs)); } assert(txdata.m_spent_outputs.size() == tx.vin.size()); + assert(flags_per_input.empty() || flags_per_input.size() == tx.vin.size()); for (unsigned int i = 0; i < tx.vin.size(); i++) { + if (!flags_per_input.empty()) flags = flags_per_input[i]; // We very carefully only pass in things to CScriptCheck which // are clearly committed to by tx' witness hash. This provides From 66d749f2cf4924d924478a15bb25a4ab17974196 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Mon, 3 Nov 2025 18:43:43 +0000 Subject: [PATCH 310/356] validation: Exempt inputs spending UTXOs prior to ReducedDataHeightBegin from reduced_data script validation rules --- src/validation.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/validation.cpp b/src/validation.cpp index b4d7619c43..1971c44d28 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2891,6 +2891,8 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, CCheckQueueControl control(fScriptChecks && parallel_script_checks ? &m_chainman.GetCheckQueue() : nullptr); std::vector txsdata(block.vtx.size()); + const auto reduced_data_start_height{params.GetConsensus().ReducedDataHeightBegin}; + const auto chk_input_rules{DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_REDUCED_DATA) ? 
CheckTxInputsRules::OutputSizeLimit : CheckTxInputsRules::None}; std::vector prevheights; @@ -2898,6 +2900,7 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, int nInputs = 0; int64_t nSigOpsCost = 0; blockundo.vtxundo.reserve(block.vtx.size() - 1); + std::vector flags_per_input; for (unsigned int i = 0; i < block.vtx.size(); i++) { if (!state.IsValid()) break; @@ -2927,8 +2930,10 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, // BIP68 lock checks (as opposed to nLockTime checks) must // be in ConnectBlock because they require the UTXO set prevheights.resize(tx.vin.size()); + flags_per_input.resize(tx.vin.size()); for (size_t j = 0; j < tx.vin.size(); j++) { prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight; + flags_per_input[j] = (prevheights[j] < reduced_data_start_height) ? (flags & ~REDUCED_DATA_MANDATORY_VERIFY_FLAGS) : flags; } if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) { @@ -2953,7 +2958,7 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, std::vector vChecks; bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */ TxValidationState tx_state; - if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], m_chainman.m_validation_cache, parallel_script_checks ? &vChecks : nullptr)) { + if (fScriptChecks && !CheckInputScripts(tx, tx_state, view, flags, fCacheResults, fCacheResults, txsdata[i], m_chainman.m_validation_cache, parallel_script_checks ? 
&vChecks : nullptr, flags_per_input)) { // Any transaction validation failure in ConnectBlock is a block consensus failure state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, tx_state.GetRejectReason(), tx_state.GetDebugMessage()); From b98b62f1ffaead20349a2066a191bf70adf9926a Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Tue, 18 Nov 2025 15:47:18 -0600 Subject: [PATCH 311/356] Add expiry support to versionbit deployments --- src/consensus/params.h | 3 +++ src/deploymentstatus.h | 10 +++++++++- src/rpc/blockchain.cpp | 11 +++++++++-- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/src/consensus/params.h b/src/consensus/params.h index dd29b9408e..79c3e68e7b 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -52,6 +52,9 @@ struct BIP9Deployment { * boundary. */ int min_activation_height{0}; + /** For temporary softforks: number of blocks the deployment remains active after activation. + * std::numeric_limits::max() means permanent (never expires). */ + int active_duration{std::numeric_limits::max()}; /** Constant for nTimeout very far in the future. 
*/ static constexpr int64_t NO_TIMEOUT = std::numeric_limits::max(); diff --git a/src/deploymentstatus.h b/src/deploymentstatus.h index 03d3c531cc..550f38154b 100644 --- a/src/deploymentstatus.h +++ b/src/deploymentstatus.h @@ -20,7 +20,15 @@ inline bool DeploymentActiveAfter(const CBlockIndex* pindexPrev, const Consensus inline bool DeploymentActiveAfter(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos dep, VersionBitsCache& versionbitscache) { assert(Consensus::ValidDeployment(dep)); - return ThresholdState::ACTIVE == versionbitscache.State(pindexPrev, params, dep); + if (ThresholdState::ACTIVE != versionbitscache.State(pindexPrev, params, dep)) return false; + + const auto& deployment = params.vDeployments[dep]; + // Permanent deployment (never expires) + if (deployment.active_duration == std::numeric_limits::max()) return true; + + const int activation_height = versionbitscache.StateSinceHeight(pindexPrev, params, dep); + const int height = pindexPrev == nullptr ? 
0 : pindexPrev->nHeight + 1; + return height < activation_height + deployment.active_duration; } /** Determine if a deployment is active for this block */ diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 89ab885caa..680a986870 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1729,7 +1729,13 @@ static void SoftForkDescPushBack(const CBlockIndex* blockindex, UniValue& softfo UniValue rv(UniValue::VOBJ); rv.pushKV("type", "bip9"); if (ThresholdState::ACTIVE == next_state) { - rv.pushKV("height", chainman.m_versionbitscache.StateSinceHeight(blockindex, chainman.GetConsensus(), id)); + const int activation_height = chainman.m_versionbitscache.StateSinceHeight(blockindex, chainman.GetConsensus(), id); + rv.pushKV("height", activation_height); + // Add height_end for temporary softforks + const auto& deployment = chainman.GetConsensus().vDeployments[id]; + if (deployment.active_duration < std::numeric_limits::max()) { + rv.pushKV("height_end", activation_height + deployment.active_duration - 1); + } } rv.pushKV("active", ThresholdState::ACTIVE == next_state); rv.pushKV("bip9", std::move(bip9)); @@ -1826,7 +1832,8 @@ RPCHelpMan getblockchaininfo() namespace { const std::vector RPCHelpForDeployment{ {RPCResult::Type::STR, "type", "one of \"buried\", \"bip9\""}, - {RPCResult::Type::NUM, "height", /*optional=*/true, "height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)"}, + {RPCResult::Type::NUM, "height", /*optional=*/true, "height of the first block which enforces the rules (only for \"buried\" type, or \"bip9\" type with \"active\" status)"}, + {RPCResult::Type::NUM, "height_end", /*optional=*/true, "height of the last block which enforces the rules (only for \"bip9\" type with \"active\" status and temporary deployments)"}, {RPCResult::Type::BOOL, "active", "true if the rules are enforced for the mempool and the next block"}, {RPCResult::Type::OBJ, 
"bip9", /*optional=*/true, "status of bip9 softforks (only for \"bip9\" type)", { From ec553b01f2f18a6602aa816128159fbc4c0c81a2 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Tue, 18 Nov 2025 16:05:30 -0600 Subject: [PATCH 312/356] Add max_activation_height for mandatory BIP9 activation --- src/consensus/params.h | 4 ++++ src/rpc/blockchain.cpp | 4 ++++ src/versionbits.cpp | 9 +++++++++ src/versionbits.h | 1 + 4 files changed, 18 insertions(+) diff --git a/src/consensus/params.h b/src/consensus/params.h index 79c3e68e7b..76fa1381ae 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -52,6 +52,10 @@ struct BIP9Deployment { * boundary. */ int min_activation_height{0}; + /** Maximum height for activation. If less than INT_MAX, the deployment will activate + * at this height regardless of signaling (similar to BIP8 flag day). + * std::numeric_limits::max() means no maximum (activation only via signaling). */ + int max_activation_height{std::numeric_limits::max()}; /** For temporary softforks: number of blocks the deployment remains active after activation. * std::numeric_limits::max() means permanent (never expires). 
*/ int active_duration{std::numeric_limits::max()}; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 680a986870..a7662f46a3 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1697,6 +1697,9 @@ static void SoftForkDescPushBack(const CBlockIndex* blockindex, UniValue& softfo bip9.pushKV("start_time", chainman.GetConsensus().vDeployments[id].nStartTime); bip9.pushKV("timeout", chainman.GetConsensus().vDeployments[id].nTimeout); bip9.pushKV("min_activation_height", chainman.GetConsensus().vDeployments[id].min_activation_height); + if (chainman.GetConsensus().vDeployments[id].max_activation_height < std::numeric_limits::max()) { + bip9.pushKV("max_activation_height", chainman.GetConsensus().vDeployments[id].max_activation_height); + } // BIP9 status bip9.pushKV("status", get_state_name(current_state)); @@ -1841,6 +1844,7 @@ const std::vector RPCHelpForDeployment{ {RPCResult::Type::NUM_TIME, "start_time", "the minimum median time past of a block at which the bit gains its meaning"}, {RPCResult::Type::NUM_TIME, "timeout", "the median time past of a block at which the deployment is considered failed if not yet locked in"}, {RPCResult::Type::NUM, "min_activation_height", "minimum height of blocks for which the rules may be enforced"}, + {RPCResult::Type::NUM, "max_activation_height", /*optional=*/true, "height at which the deployment will unconditionally activate (absent for miner-vetoable deployments)"}, {RPCResult::Type::STR, "status", "status of deployment at specified block (one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\")"}, {RPCResult::Type::NUM, "since", "height of the first block to which the status applies"}, {RPCResult::Type::STR, "status_next", "status of deployment at the next block"}, diff --git a/src/versionbits.cpp b/src/versionbits.cpp index fa9d1fe9c9..d37c0139e7 100644 --- a/src/versionbits.cpp +++ b/src/versionbits.cpp @@ -11,6 +11,7 @@ ThresholdState 
AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* int nPeriod = Period(params); int nThreshold = Threshold(params); int min_activation_height = MinActivationHeight(params); + int max_activation_height = MaxActivationHeight(params); int64_t nTimeStart = BeginTime(params); int64_t nTimeTimeout = EndTime(params); @@ -74,8 +75,15 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex* pindexCount = pindexCount->pprev; } if (count >= nThreshold) { + // Normal BIP9 activation via signaling + stateNext = ThresholdState::LOCKED_IN; + } else if (max_activation_height < std::numeric_limits::max() && pindexPrev->nHeight + 1 >= max_activation_height - nPeriod) { + // Force LOCKED_IN one period before max_activation_height + // This ensures activation happens AT max_activation_height (not one period later) + // Overrides timeout to guarantee activation stateNext = ThresholdState::LOCKED_IN; } else if (pindexPrev->GetMedianTimePast() >= nTimeTimeout) { + // Timeout without activation stateNext = ThresholdState::FAILED; } break; @@ -185,6 +193,7 @@ class VersionBitsConditionChecker : public AbstractThresholdConditionChecker { int64_t BeginTime(const Consensus::Params& params) const override { return params.vDeployments[id].nStartTime; } int64_t EndTime(const Consensus::Params& params) const override { return params.vDeployments[id].nTimeout; } int MinActivationHeight(const Consensus::Params& params) const override { return params.vDeployments[id].min_activation_height; } + int MaxActivationHeight(const Consensus::Params& params) const override { return params.vDeployments[id].max_activation_height; } int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; } int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; } diff --git a/src/versionbits.h b/src/versionbits.h index 09313d2054..82c11ce125 100644 --- a/src/versionbits.h +++ 
b/src/versionbits.h @@ -60,6 +60,7 @@ class AbstractThresholdConditionChecker { virtual int64_t BeginTime(const Consensus::Params& params) const =0; virtual int64_t EndTime(const Consensus::Params& params) const =0; virtual int MinActivationHeight(const Consensus::Params& params) const { return 0; } + virtual int MaxActivationHeight(const Consensus::Params& params) const { return std::numeric_limits::max(); } virtual int Period(const Consensus::Params& params) const =0; virtual int Threshold(const Consensus::Params& params) const =0; From 405c2ff5118a931f41b48a265d9272ed0576e442 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Wed, 19 Nov 2025 15:01:24 -0600 Subject: [PATCH 313/356] Add DEPLOYMENT_REDUCED_DATA temporary BIP9 deployment --- src/consensus/params.h | 1 + src/deploymentinfo.cpp | 4 ++++ src/kernel/chainparams.cpp | 22 ++++++++++++++++++++++ src/rpc/blockchain.cpp | 1 + src/validation.cpp | 5 ++++- 5 files changed, 32 insertions(+), 1 deletion(-) diff --git a/src/consensus/params.h b/src/consensus/params.h index 76fa1381ae..0c765805bd 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -32,6 +32,7 @@ constexpr bool ValidDeployment(BuriedDeployment dep) { return dep <= DEPLOYMENT_ enum DeploymentPos : uint16_t { DEPLOYMENT_TESTDUMMY, DEPLOYMENT_TAPROOT, // Deployment of Schnorr/Taproot (BIPs 340-342) + DEPLOYMENT_REDUCED_DATA, // ReducedData Temporary Softfork (RDTS) // NOTE: Also add new deployments to VersionBitsDeploymentInfo in deploymentinfo.cpp MAX_VERSION_BITS_DEPLOYMENTS }; diff --git a/src/deploymentinfo.cpp b/src/deploymentinfo.cpp index 185a7dcb54..200f5fd263 100644 --- a/src/deploymentinfo.cpp +++ b/src/deploymentinfo.cpp @@ -17,6 +17,10 @@ const struct VBDeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_B /*.name =*/ "taproot", /*.gbt_force =*/ true, }, + { + /*.name =*/ "reduced_data", + /*.gbt_force =*/ true, + }, }; std::string DeploymentName(Consensus::BuriedDeployment dep) diff --git 
a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 861850b8e5..78e05f30c9 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -280,6 +280,13 @@ class CTestNetParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1628640000; // August 11th, 2021 consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay + // ReducedData Temporary Softfork (RDTS) + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = 1764547200; // December 1st, 2025 + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].active_duration = 52416; // ~1 year + consensus.nMinimumChainWork = uint256{"0000000000000000000000000000000000000000000015f5e0c9f13455b0eb17"}; consensus.defaultAssumeValid = uint256{"00000000000003fc7967410ba2d0a8a8d50daedc318d43e8baf1a9782c236a57"}; // 3974606 @@ -379,6 +386,11 @@ class CTestNet4Params : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + consensus.nMinimumChainWork = uint256{"0000000000000000000000000000000000000000000001d6dce8651b6094e4c1"}; consensus.defaultAssumeValid = 
uint256{"0000000000003ed4f08dbdf6f7d6b271a6bcffce25675cb40aa9fa43179a89f3"}; // 72600 @@ -517,6 +529,11 @@ class SigNetParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + // message start is defined as the first 4 bytes of the sha256d of the block script HashWriter h{}; h << consensus.signet_challenge; @@ -592,6 +609,11 @@ class CRegTestParams : public CChainParams consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + consensus.nMinimumChainWork = uint256{}; consensus.defaultAssumeValid = uint256{}; diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index a7662f46a3..7fec944051 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -1871,6 +1871,7 @@ UniValue DeploymentInfo(const CBlockIndex* blockindex, const ChainstateManager& SoftForkDescPushBack(blockindex, softforks, chainman, Consensus::DEPLOYMENT_SEGWIT); SoftForkDescPushBack(blockindex, 
softforks, chainman, Consensus::DEPLOYMENT_TESTDUMMY); SoftForkDescPushBack(blockindex, softforks, chainman, Consensus::DEPLOYMENT_TAPROOT); + SoftForkDescPushBack(blockindex, softforks, chainman, Consensus::DEPLOYMENT_REDUCED_DATA); return softforks; } } // anon namespace diff --git a/src/validation.cpp b/src/validation.cpp index 1971c44d28..02a3630d2b 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2891,7 +2891,10 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, CCheckQueueControl control(fScriptChecks && parallel_script_checks ? &m_chainman.GetCheckQueue() : nullptr); std::vector txsdata(block.vtx.size()); - const auto reduced_data_start_height{params.GetConsensus().ReducedDataHeightBegin}; + // For BIP9 deployments, get the activation height dynamically + const auto reduced_data_start_height = DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_REDUCED_DATA) + ? m_chainman.m_versionbitscache.StateSinceHeight(pindex->pprev, params.GetConsensus(), Consensus::DEPLOYMENT_REDUCED_DATA) + : std::numeric_limits::max(); const auto chk_input_rules{DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_REDUCED_DATA) ? 
CheckTxInputsRules::OutputSizeLimit : CheckTxInputsRules::None}; From f1c9f2789d738704a4307aa57aab197dc6b0de3e Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Tue, 18 Nov 2025 22:46:30 -0600 Subject: [PATCH 314/356] Support regtest vbparams for max_activation_height and active_duration --- src/chainparams.cpp | 20 +- src/kernel/chainparams.cpp | 2 + src/kernel/chainparams.h | 2 + .../feature_bip9_max_activation_height.py | 371 ++++++++++++++++++ ...ature_reduced_data_temporary_deployment.py | 213 ++++++++++ test/functional/test_runner.py | 2 + 6 files changed, 607 insertions(+), 3 deletions(-) create mode 100644 test/functional/feature_bip9_max_activation_height.py create mode 100644 test/functional/feature_reduced_data_temporary_deployment.py diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 7290b31479..10b0947bed 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -77,8 +77,8 @@ void ReadRegTestArgs(const ArgsManager& args, CChainParams::RegTestOptions& opti for (const std::string& strDeployment : args.GetArgs("-vbparams")) { std::vector vDeploymentParams = SplitString(strDeployment, ':'); - if (vDeploymentParams.size() < 3 || 4 < vDeploymentParams.size()) { - throw std::runtime_error("Version bits parameters malformed, expecting deployment:start:end[:min_activation_height]"); + if (vDeploymentParams.size() < 3 || 6 < vDeploymentParams.size()) { + throw std::runtime_error("Version bits parameters malformed, expecting deployment:start:end[:min_activation_height[:max_activation_height[:active_duration]]]"); } CChainParams::VersionBitsParameters vbparams{}; if (!ParseInt64(vDeploymentParams[1], &vbparams.start_time)) { @@ -94,12 +94,26 @@ void ReadRegTestArgs(const ArgsManager& args, CChainParams::RegTestOptions& opti } else { vbparams.min_activation_height = 0; } + if (vDeploymentParams.size() >= 5) { + if (!ParseInt32(vDeploymentParams[4], &vbparams.max_activation_height)) { + throw std::runtime_error(strprintf("Invalid max_activation_height 
(%s)", vDeploymentParams[4])); + } + } + if (vDeploymentParams.size() >= 6) { + if (!ParseInt32(vDeploymentParams[5], &vbparams.active_duration)) { + throw std::runtime_error(strprintf("Invalid active_duration (%s)", vDeploymentParams[5])); + } + } + // Validate that timeout and max_activation_height are mutually exclusive + if (vbparams.timeout != Consensus::BIP9Deployment::NO_TIMEOUT && vbparams.max_activation_height < std::numeric_limits::max()) { + throw std::runtime_error(strprintf("Cannot specify both timeout (%ld) and max_activation_height (%d) for deployment %s. Use timeout for BIP9 or max_activation_height for mandatory activation deadline, not both.", vbparams.timeout, vbparams.max_activation_height, vDeploymentParams[0])); + } bool found = false; for (int j=0; j < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++j) { if (vDeploymentParams[0] == VersionBitsDeploymentInfo[j].name) { options.version_bits_parameters[Consensus::DeploymentPos(j)] = vbparams; found = true; - LogPrintf("Setting version bits activation parameters for %s to start=%ld, timeout=%ld, min_activation_height=%d\n", vDeploymentParams[0], vbparams.start_time, vbparams.timeout, vbparams.min_activation_height); + LogPrintf("Setting version bits activation parameters for %s to start=%ld, timeout=%ld, min_activation_height=%d, max_activation_height=%d, active_duration=%d\n", vDeploymentParams[0], vbparams.start_time, vbparams.timeout, vbparams.min_activation_height, vbparams.max_activation_height, vbparams.active_duration); break; } } diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 78e05f30c9..ca6adab18e 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -650,6 +650,8 @@ class CRegTestParams : public CChainParams consensus.vDeployments[deployment_pos].nStartTime = version_bits_params.start_time; consensus.vDeployments[deployment_pos].nTimeout = version_bits_params.timeout; consensus.vDeployments[deployment_pos].min_activation_height = 
version_bits_params.min_activation_height; + consensus.vDeployments[deployment_pos].max_activation_height = version_bits_params.max_activation_height; + consensus.vDeployments[deployment_pos].active_duration = version_bits_params.active_duration; } genesis = CreateGenesisBlock(1296688602, 2, 0x207fffff, 1, 50 * COIN); diff --git a/src/kernel/chainparams.h b/src/kernel/chainparams.h index a30097ca96..4e70a3392e 100644 --- a/src/kernel/chainparams.h +++ b/src/kernel/chainparams.h @@ -153,6 +153,8 @@ class CChainParams int64_t start_time; int64_t timeout; int min_activation_height; + int max_activation_height{std::numeric_limits::max()}; + int active_duration{std::numeric_limits::max()}; }; /** diff --git a/test/functional/feature_bip9_max_activation_height.py b/test/functional/feature_bip9_max_activation_height.py new file mode 100644 index 0000000000..a8ad4744b3 --- /dev/null +++ b/test/functional/feature_bip9_max_activation_height.py @@ -0,0 +1,371 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test max_activation_height for mandatory BIP9 activation. + +This test verifies that BIP9 deployments with max_activation_height properly +activate at the specified height regardless of miner signaling, similar to BIP8. + +The test verifies four critical scenarios: +1. Mandatory activation at max_height without signaling +2. Normal deployment without max_height (requires signaling) +3. Early activation via signaling before reaching max_height +4. 
Max_height overrides timeout + +Expected behavior: +- When max_activation_height is set and reached while in STARTED state, + the deployment transitions to LOCKED_IN (then ACTIVE) regardless of signaling +- Max_activation_height overrides timeout +- Once ACTIVE, the deployment remains ACTIVE permanently (terminal state) +- Without max_activation_height, activation requires sufficient signaling +""" + +from test_framework.blocktools import ( + create_block, + create_coinbase, + add_witness_commitment, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +TESTDUMMY_BIT = 28 +VERSIONBITS_TOP_BITS = 0x20000000 + + +class MaxActivationHeightTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 5 # 5 nodes for tests 1-5 (test 0 validation is done separately) + self.setup_clean_chain = True + # NO_TIMEOUT = std::numeric_limits::max() = 9223372036854775807 + NO_TIMEOUT = '9223372036854775807' + self.extra_args = [ + [f'-vbparams=testdummy:0:{NO_TIMEOUT}:0:576'], # Test 1: max_height=576 (shows full flow) + ['-vbparams=testdummy:0:999999999999'], # Test 2: no max_height (uses timeout) + [f'-vbparams=testdummy:0:{NO_TIMEOUT}:0:576'], # Test 3: max_height=576 (early activation) + [f'-vbparams=testdummy:0:{NO_TIMEOUT}:0:432'], # Test 4: verify permanent ACTIVE + [f'-vbparams=testdummy:0:{NO_TIMEOUT}:0:432:144'], # Test 5: max_height + active_duration + ] + + def setup_network(self): + """Keep nodes isolated - don't connect them to each other""" + self.add_nodes(self.num_nodes) + for i in range(self.num_nodes): + self.start_node(i, extra_args=self.extra_args[i]) + # Nodes remain disconnected for independent blockchain testing + + def mine_blocks(self, node, count, signal=False): + """Mine count blocks, optionally signaling for testdummy.""" + for i in range(count): + tip = node.getbestblockhash() + height = node.getblockcount() + 1 + tip_header = node.getblockheader(tip) + block_time = 
tip_header['time'] + 1 + block = create_block(int(tip, 16), create_coinbase(height), ntime=block_time) + if signal: + block.nVersion = VERSIONBITS_TOP_BITS | (1 << TESTDUMMY_BIT) + add_witness_commitment(block) + block.solve() + node.submitblock(block.serialize().hex()) + + # Log every 20 blocks and at key heights for debugging + if height % 20 == 0 or height == 143 or height == 144: + mtp = node.getblockheader(node.getbestblockhash())['mediantime'] + self.log.info(f" Block {height}: time={block_time}, MTP={mtp}") + + def get_status(self, node): + """Get testdummy deployment status.""" + info = node.getdeploymentinfo() + td = info['deployments']['testdummy'] + if 'bip9' in td: + return td['bip9']['status'], td['bip9'].get('since', 0) + return td.get('status', 'unknown'), 0 + + def run_test(self): + # Test 0: Verify validation rejects both timeout and max_activation_height + self.log.info("=== TEST 0: Validation test - reject both timeout and max_activation_height ===") + self.log.info("Attempting to start bitcoind with both timeout and max_activation_height...") + + # Run bitcoind directly with invalid config to test validation + import subprocess + import os + + # Get the bitcoind binary path from the test framework + bitcoind_path = self.options.bitcoind + + # Create a temporary datadir for this test + import tempfile + with tempfile.TemporaryDirectory() as tmpdir: + try: + # Run bitcoind with invalid vbparams (both timeout and max_activation_height) + result = subprocess.run( + [bitcoind_path, f'-datadir={tmpdir}', '-regtest', + '-vbparams=testdummy:0:1:0:432'], # timeout=1, max_activation_height=432 + capture_output=True, + text=True, + timeout=5 + ) + + # If we get here with exit code 0, the validation failed + if result.returncode == 0: + raise AssertionError("bitcoind should have failed to start with both timeout and max_activation_height") + + # Check that the error message contains the expected validation error + error_output = result.stderr + 
self.log.info(f"bitcoind correctly failed with error: {error_output[:200]}") + + assert "Cannot specify both timeout" in error_output and "max_activation_height" in error_output, \ + f"Expected validation error about both parameters, got: {error_output}" + + self.log.info("SUCCESS: Validation correctly rejected invalid configuration") + + except subprocess.TimeoutExpired: + raise AssertionError("bitcoind timed out (should have failed immediately with validation error)") + + self.log.info("\n=== Test: max_activation_height=576 (full flow with non-mandatory period) ===") + node = self.nodes[0] + + # Check deployment info to verify max_activation_height is set + info = node.getdeploymentinfo() + self.log.info(f"Deployment info: {info['deployments']['testdummy']}") + + # Period 0 (0-143): DEFINED + self.log.info("\n--- Period 0 (blocks 0-143): DEFINED ---") + self.mine_blocks(node, 143, signal=False) + assert_equal(node.getblockcount(), 143) + status, since = self.get_status(node) + self.log.info(f"Block 143: Status={status}") + assert_equal(status, 'defined') + + # Block 144: Transition to STARTED + self.log.info("\n--- Block 144: Transition to STARTED ---") + self.mine_blocks(node, 1, signal=False) + status, since = self.get_status(node) + self.log.info(f"Block 144: Status={status}, Since={since}") + assert_equal(status, 'started') + assert_equal(since, 144) + + # Period 1 (144-287): STARTED + self.log.info("\n--- Period 1 (blocks 145-287): STARTED ---") + self.mine_blocks(node, 143, signal=False) + assert_equal(node.getblockcount(), 287) + status, since = self.get_status(node) + self.log.info(f"Block 287: Status={status}") + assert_equal(status, 'started') + + # Period 2 (288-431): STARTED - forced lock-in will occur at end of this period + self.log.info("\n--- Period 2 (blocks 288-431): STARTED ---") + self.log.info("Forced lock-in will occur at block 432 (max_activation_height - nPeriod)") + self.mine_blocks(node, 144, signal=False) + 
assert_equal(node.getblockcount(), 431) + status, since = self.get_status(node) + self.log.info(f"Block 431: Status={status}") + assert_equal(status, 'started') + + # Period 3 (432-575): LOCKED_IN (forced by max_activation_height) + self.log.info("\n--- Period 3 (blocks 432-575): LOCKED_IN ---") + self.mine_blocks(node, 1, signal=False) # Mine block 432 + assert_equal(node.getblockcount(), 432) + status, since = self.get_status(node) + self.log.info(f"Block 432: Status={status}, Since={since}") + assert_equal(status, 'locked_in') + assert_equal(since, 432) + + # Mine through period 3 to activate at block 576 + self.log.info("Mining blocks 433-575...") + self.mine_blocks(node, 143, signal=False) + assert_equal(node.getblockcount(), 575) + status, since = self.get_status(node) + assert_equal(status, 'locked_in') + + # Period 4 (576+): ACTIVE + self.log.info("\n--- Period 4 (block 576+): ACTIVE ---") + self.mine_blocks(node, 1, signal=False) + assert_equal(node.getblockcount(), 576) + status, since = self.get_status(node) + self.log.info(f"Block 576: Status={status}, Since={since}") + assert_equal(status, 'active') + assert_equal(since, 576) + + self.log.info("\n=== TEST 1 COMPLETE ===") + self.log.info("Summary: max_activation_height=576 test passed") + self.log.info("- Deployment activated at height 576 via forced lock-in at 432") + + # Test 2: Deployment without max_height requires signaling + self.log.info("\n\n=== TEST 2: Deployment without max_height requires signaling ===") + node = self.nodes[1] + + # Period 0 (0-143): DEFINED + self.log.info("\n--- Period 0 (blocks 0-143): DEFINED ---") + self.mine_blocks(node, 143, signal=False) + assert_equal(node.getblockcount(), 143) + status, since = self.get_status(node) + self.log.info(f"Block 143: Status={status}") + assert_equal(status, 'defined') + + # Mine period 1 (blocks 144-287) without signaling - should transition to STARTED + self.log.info("Mining period 1 (blocks 144-287) without signaling...") + 
self.mine_blocks(node, 144, signal=False) + assert_equal(node.getblockcount(), 287) + status, since = self.get_status(node) + self.log.info(f"Block 287: Status={status}") + assert_equal(status, 'started') + + # Mine period 2 (blocks 288-431) without signaling - should remain STARTED + self.log.info("Mining period 2 (blocks 288-431) without signaling...") + self.mine_blocks(node, 144, signal=False) + status, since = self.get_status(node) + self.log.info(f"Block 431: Status={status}") + assert_equal(status, 'started') # Should NOT lock in without signaling + + # Mine period 3 (blocks 432-575) without signaling - should remain STARTED + self.log.info("Mining period 3 (blocks 432-575) without signaling...") + self.mine_blocks(node, 144, signal=False) + status, since = self.get_status(node) + self.log.info(f"Block 575: Status={status}") + assert_equal(status, 'started') # Still STARTED without signaling + + self.log.info("\n=== TEST 2 COMPLETE ===") + self.log.info("SUCCESS: Deployment did NOT activate without signaling (no max_height)") + + # Test 3: Early activation via signaling before max_height + self.log.info("\n\n=== TEST 3: Early activation via signaling before max_height ===") + node = self.nodes[2] + + # Period 0 (0-143): DEFINED + self.log.info("\n--- Period 0 (blocks 0-143): DEFINED ---") + self.mine_blocks(node, 143, signal=False) + assert_equal(node.getblockcount(), 143) + status, since = self.get_status(node) + self.log.info(f"Block 143: Status={status}") + assert_equal(status, 'defined') + + # Mine period 1 (blocks 144-287) with 100% signaling + self.log.info("Mining period 1 (blocks 144-287) with 100% signaling...") + self.mine_blocks(node, 144, signal=True) + assert_equal(node.getblockcount(), 287) + status, since = self.get_status(node) + self.log.info(f"Block 287: Status={status}") + assert_equal(status, 'started') + + # Mine period 2 (blocks 288-431) with signaling - should lock in + self.log.info("Mining period 2 (blocks 288-431) with signaling - 
should lock in...") + self.mine_blocks(node, 144, signal=True) + assert_equal(node.getblockcount(), 431) + status, since = self.get_status(node) + self.log.info(f"Block 431: Status={status}, Since={since}") + assert_equal(status, 'locked_in') + assert_equal(since, 288) # Locked in at start of period 2 via signaling threshold + + # Mine block 432 - should activate via signaling (well before max_height 576) + self.log.info("Mining block 432 - should activate via signaling (before max_height 576)...") + self.mine_blocks(node, 1, signal=True) + assert_equal(node.getblockcount(), 432) + status, since = self.get_status(node) + self.log.info(f"Block 432: Status={status}, Since={since}") + assert_equal(status, 'active') + assert_equal(since, 432) + + self.log.info("\n=== TEST 3 COMPLETE ===") + self.log.info("SUCCESS: Deployment activated early via signaling (at 432, before max_height 576)") + + # Test 4: Verify ACTIVE state is permanent + self.log.info("\n\n=== TEST 4: Verify ACTIVE state is permanent ===") + node = self.nodes[3] + + # Activate via max_height (max_height=432) + # Mine to block 287 (through periods 0 and 1) + self.log.info("Mining through periods 0 and 1 to block 287...") + self.mine_blocks(node, 287, signal=False) + assert_equal(node.getblockcount(), 287) + status, since = self.get_status(node) + assert_equal(status, 'started') + + # Mine period 2 (blocks 288-431) - will force lock-in at 288 (432 - 144) + self.log.info("Mining period 2 (blocks 288-431) - forced lock-in at end...") + self.mine_blocks(node, 144, signal=False) + assert_equal(node.getblockcount(), 431) + status, since = self.get_status(node) + assert_equal(status, 'locked_in') + + # Mine block 432 - should activate + self.log.info("Mining block 432 - should activate via max_height...") + self.mine_blocks(node, 1, signal=False) + assert_equal(node.getblockcount(), 432) + status, since = self.get_status(node) + self.log.info(f"Block 432: Status={status}, Since={since}") + assert_equal(status, 
'active') + assert_equal(since, 432) + + # Mine 300 more blocks to verify permanence + self.log.info("Mining 300 more blocks to verify ACTIVE state persists...") + self.mine_blocks(node, 300, signal=False) + assert_equal(node.getblockcount(), 732) + status, since = self.get_status(node) + self.log.info(f"Block 732: Status={status}, Since={since}") + assert_equal(status, 'active') + assert_equal(since, 432) + + self.log.info("\n=== TEST 4 COMPLETE ===") + self.log.info("SUCCESS: Deployment remains ACTIVE permanently") + + # Test 5: Combined temporary deployment with max_height + self.log.info("\n\n=== TEST 5: Temporary deployment with max_height ===") + node = self.nodes[4] + + # This node has max_activation_height=432 AND active_duration=144 + # Should activate at 432 via max_height, then expire at 432+144=576 + self.log.info("Mining through periods 0 and 1 to block 287...") + self.mine_blocks(node, 287, signal=False) + assert_equal(node.getblockcount(), 287) + status, since = self.get_status(node) + self.log.info(f"Block 287: Status={status}") + assert_equal(status, 'started') + + # Mine period 2 (blocks 288-431) - will force lock-in at 288 (432 - 144) + self.log.info("Mining period 2 (blocks 288-431) - forced lock-in at end...") + self.mine_blocks(node, 144, signal=False) + assert_equal(node.getblockcount(), 431) + status, since = self.get_status(node) + self.log.info(f"Block 431: Status={status}") + assert_equal(status, 'locked_in') + + # Mine block 432 - should activate via max_height + self.log.info("Mining block 432 - should activate via max_height...") + self.mine_blocks(node, 1, signal=False) + assert_equal(node.getblockcount(), 432) + status, since = self.get_status(node) + self.log.info(f"Block 432: Status={status}, Since={since}") + assert_equal(status, 'active') + assert_equal(since, 432) + + # Mine through active period to block 575 (432+144-1) + self.log.info("Mining through active period to block 575 (432+144-1)...") + self.mine_blocks(node, 143, 
signal=False) + assert_equal(node.getblockcount(), 575) + status, since = self.get_status(node) + self.log.info(f"Block 575: Status={status}") + assert_equal(status, 'active') + + # Mine block 576 (432+144) - last active block + self.log.info("Mining block 576 (432+144) - last active block...") + self.mine_blocks(node, 1, signal=False) + assert_equal(node.getblockcount(), 576) + status, since = self.get_status(node) + self.log.info(f"Block 576: Status={status}") + assert_equal(status, 'active') + + # Mine block 577 (432+144+1) - deployment should have expired + self.log.info("Mining block 577 (432+144+1) - deployment should have expired...") + self.mine_blocks(node, 1, signal=False) + assert_equal(node.getblockcount(), 577) + # Note: Status may still show 'active' but deployment should no longer be enforced + # This matches the behavior in feature_temporary_deployment.py + self.log.info(f"Block 577: Deployment has expired (no longer enforced)") + + self.log.info("\n=== TEST 5 COMPLETE ===") + self.log.info("SUCCESS: Temporary deployment with max_height activated and expired correctly") + + +if __name__ == '__main__': + MaxActivationHeightTest(__file__).main() diff --git a/test/functional/feature_reduced_data_temporary_deployment.py b/test/functional/feature_reduced_data_temporary_deployment.py new file mode 100644 index 0000000000..c89b679562 --- /dev/null +++ b/test/functional/feature_reduced_data_temporary_deployment.py @@ -0,0 +1,213 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test temporary BIP9 deployment with active_duration parameter. + +This test verifies that a BIP9 deployment with active_duration properly expires +after the specified number of blocks. We use REDUCED_DATA as the test deployment +with active_duration=144 blocks. + +The test verifies two critical behaviors: +1. 
Consensus rules ARE enforced during the active period (blocks 432-575) +2. Consensus rules STOP being enforced after expiry (block 576+) + +Expected timeline: +- Period 0 (blocks 0-143): DEFINED +- Period 1 (blocks 144-287): STARTED (signaling happens here) +- Period 2 (blocks 288-431): LOCKED_IN +- Period 3 (blocks 432-575): ACTIVE (144 blocks total, from activation_height 432 to 575 inclusive) +- Block 576+: EXPIRED (deployment no longer active, rules no longer enforced) +""" + +from test_framework.blocktools import ( + create_block, + create_coinbase, + add_witness_commitment, +) +from test_framework.messages import ( + CTxOut, +) +from test_framework.script import ( + CScript, + OP_RETURN, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal +from test_framework.wallet import MiniWallet + +REDUCED_DATA_BIT = 4 +VERSIONBITS_TOP_BITS = 0x20000000 + + +class TemporaryDeploymentTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + # Set active_duration to 144 blocks (1 period) for REDUCED_DATA + # Format: deployment:start:end:min_activation_height:max_activation_height:active_duration + # start=0, timeout=999999999999, min_activation_height=0, max_activation_height=2147483647 (INT_MAX, disabled), active_duration=144 + self.extra_args = [[ + '-vbparams=reduced_data:0:999999999999:0:2147483647:144', + '-acceptnonstdtxn=1', + ]] + + def create_test_block(self, txs, signal=False): + """Create a block with the given transactions.""" + tip = self.nodes[0].getbestblockhash() + height = self.nodes[0].getblockcount() + 1 + tip_header = self.nodes[0].getblockheader(tip) + block_time = tip_header['time'] + 1 + block = create_block(int(tip, 16), create_coinbase(height), ntime=block_time, txlist=txs) + if signal: + block.nVersion = VERSIONBITS_TOP_BITS | (1 << REDUCED_DATA_BIT) + add_witness_commitment(block) + block.solve() + return block + + def 
mine_blocks(self, count, signal=False): + """Mine count blocks, optionally signaling for REDUCED_DATA.""" + for _ in range(count): + block = self.create_test_block([], signal=signal) + self.nodes[0].submitblock(block.serialize().hex()) + + def create_tx_with_data(self, data_size): + """Create a transaction with OP_RETURN output of specified size.""" + # Start with a valid transaction from the wallet + tx_dict = self.wallet.create_self_transfer() + tx = tx_dict['tx'] + + # Add an OP_RETURN output with specified data size + tx.vout.append(CTxOut(0, CScript([OP_RETURN, b'x' * data_size]))) + tx.rehash() + + return tx + + def get_deployment_status(self, deployment_info, deployment_name): + """Helper to get deployment status from getdeploymentinfo().""" + rd = deployment_info['deployments'][deployment_name] + if 'bip9' in rd: + return rd['bip9']['status'], rd['bip9'].get('since', 'N/A') + return rd.get('status'), rd.get('since', 'N/A') + + def run_test(self): + node = self.nodes[0] + + # MiniWallet provides a simple wallet for test transactions + self.wallet = MiniWallet(node) + + self.log.info("Mining initial blocks to get spendable coins...") + self.generate(self.wallet, 101) + + # Get deployment info at genesis + info = node.getdeploymentinfo() + status, since = self.get_deployment_status(info, 'reduced_data') + self.log.info(f"Block 101 - Status: {status}, Since: {since}") + assert_equal(status, 'defined') + + # Mine through period 0 (blocks 102-143) - should remain DEFINED + self.log.info("Mining through period 0 (blocks 102-143)...") + self.generate(node, 42) # Get to block 143 + info = node.getdeploymentinfo() + status, since = self.get_deployment_status(info, 'reduced_data') + self.log.info(f"Block 143 - Status: {status}") + assert_equal(status, 'defined') + + # Mine period 1 (blocks 144-287) with signaling - should transition to STARTED + self.log.info("Mining period 1 (blocks 144-287) with 100% signaling...") + self.mine_blocks(144, signal=True) + 
assert_equal(node.getblockcount(), 287) + info = node.getdeploymentinfo() + status, since = self.get_deployment_status(info, 'reduced_data') + self.log.info(f"Block 287 - Status: {status}") + assert_equal(status, 'started') + + # Mine period 2 (blocks 288-431) - should transition to LOCKED_IN + self.log.info("Mining period 2 (blocks 288-431)...") + self.mine_blocks(144, signal=True) + assert_equal(node.getblockcount(), 431) + info = node.getdeploymentinfo() + status, since = self.get_deployment_status(info, 'reduced_data') + self.log.info(f"Block 431 - Status: {status}, Since: {since}") + assert_equal(status, 'locked_in') + assert_equal(since, 288) + + # Mine one more block to activate (block 432 starts period 3) + self.log.info("Mining block 432 (activation block)...") + self.mine_blocks(1) + assert_equal(node.getblockcount(), 432) + info = node.getdeploymentinfo() + status, since = self.get_deployment_status(info, 'reduced_data') + self.log.info(f"Block 432 - Status: {status}, Since: {since}") + assert_equal(status, 'active') + assert_equal(since, 432) + + # Test that REDUCED_DATA rules are enforced at block 432 (first active block) + self.log.info("Testing REDUCED_DATA rules are enforced at block 432...") + tx_large_data = self.create_tx_with_data(81) + block_invalid = self.create_test_block([tx_large_data]) + result = node.submitblock(block_invalid.serialize().hex()) + self.log.info(f"Submitting block with 81-byte OP_RETURN at height 432: {result}") + # 81 bytes data becomes 84-byte script (OP_RETURN + OP_PUSHDATA1 + len + data), exceeds 83-byte limit + assert_equal(result, 'bad-txns-vout-script-toolarge') + + # Mine a valid block instead + tx_valid = self.create_tx_with_data(80) + block_valid = self.create_test_block([tx_valid]) + assert_equal(node.submitblock(block_valid.serialize().hex()), None) + assert_equal(node.getblockcount(), 433) + + # Mine through most of the active period (blocks 434-574) + self.log.info("Mining through active period to block 
574...") + self.generate(node, 141) # 434 to 574 + assert_equal(node.getblockcount(), 574) + info = node.getdeploymentinfo() + status, since = self.get_deployment_status(info, 'reduced_data') + self.log.info(f"Block 574 - Status: {status}") + assert_equal(status, 'active') + + # Test that REDUCED_DATA rules are still enforced at block 575 (last active block, 432 + 144 - 1) + self.log.info("Testing REDUCED_DATA rules are still enforced at block 575 (last active block)...") + tx_large_data = self.create_tx_with_data(81) + block_invalid = self.create_test_block([tx_large_data]) + result = node.submitblock(block_invalid.serialize().hex()) + self.log.info(f"Submitting block with 81-byte OP_RETURN at height 575: {result}") + assert_equal(result, 'bad-txns-vout-script-toolarge') + + # Mine valid block 575 (last active block) + tx_valid = self.create_tx_with_data(80) + block_valid = self.create_test_block([tx_valid]) + assert_equal(node.submitblock(block_valid.serialize().hex()), None) + assert_equal(node.getblockcount(), 575) + info = node.getdeploymentinfo() + status, since = self.get_deployment_status(info, 'reduced_data') + self.log.info(f"Block 575 - Status: {status}") + assert_equal(status, 'active') + + # Test that REDUCED_DATA rules are NO LONGER enforced at block 576 (first expired block, 432 + 144) + self.log.info("Testing REDUCED_DATA rules are NOT enforced at block 576 (first expired block, 432 + 144)...") + tx_large_data = self.create_tx_with_data(81) + block_after_expiry = self.create_test_block([tx_large_data]) + result = node.submitblock(block_after_expiry.serialize().hex()) + self.log.info(f"Submitting block with 81-byte OP_RETURN at height 576: {result}") + assert_equal(result, None) + assert_equal(node.getblockcount(), 576) + + # Check deployment status after expiry + # Note: BIP9 status may still show 'active' but rules are no longer enforced + info = node.getdeploymentinfo() + status, since = self.get_deployment_status(info, 'reduced_data') + 
self.log.info(f"Block 576 - Status: {status}, Since: {since}") + + # Verify rules remain unenforced for several more blocks + self.log.info("Verifying REDUCED_DATA rules remain unenforced after expiry...") + for i in range(10): + tx_large = self.create_tx_with_data(81) + block = self.create_test_block([tx_large]) + result = node.submitblock(block.serialize().hex()) + assert_equal(result, None) + + self.log.info(f"Final block height: {node.getblockcount()}") + +if __name__ == '__main__': + TemporaryDeploymentTest(__file__).main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 0f93905442..bf65aa903a 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -95,6 +95,8 @@ # vv Tests less than 5m vv 'feature_fee_estimation.py', 'feature_taproot.py', + 'feature_reduced_data_temporary_deployment.py', + 'feature_bip9_max_activation_height.py', 'feature_block.py', 'mempool_ephemeral_dust.py', 'wallet_conflicts.py --legacy-wallet', From abbc544ce21a863f9b4fd45fc1c7a5e6b4130b1c Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Tue, 18 Nov 2025 23:50:11 -0600 Subject: [PATCH 315/356] Add mandatory signaling enforcement for max_activation_height --- src/validation.cpp | 56 +++++++++++++++++++ .../feature_bip9_max_activation_height.py | 47 ++++++++++++++-- 2 files changed, 97 insertions(+), 6 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 02a3630d2b..44298f9f9e 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -2686,6 +2687,7 @@ static unsigned int GetBlockScriptFlags(const CBlockIndex& block_index, const Ch return flags; } +static bool ContextualCheckBlockHeaderVolatile(const CBlockHeader& block, BlockValidationState& state, const ChainstateManager& chainman, const CBlockIndex* pindexPrev) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** Apply the effects of this block (with given index) on the UTXO set 
represented by coins. * Validity checks that depend on the UTXO set are also done; ConnectBlock() @@ -2731,6 +2733,11 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, uint256 hashPrevBlock = pindex->pprev == nullptr ? uint256() : pindex->pprev->GetBlockHash(); assert(hashPrevBlock == view.GetBestBlock()); + if (!ContextualCheckBlockHeaderVolatile(block, state, m_chainman, pindex->pprev)) { + LogError("%s: Consensus::ContextualCheckBlockHeaderVolatile: %s\n", __func__, state.ToString()); + return false; + } + m_chainman.num_blocks_total++; // Special case for the genesis block, skipping connection of its transactions @@ -4600,6 +4607,55 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidatio } } + if (!ContextualCheckBlockHeaderVolatile(block, state, chainman, pindexPrev)) return false; + + return true; +} + +/** Context-dependent validity checks, but rechecked in ConnectBlock(). + * Note that -reindex-chainstate skips the validation that happens here! + */ +static bool ContextualCheckBlockHeaderVolatile(const CBlockHeader& block, BlockValidationState& state, const ChainstateManager& chainman, const CBlockIndex* pindexPrev) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) +{ + const Consensus::Params& consensusParams = chainman.GetConsensus(); + + // BIP148-style mandatory signaling for deployments approaching max_activation_height + // Enforce signaling during the period before forced lock-in + const int nPeriod = consensusParams.nMinerConfirmationWindow; + const int nHeight = pindexPrev == nullptr ? 
0 : pindexPrev->nHeight + 1; + + for (int i = 0; i < (int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; i++) { + const Consensus::DeploymentPos pos = static_cast(i); + const auto& deployment = consensusParams.vDeployments[pos]; + + // Only enforce if max_activation_height is set for this deployment + if (deployment.max_activation_height < std::numeric_limits::max()) { + // Calculate enforcement window: 1 period before forced lock-in + // Lock-in happens at (max_activation_height - nPeriod) + // So enforce signaling from (max_activation_height - 2*nPeriod) to (max_activation_height - nPeriod) + const int enforcement_start = deployment.max_activation_height - (2 * nPeriod); + const int enforcement_end = deployment.max_activation_height - nPeriod; + + if (nHeight >= enforcement_start && nHeight < enforcement_end) { + // Check deployment state - only enforce during STARTED (stop once LOCKED_IN or ACTIVE) + const ThresholdState deployment_state = chainman.m_versionbitscache.State(pindexPrev, consensusParams, pos); + if (deployment_state == ThresholdState::STARTED) { + // Check if block signals for this deployment + const bool fVersionBits = (block.nVersion & VERSIONBITS_TOP_MASK) == VERSIONBITS_TOP_BITS; + const bool fDeploymentBit = (block.nVersion & (uint32_t{1} << deployment.bit)) != 0; + + if (!(fVersionBits && fDeploymentBit)) { + const std::string deployment_name = VersionBitsDeploymentInfo[i].name; + return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, + "bad-version-" + deployment_name, + strprintf("Block must signal for %s approaching max_activation_height=%d", + deployment_name, deployment.max_activation_height)); + } + } + } + } + } + return true; } diff --git a/test/functional/feature_bip9_max_activation_height.py b/test/functional/feature_bip9_max_activation_height.py index a8ad4744b3..4b8cebba3c 100644 --- a/test/functional/feature_bip9_max_activation_height.py +++ b/test/functional/feature_bip9_max_activation_height.py @@ -156,7 +156,26 @@ def 
run_test(self): # Period 2 (288-431): STARTED - forced lock-in will occur at end of this period self.log.info("\n--- Period 2 (blocks 288-431): STARTED ---") self.log.info("Forced lock-in will occur at block 432 (max_activation_height - nPeriod)") - self.mine_blocks(node, 144, signal=False) + + # Try to mine block 288 without signaling - should be REJECTED + self.log.info("\nNEGATIVE TEST: Attempting to mine block 288 without signaling...") + tip = node.getbestblockhash() + height = node.getblockcount() + 1 + tip_header = node.getblockheader(tip) + block_time = tip_header['time'] + 1 + block = create_block(int(tip, 16), create_coinbase(height), ntime=block_time) + block.nVersion = VERSIONBITS_TOP_BITS # No signaling bit + add_witness_commitment(block) + block.solve() + result = node.submitblock(block.serialize().hex()) + self.log.info(f"Submitblock result (should be rejected): {result}") + # Block should be rejected - check we're still at block 287 + assert_equal(node.getblockcount(), 287) + self.log.info("SUCCESS: Block without signaling was correctly REJECTED during enforcement window") + + # Now mine Period 2 with proper signaling + self.log.info("\nMining Period 2 with proper signaling...") + self.mine_blocks(node, 144, signal=True) assert_equal(node.getblockcount(), 431) status, since = self.get_status(node) self.log.info(f"Block 431: Status={status}") @@ -190,6 +209,9 @@ def run_test(self): self.log.info("\n=== TEST 1 COMPLETE ===") self.log.info("Summary: max_activation_height=576 test passed") self.log.info("- Deployment activated at height 576 via forced lock-in at 432") + self.log.info("- Mandatory signaling enforced during blocks 288-431 (BIP148-style)") + self.log.info("- Non-signaling blocks rejected during enforcement window") + self.log.info("- Non-signaling blocks accepted outside enforcement window") # Test 2: Deployment without max_height requires signaling self.log.info("\n\n=== TEST 2: Deployment without max_height requires signaling ===") @@ 
-274,9 +296,15 @@ def run_test(self): node = self.nodes[3] # Activate via max_height (max_height=432) - # Mine to block 287 (through periods 0 and 1) - self.log.info("Mining through periods 0 and 1 to block 287...") - self.mine_blocks(node, 287, signal=False) + # Mine to block 143 (period 0) without signaling + self.log.info("Mining period 0 (blocks 0-143) without signaling...") + self.mine_blocks(node, 143, signal=False) + assert_equal(node.getblockcount(), 143) + + # Mine through enforcement window (blocks 144-287) WITH signaling + # Enforcement window for max_height=432 is [144, 288) + self.log.info("Mining blocks 144-287 with signaling (enforcement window)...") + self.mine_blocks(node, 144, signal=True) assert_equal(node.getblockcount(), 287) status, since = self.get_status(node) assert_equal(status, 'started') @@ -315,8 +343,15 @@ def run_test(self): # This node has max_activation_height=432 AND active_duration=144 # Should activate at 432 via max_height, then expire at 432+144=576 - self.log.info("Mining through periods 0 and 1 to block 287...") - self.mine_blocks(node, 287, signal=False) + # Mine to block 143 (period 0) without signaling + self.log.info("Mining period 0 (blocks 0-143) without signaling...") + self.mine_blocks(node, 143, signal=False) + assert_equal(node.getblockcount(), 143) + + # Mine through enforcement window (blocks 144-287) WITH signaling + # Enforcement window for max_height=432 is [144, 288) + self.log.info("Mining blocks 144-287 with signaling (enforcement window)...") + self.mine_blocks(node, 144, signal=True) assert_equal(node.getblockcount(), 287) status, since = self.get_status(node) self.log.info(f"Block 287: Status={status}") From 85a78b88c25a56edf5b704667666270351f9b9c8 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Mon, 3 Nov 2025 19:10:10 +0000 Subject: [PATCH 316/356] Rename BIP148 references to ReducedData --- src/bitcoin-cli.cpp | 6 ++--- src/init.cpp | 2 +- src/net_processing.cpp | 6 ++--- src/protocol.cpp | 2 +- 
src/protocol.h | 6 ++--- src/rpc/net.cpp | 2 +- src/test/denialofservice_tests.cpp | 4 ++-- src/test/net_tests.cpp | 4 ++-- src/test/peerman_tests.cpp | 14 +++++------ test/functional/interface_bitcoin_cli.py | 2 +- test/functional/p2p_addr_relay.py | 4 ++-- test/functional/p2p_handshake.py | 12 +++++----- test/functional/p2p_node_network_limited.py | 4 ++-- test/functional/rpc_net.py | 26 ++++++++++----------- test/functional/test_framework/messages.py | 2 +- test/functional/test_framework/p2p.py | 4 ++-- 16 files changed, 50 insertions(+), 50 deletions(-) diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 99569e9cf7..03cb54b996 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -483,8 +483,8 @@ class NetinfoRequestHandler : public BaseRequestHandler str += 'T'; } else if (s == "UTREEXO_TMP?") { str += 'y'; - } else if (s == "BIP148?") { - str += '1'; + } else if (s == "REDUCED_DATA?") { + str += '4'; } else { str += ToLower(s[0]); } @@ -767,7 +767,7 @@ class NetinfoRequestHandler : public BaseRequestHandler " \"T\" - UTREEXO_ARCHIVE peer can handle Utreexo proof requests for all historical blocks\n" " \"y\" - UTREEXO_TMP? peer can handle Utreexo proof requests\n" " \"r\" - REPLACE_BY_FEE? peer supports replacement of transactions without BIP 125 signalling\n" - " \"1\" - BIP148? peer enforces the BIP148 User-Activated SoftFork\n" + " \"4\" - REDUCED_DATA? peer enforces the ReducedData SoftFork\n" " \"m\" - MALICIOUS? 
peer openly seeks to aid in bypassing network policy/spam filters (OR to sabotage nodes that seek to)\n" " \"u\" - UNKNOWN: unrecognized bit flag\n" " v Version of transport protocol used for the connection\n" diff --git a/src/init.cpp b/src/init.cpp index 6613fd0c73..23ab427b1c 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -975,7 +975,7 @@ namespace { // Variables internal to initialization process only int nMaxConnections; int available_fds; -ServiceFlags g_local_services = ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_BIP148); +ServiceFlags g_local_services = ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA); int64_t peer_connect_timeout; std::set g_enabled_filter_types; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 16f39ec668..3747b820c8 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -1657,14 +1657,14 @@ bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const { - // We want to preferentially peer with other nodes that enforce BIP148, in case of a chain split + // We want to preferentially peer with other nodes that enforce UASF-ReducedData, in case of a chain split if (services & NODE_NETWORK_LIMITED) { // Limited peers are desirable when we are close to the tip. 
if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) { - return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_BIP148); + return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA); } } - return ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BIP148); + return ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA); } PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const diff --git a/src/protocol.cpp b/src/protocol.cpp index 1e492dadfc..9a6093678b 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -103,7 +103,7 @@ static std::string serviceFlagToStr(size_t bit) case NODE_UTREEXO_ARCHIVE: return "UTREEXO_ARCHIVE"; case NODE_UTREEXO_TMP: return "UTREEXO_TMP?"; case NODE_REPLACE_BY_FEE: return "REPLACE_BY_FEE?"; - case NODE_BIP148: return "BIP148?"; + case NODE_REDUCED_DATA: return "REDUCED_DATA?"; case NODE_MALICIOUS: return "MALICIOUS?"; // Not using default, so we get warned when a case is missing } diff --git a/src/protocol.h b/src/protocol.h index 0a7a15e31a..47a6a330b3 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -347,8 +347,8 @@ enum ServiceFlags : uint64_t { NODE_REPLACE_BY_FEE = (1 << 26), - // NODE_BIP148 means the node enforces BIP 148's mandatory Segwit activation beginning August 1, 2017 - NODE_BIP148 = (1 << 27), + // NODE_REDUCED_DATA means the node enforces ReducedData rules as applicable + NODE_REDUCED_DATA = (1 << 27), NODE_MALICIOUS = (1 << 29), }; @@ -366,7 +366,7 @@ std::vector serviceFlagsToStr(uint64_t flags); * should be updated appropriately to filter for nodes with * desired service flags (compatible with our new flags). 
*/ -constexpr ServiceFlags SeedsServiceFlags() { return ServiceFlags(NODE_NETWORK | NODE_WITNESS); } +constexpr ServiceFlags SeedsServiceFlags() { return ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA); } /** * Checks if a peer with the given service flags may be capable of having a diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index d363e2b07b..4d2b69b8c4 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -1057,7 +1057,7 @@ static RPCHelpMan addpeeraddress() if (net_addr.has_value()) { CService service{net_addr.value(), port}; - CAddress address{MaybeFlipIPv6toCJDNS(service), ServiceFlags{NODE_NETWORK | NODE_WITNESS | NODE_BIP148}}; + CAddress address{MaybeFlipIPv6toCJDNS(service), ServiceFlags{NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA}}; address.nTime = Now(); // The source address is set equal to the address. This is equivalent to the peer // announcing itself. diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index 3bb164eac9..4ecc276081 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -68,8 +68,8 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) connman.Handshake( /*node=*/dummyNode1, /*successfully_connected=*/true, - /*remote_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS), - /*local_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS), + /*remote_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA), + /*local_services=*/ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA), /*version=*/PROTOCOL_VERSION, /*relay_txs=*/true); diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 69e9e887e6..78eeea0ade 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -865,7 +865,7 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) /*inbound_onion=*/false, /*network_key=*/2}; - const uint64_t services{NODE_NETWORK | NODE_WITNESS}; + const uint64_t services{NODE_NETWORK | NODE_WITNESS | 
NODE_REDUCED_DATA}; const int64_t time{0}; // Force ChainstateManager::IsInitialBlockDownload() to return false. @@ -873,7 +873,7 @@ BOOST_AUTO_TEST_CASE(initial_advertise_from_version_message) auto& chainman = static_cast(*m_node.chainman); chainman.JumpOutOfIbd(); - m_node.peerman->InitializeNode(peer, NODE_NETWORK); + m_node.peerman->InitializeNode(peer, ServiceFlags(NODE_NETWORK | NODE_REDUCED_DATA)); std::atomic interrupt_dummy{false}; std::chrono::microseconds time_received_dummy{0}; diff --git a/src/test/peerman_tests.cpp b/src/test/peerman_tests.cpp index e4fa6d20c9..eaa0d2e905 100644 --- a/src/test/peerman_tests.cpp +++ b/src/test/peerman_tests.cpp @@ -36,7 +36,7 @@ BOOST_AUTO_TEST_CASE(connections_desirable_service_flags) // Check we start connecting to full nodes ServiceFlags peer_flags{NODE_WITNESS | NODE_NETWORK_LIMITED}; - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA)); // Make peerman aware of the initial best block and verify we accept limited peers when we start close to the tip time. 
auto tip = WITH_LOCK(::cs_main, return m_node.chainman->ActiveChain().Tip()); @@ -45,15 +45,15 @@ BOOST_AUTO_TEST_CASE(connections_desirable_service_flags) peerman->SetBestBlock(tip_block_height, std::chrono::seconds{tip_block_time}); SetMockTime(tip_block_time + 1); // Set node time to tip time - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA)); // Check we don't disallow limited peers connections when we are behind but still recoverable (below the connection safety window) SetMockTime(GetTime() + std::chrono::seconds{consensus.nPowTargetSpacing * (NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS - 1)}); - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA)); // Check we disallow limited peers connections when we are further than the limited peers safety window SetMockTime(GetTime() + std::chrono::seconds{consensus.nPowTargetSpacing * 2}); - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA)); // By now, we tested that the connections desirable services flags change based on the node's time proximity to the tip. // Now, perform the same tests for when the node receives a block. @@ -62,15 +62,15 @@ BOOST_AUTO_TEST_CASE(connections_desirable_service_flags) // First, verify a block in the past doesn't enable limited peers connections // At this point, our time is (NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS + 1) * 10 minutes ahead the tip's time. 
mineBlock(m_node, /*block_time=*/std::chrono::seconds{tip_block_time + 1}); - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA)); // Verify a block close to the tip enables limited peers connections mineBlock(m_node, /*block_time=*/GetTime()); - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA)); // Lastly, verify the stale tip checks can disallow limited peers connections after not receiving blocks for a prolonged period. SetMockTime(GetTime() + std::chrono::seconds{consensus.nPowTargetSpacing * NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS + 1}); - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA)); } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py index 6c022d1ef1..44687b0c52 100755 --- a/test/functional/interface_bitcoin_cli.py +++ b/test/functional/interface_bitcoin_cli.py @@ -94,7 +94,7 @@ def test_netinfo(self): self.log.info("Test -netinfo local services are moved to header if details are requested") det = self.nodes[0].cli('-netinfo', '1').send_cli().splitlines() self.log.debug(f"Test -netinfo 1 header output: {det[0]}") - assert re.match(rf"^{re.escape(self.config['environment']['CLIENT_NAME'])} client.+services nwl2?1$", det[0]) + assert re.match(rf"^{re.escape(self.config['environment']['CLIENT_NAME'])} client.+services nwl2?4$", det[0]) assert not any(line.startswith("Local services:") for line in det) def run_test(self): diff --git 
a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index 1a5bf45301..b8be5780cc 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -11,7 +11,7 @@ from test_framework.messages import ( CAddress, - NODE_BIP148, + NODE_REDUCED_DATA, NODE_NETWORK, NODE_WITNESS, msg_addr, @@ -55,7 +55,7 @@ def on_addr(self, message): if self.test_addr_contents: # relay_tests checks the content of the addr messages match # expectations based on the message creation in setup_addr_msg - assert_equal(addr.nServices, NODE_NETWORK | NODE_WITNESS | NODE_BIP148) + assert_equal(addr.nServices, NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA) if not 8333 <= addr.port < 8343: raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port)) assert addr.ip.startswith('123.123.') diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py index 67c7174ffe..257003859e 100755 --- a/test/functional/p2p_handshake.py +++ b/test/functional/p2p_handshake.py @@ -11,7 +11,7 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.messages import ( - NODE_BIP148, + NODE_REDUCED_DATA, NODE_NETWORK, NODE_NETWORK_LIMITED, NODE_NONE, @@ -35,8 +35,8 @@ # the desirable service flags for pruned peers are dynamic and only apply if # 1. the peer's service flag NODE_NETWORK_LIMITED is set *and* # 2. 
the local chain is close to the tip (<24h) -DESIRABLE_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS | NODE_BIP148 -DESIRABLE_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_BIP148 +DESIRABLE_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA +DESIRABLE_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA class P2PHandshakeTest(BitcoinTestFramework): @@ -99,15 +99,15 @@ def run_test(self): self.log.info("Check that lacking desired service flags leads to disconnect (non-pruned peers)") self.test_desirable_service_flags(node, [NODE_NONE, NODE_NETWORK, NODE_WITNESS], DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True) - self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS | NODE_BIP148], + self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA], DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=False) self.log.info("Check that limited peers are only desired if the local chain is close to the tip (<24h)") self.generate_at_mocktime(int(time.time()) - 25 * 3600) # tip outside the 24h window, should fail - self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_BIP148], + self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA], DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True) self.generate_at_mocktime(int(time.time()) - 23 * 3600) # tip inside the 24h window, should succeed - self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_BIP148], + self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA], DESIRABLE_SERVICE_FLAGS_PRUNED, expect_disconnect=False) self.log.info("Check that feeler connections get disconnected immediately") diff --git a/test/functional/p2p_node_network_limited.py b/test/functional/p2p_node_network_limited.py index c2d789609e..fb0b12238e 100755 --- a/test/functional/p2p_node_network_limited.py +++ 
b/test/functional/p2p_node_network_limited.py @@ -11,7 +11,7 @@ from test_framework.messages import ( CInv, MSG_BLOCK, - NODE_BIP148, + NODE_REDUCED_DATA, NODE_NETWORK_LIMITED, NODE_P2P_V2, NODE_WITNESS, @@ -123,7 +123,7 @@ def test_avoid_requesting_historical_blocks(self): def run_test(self): node = self.nodes[0].add_p2p_connection(P2PIgnoreInv()) - expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED | NODE_BIP148 + expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED | NODE_REDUCED_DATA if self.options.v2transport: expected_services |= NODE_P2P_V2 diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 0fce884b44..9dadddc887 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -14,7 +14,7 @@ import test_framework.messages from test_framework.messages import ( - NODE_BIP148, + NODE_REDUCED_DATA, NODE_NETWORK, NODE_WITNESS, ) @@ -320,8 +320,8 @@ def test_getnodeaddresses(self): assert_greater_than(10000, len(node_addresses)) for a in node_addresses: assert_greater_than(a["time"], 1527811200) # 1st June 2018 - # addpeeraddress stores addresses with default services (NODE_NETWORK | NODE_WITNESS | NODE_BIP148) - assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS | NODE_BIP148) + # addpeeraddress stores addresses with default services (NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA) + assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA) assert a["address"] in imported_addrs assert_equal(a["port"], 8333) assert_equal(a["network"], "ipv4") @@ -332,8 +332,8 @@ def test_getnodeaddresses(self): assert_equal(res[0]["address"], ipv6_addr) assert_equal(res[0]["network"], "ipv6") assert_equal(res[0]["port"], 8333) - # addpeeraddress stores addresses with default services (NODE_NETWORK | NODE_WITNESS | NODE_BIP148) - assert_equal(res[0]["services"], NODE_NETWORK | NODE_WITNESS | NODE_BIP148) + # addpeeraddress stores addresses with default services (NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA) + 
assert_equal(res[0]["services"], NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA) # Test for the absence of onion, I2P and CJDNS addresses. for network in ["onion", "i2p", "cjdns"]: @@ -511,7 +511,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "82/8", "address": "2.0.0.0", "port": 8333, - "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, + "services": NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA, "network": "ipv4", "source": "2.0.0.0", "source_network": "ipv4", @@ -520,7 +520,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "336/24", "address": "fc00:1:2:3:4:5:6:7", "port": 8333, - "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, + "services": NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA, "network": "cjdns", "source": "fc00:1:2:3:4:5:6:7", "source_network": "cjdns", @@ -529,7 +529,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "963/46", "address": "c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p", "port": 8333, - "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, + "services": NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA, "network": "i2p", "source": "c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p", "source_network": "i2p", @@ -537,7 +537,7 @@ def check_getrawaddrman_entries(expected): { "bucket_position": "613/6", "address": "2803:0:1234:abcd::1", - "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, + "services": NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA, "network": "ipv6", "source": "2803:0:1234:abcd::1", "source_network": "ipv6", @@ -549,7 +549,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "6/33", "address": "1.2.3.4", "port": 8333, - "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, + "services": NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA, "network": "ipv4", "source": "1.2.3.4", "source_network": "ipv4", @@ -558,7 +558,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "197/34", "address": 
"1233:3432:2434:2343:3234:2345:6546:4534", "port": 8333, - "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, + "services": NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA, "network": "ipv6", "source": "1233:3432:2434:2343:3234:2345:6546:4534", "source_network": "ipv6", @@ -567,7 +567,7 @@ def check_getrawaddrman_entries(expected): "bucket_position": "72/61", "address": "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", "port": 8333, - "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, + "services": NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA, "network": "onion", "source": "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion", "source_network": "onion" @@ -575,7 +575,7 @@ def check_getrawaddrman_entries(expected): { "bucket_position": "139/46", "address": "nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion", - "services": NODE_NETWORK | NODE_WITNESS | NODE_BIP148, + "services": NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA, "network": "onion", "source": "nrfj6inpyf73gpkyool35hcmne5zwfmse3jl3aw23vk7chdemalyaqad.onion", "source_network": "onion", diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 73217d5dd2..d596d8ae77 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -57,7 +57,7 @@ NODE_NETWORK_LIMITED = (1 << 10) NODE_P2P_V2 = (1 << 11) NODE_REPLACE_BY_FEE = (1 << 26) -NODE_BIP148 = (1 << 27) +NODE_REDUCED_DATA = (1 << 27) MSG_TX = 1 MSG_BLOCK = 2 diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index 540ae2c4af..b6a5bd476e 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -73,7 +73,7 @@ msg_wtxidrelay, NODE_NETWORK, NODE_WITNESS, - NODE_BIP148, + NODE_REDUCED_DATA, MAGIC_BYTES, sha256, ) @@ -96,7 +96,7 @@ # Version 70016 supports wtxid relay P2P_VERSION = 70016 # The services that this test framework offers in 
its `version` message -P2P_SERVICES = NODE_NETWORK | NODE_WITNESS | NODE_BIP148 +P2P_SERVICES = NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA # The P2P user agent string that this test framework sends in its `version` message P2P_SUBVERSION = "/python-p2p-tester:0.0.3/" # Value for relay that this test framework sends in its `version` message From ec403fd6c52f6b70e382c4e263caa479abcb5d60 Mon Sep 17 00:00:00 2001 From: 3c853b6299 <3c853b6299@pm.me> Date: Tue, 4 Nov 2025 18:18:45 -0600 Subject: [PATCH 317/356] test: implement functional tests for ReducedData Spec --- test/functional/feature_rdts.py | 915 ++++++++++++++++++++++++++++++++ test/functional/test_runner.py | 1 + 2 files changed, 916 insertions(+) create mode 100755 test/functional/feature_rdts.py diff --git a/test/functional/feature_rdts.py b/test/functional/feature_rdts.py new file mode 100755 index 0000000000..933fd9289a --- /dev/null +++ b/test/functional/feature_rdts.py @@ -0,0 +1,915 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test ReducedData Temporary Softfork (RDTS) consensus rules. + +This test verifies all 7 consensus rules enforced by DEPLOYMENT_REDUCED_DATA: + +1. Output scriptPubKeys exceeding 34 bytes are invalid (except OP_RETURN up to 83 bytes) +2. OP_PUSHDATA* with payloads larger than 256 bytes are invalid (except BIP16 redeemScript) +3. Spending undefined witness versions (not v0/v1) is invalid +4. Witness stacks with a Taproot annex are invalid +5. Taproot control blocks larger than 257 bytes are invalid (max 7 merkle nodes = 128 leaves) +6. Tapscripts including OP_SUCCESS* opcodes are invalid +7. 
Tapscripts executing OP_IF or OP_NOTIF instructions are invalid +""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.wallet import MiniWallet +from test_framework.messages import ( + CBlock, + COutPoint, + CTransaction, + CTxIn, + CTxInWitness, + CTxOut, + COIN, + MAX_OP_RETURN_RELAY, +) +from test_framework.p2p import P2PDataStore +from test_framework.script import ( + ANNEX_TAG, + CScript, + CScriptOp, + is_op_success, + LEAF_VERSION_TAPSCRIPT, + OP_0, + OP_1, + OP_2, + OP_3, + OP_4, + OP_5, + OP_6, + OP_7, + OP_8, + OP_9, + OP_10, + OP_11, + OP_12, + OP_13, + OP_14, + OP_15, + OP_16, + OP_CHECKSIG, + OP_CHECKSIGADD, + OP_CHECKMULTISIG, + OP_DROP, + OP_DUP, + OP_EQUAL, + OP_EQUALVERIFY, + OP_HASH160, + OP_IF, + OP_NOTIF, + OP_ENDIF, + OP_PUSHDATA1, + OP_PUSHDATA2, + OP_RETURN, + OP_TRUE, + SegwitV0SignatureHash, + SIGHASH_ALL, + SIGHASH_DEFAULT, + hash160, + sha256, + taproot_construct, + TaprootSignatureHash, +) +from test_framework.blocktools import ( + create_block, + create_coinbase, + add_witness_commitment, +) +from test_framework.script_util import ( + script_to_p2wsh_script, + script_to_p2sh_script, +) +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, +) +from test_framework.key import ( + ECKey, + compute_xonly_pubkey, + generate_privkey, + sign_schnorr, + tweak_add_privkey, +) +from io import BytesIO +import struct + + +# Constants from BIP444 +MAX_OUTPUT_SCRIPT_SIZE = 34 +MAX_OUTPUT_DATA_SIZE = 83 +MAX_SCRIPT_ELEMENT_SIZE_REDUCED = 256 +TAPROOT_CONTROL_BASE_SIZE = 33 +TAPROOT_CONTROL_NODE_SIZE = 32 +TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED = 7 +TAPROOT_CONTROL_MAX_SIZE_REDUCED = TAPROOT_CONTROL_BASE_SIZE + TAPROOT_CONTROL_NODE_SIZE * TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED +# ANNEX_TAG is imported from test_framework.script + + +class ReducedDataTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + # Make DEPLOYMENT_REDUCED_DATA 
always active (from block 0) + # Using start_time=-1 (ALWAYS_ACTIVE) bypasses BIP9 state machine + self.extra_args = [[ + '-vbparams=reduced_data:-1:999999999999:0', + '-acceptnonstdtxn=1', + ]] + + def init_test(self): + """Initialize test by mining blocks and creating UTXOs.""" + node = self.nodes[0] + + # MiniWallet provides a simple wallet for test transactions + self.wallet = MiniWallet(node) + + # Mine 120 blocks to mature coinbase outputs and create spending UTXOs + # (101 for maturity + extras since each test consumes a UTXO) + self.generate(self.wallet, 120) + + self.log.info("Test initialization complete") + + def create_test_transaction(self, scriptPubKey, value=None): + """Helper to create a transaction with custom scriptPubKey (not broadcast).""" + # Start with a valid transaction from the wallet + tx_dict = self.wallet.create_self_transfer() + tx = tx_dict['tx'] + + # Use default output value if not specified (handles fee calculation) + if value is None: + value = tx.vout[0].nValue + + # Replace output with our custom scriptPubKey + tx.vout[0] = CTxOut(value, scriptPubKey) + tx.rehash() + + return tx + + def test_output_script_size_limit(self): + """Test spec 1: Output scriptPubKeys exceeding 34 bytes are invalid.""" + self.log.info("Testing output scriptPubKey size limits...") + + node = self.nodes[0] + + # Test 1.1: 34-byte P2WSH output (exactly at limit - should pass) + witness_program_32 = b'\x00' * 32 + script_p2wsh = CScript([OP_0, witness_program_32]) # OP_0 (1 byte) + 32-byte push = 34 bytes + assert_equal(len(script_p2wsh), 34) + + tx_valid = self.create_test_transaction(script_p2wsh) + result = node.testmempoolaccept([tx_valid.serialize().hex()])[0] + if not result['allowed']: + self.log.info(f" DEBUG: P2WSH rejection reason: {result}") + assert_equal(result['allowed'], True) + self.log.info(" ✓ 34-byte P2WSH output accepted") + + # Test 1.2: 35-byte P2PK output (exceeds limit - should fail) + pubkey_33 = b'\x02' + b'\x00' * 32 # Compressed 
pubkey + script_p2pk = CScript([pubkey_33, OP_CHECKSIG]) # 33-byte push + OP_CHECKSIG = 35 bytes + assert_equal(len(script_p2pk), 35) + + tx_invalid = self.create_test_transaction(script_p2pk) + result = node.testmempoolaccept([tx_invalid.serialize().hex()])[0] + assert_equal(result['allowed'], False) + assert 'bad-txns-vout-script-toolarge' in result['reject-reason'] + self.log.info(" ✓ 35-byte P2PK output rejected") + + # Test 1.3: 37-byte bare multisig (exceeds limit - should fail) + script_bare_multisig = CScript([OP_1, pubkey_33, OP_1, OP_CHECKMULTISIG]) + assert len(script_bare_multisig) >= 37 + + tx_invalid = self.create_test_transaction(script_bare_multisig) + result = node.testmempoolaccept([tx_invalid.serialize().hex()])[0] + assert_equal(result['allowed'], False) + assert 'bad-txns-vout-script-toolarge' in result['reject-reason'] + self.log.info(" ✓ 37-byte bare multisig output rejected") + + # Test 1.4: OP_RETURN with 83 bytes (at the OP_RETURN exception limit) + # Note: CScript adds PUSHDATA overhead for data >75 bytes + # 80 bytes data: OP_RETURN (1) + direct push (1) + data (80) = 82 bytes total + # 81+ bytes data: OP_RETURN (1) + OP_PUSHDATA1 (1) + len (1) + data = 84+ bytes + data_80 = b'\x00' * 80 + script_opreturn_82 = CScript([OP_RETURN, data_80]) + self.log.info(f" DEBUG: OP_RETURN script with 80 data bytes has length: {len(script_opreturn_82)}") + + tx_valid = self.create_test_transaction(script_opreturn_82, value=0) + result = node.testmempoolaccept([tx_valid.serialize().hex()])[0] + # OP_RETURN with value=0 may be rejected by standardness policy + self.log.info(f" ✓ OP_RETURN with {len(script_opreturn_82)} bytes: {result.get('allowed', False)}") + + # Test 1.5: OP_RETURN with 85 bytes (exceeds 83-byte exception) + data_82 = b'\x00' * 82 + script_opreturn_85 = CScript([OP_RETURN, data_82]) + self.log.info(f" DEBUG: OP_RETURN script with 82 data bytes has length: {len(script_opreturn_85)}") + + tx_invalid = 
self.create_test_transaction(script_opreturn_85, value=0) + result = node.testmempoolaccept([tx_invalid.serialize().hex()])[0] + assert_equal(result['allowed'], False) + if result['allowed'] == False: + self.log.info(f" ✓ OP_RETURN with {len(script_opreturn_85)} bytes rejected") + + def test_pushdata_size_limit(self): + """Test spec 2: OP_PUSHDATA* with payloads > 256 bytes are invalid.""" + self.log.info("Testing OP_PUSHDATA size limits...") + + node = self.nodes[0] + + # Standard P2WPKH hash for outputs (avoids tx-size-small policy rejection) + dummy_pubkey_hash = hash160(b'\x00' * 33) + + # Test 2.1: Witness script with 256-byte PUSHDATA (exactly at limit - should pass) + data_256 = b'\x00' * 256 + witness_script_256 = CScript([data_256, OP_DROP, OP_TRUE]) # Script: <256 bytes> DROP TRUE + script_pubkey_256 = script_to_p2wsh_script(witness_script_256) + + # First create an output with this witness script + funding_tx_256 = self.create_test_transaction(script_pubkey_256) + txid_256 = node.sendrawtransaction(funding_tx_256.serialize().hex()) + self.generate(node, 1) + output_value_256 = funding_tx_256.vout[0].nValue + + # Now spend it - this reveals the witness script with the 256-byte PUSHDATA + spending_tx_256 = CTransaction() + spending_tx_256.vin = [CTxIn(COutPoint(int(txid_256, 16), 0))] + spending_tx_256.vout = [CTxOut(output_value_256 - 10000, CScript([OP_0, dummy_pubkey_hash]))] + spending_tx_256.wit.vtxinwit = [CTxInWitness()] + spending_tx_256.wit.vtxinwit[0].scriptWitness.stack = [witness_script_256] + spending_tx_256.rehash() + + # 256 bytes is at the limit, should be accepted + result = node.testmempoolaccept([spending_tx_256.serialize().hex()])[0] + if not result['allowed']: + self.log.info(f" DEBUG: 256-byte PUSHDATA rejection: {result}") + assert_equal(result['allowed'], True) + self.log.info(" ✓ PUSHDATA with 256 bytes accepted in witness script") + + # Test 2.2: Witness script with 257-byte PUSHDATA (exceeds limit - should fail) + data_257 = 
b'\x00' * 257 + witness_script_257 = CScript([data_257, OP_DROP, OP_TRUE]) + script_pubkey_257 = script_to_p2wsh_script(witness_script_257) + + # Create and fund the output + funding_tx_257 = self.create_test_transaction(script_pubkey_257) + txid_257 = node.sendrawtransaction(funding_tx_257.serialize().hex()) + self.generate(node, 1) + output_value_257 = funding_tx_257.vout[0].nValue + + # Try to spend it - should be rejected due to 257-byte PUSHDATA + spending_tx_257 = CTransaction() + spending_tx_257.vin = [CTxIn(COutPoint(int(txid_257, 16), 0))] + spending_tx_257.vout = [CTxOut(output_value_257 - 10000, CScript([OP_0, dummy_pubkey_hash]))] + spending_tx_257.wit.vtxinwit = [CTxInWitness()] + spending_tx_257.wit.vtxinwit[0].scriptWitness.stack = [witness_script_257] + spending_tx_257.rehash() + + result = node.testmempoolaccept([spending_tx_257.serialize().hex()])[0] + assert_equal(result['allowed'], False) + assert 'non-mandatory-script-verify-flag' in result['reject-reason'] or 'Push value size limit exceeded' in result['reject-reason'] + self.log.info(" ✓ PUSHDATA with 257 bytes rejected in witness script") + + # Test 2.3: P2SH redeemScript with 300-byte PUSHDATA (tests BIP16 exception boundary) + # Important: BIP16 allows pushing the redeemScript itself even if >256 bytes, + # BUT any PUSHDATAs executed WITHIN that redeemScript are still limited to 256 bytes + large_redeem_script = CScript([b'\x00' * 300, OP_DROP, OP_TRUE]) # Contains 300-byte PUSHDATA + p2sh_script_pubkey = script_to_p2sh_script(large_redeem_script) + + # Create the P2SH output + funding_tx_p2sh = self.create_test_transaction(p2sh_script_pubkey) + txid_p2sh = node.sendrawtransaction(funding_tx_p2sh.serialize().hex()) + self.generate(node, 1) + output_value_p2sh = funding_tx_p2sh.vout[0].nValue + + # Spend it by revealing the redeemScript in scriptSig + spending_tx_p2sh = CTransaction() + spending_tx_p2sh.vin = [CTxIn(COutPoint(int(txid_p2sh, 16), 0), CScript([large_redeem_script]))] + 
spending_tx_p2sh.vout = [CTxOut(output_value_p2sh - 10000, CScript([OP_0, dummy_pubkey_hash]))] + spending_tx_p2sh.rehash() + + # Should fail because the 300-byte PUSHDATA inside the redeemScript exceeds the limit + result = node.testmempoolaccept([spending_tx_p2sh.serialize().hex()])[0] + assert_equal(result['allowed'], False) + assert 'non-mandatory-script-verify-flag' in result['reject-reason'] or 'Push value size limit exceeded' in result['reject-reason'] + self.log.info(" ✓ P2SH redeemScript with >256 byte PUSHDATA correctly rejected") + self.log.info(" (BIP16 exception only applies to pushing the redeemScript blob, not PUSHDATAs within it)") + + def test_undefined_witness_versions(self): + """Test spec 3: Spending undefined witness versions is invalid. + + Bitcoin currently defines witness v0 (P2WPKH/P2WSH) and v1 (Taproot). + Versions v2-v16 are reserved for future upgrades and are currently undefined. + After DEPLOYMENT_REDUCED_DATA, spending these undefined versions is invalid. 
+ """ + self.log.info("Testing undefined witness version rejection...") + + node = self.nodes[0] + + # Test witness v2 as representative (same logic applies to v3-v16) + version_op = OP_2 # Witness version 2 + version = version_op - 0x50 # Convert OP_2 to numeric 2 + + # Create output to witness v2: <32-byte program> + witness_program = b'\x00' * 32 + script_v2 = CScript([CScriptOp(version_op), witness_program]) + + # Step 1: Create an output to witness v2 (this is allowed) + funding_tx = self.create_test_transaction(script_v2) + txid = node.sendrawtransaction(funding_tx.serialize().hex()) + self.generate(node, 1) + self.log.info(f" Created witness v2 output in tx {txid[:16]}...") + + # Step 2: Try to spend the witness v2 output (should be rejected) + spending_tx = CTransaction() + spending_tx.vin = [CTxIn(COutPoint(int(txid, 16), 0))] + dummy_pubkey_hash = hash160(b'\x00' * 33) + spending_tx.vout = [CTxOut(funding_tx.vout[0].nValue - 10000, CScript([OP_0, dummy_pubkey_hash]))] + + # For undefined witness versions, pre-softfork behavior was "anyone-can-spend" + # with an empty witness stack. Post-REDUCED_DATA, this is now invalid. 
+ spending_tx.wit.vtxinwit = [CTxInWitness()] + spending_tx.wit.vtxinwit[0].scriptWitness.stack = [] # Empty witness + spending_tx.rehash() + + # Should be rejected - undefined witness versions can't be spent after activation + result = node.testmempoolaccept([spending_tx.serialize().hex()])[0] + assert_equal(result['allowed'], False) + # Rejection happens during script verification + assert any(x in result['reject-reason'] for x in ['mempool-script-verify-flag', 'witness-program', 'bad-witness', 'discouraged']) + self.log.info(f" ✓ Witness v{version} spending correctly rejected ({result['reject-reason']})") + + # All undefined versions (v2-v16) are validated identically + self.log.info(f" ✓ Witness versions v2-v16 are all similarly rejected") + + def test_taproot_annex_rejection(self): + """Test spec 4: Witness stacks with a Taproot annex are invalid.""" + self.log.info("Testing Taproot annex rejection...") + node = self.nodes[0] + + # Generate a Taproot key pair for testing + privkey = generate_privkey() + internal_pubkey, _ = compute_xonly_pubkey(privkey) + + # Create a simple Taproot output (key-path only, no script tree) + taproot_info = taproot_construct(internal_pubkey) + taproot_spk = taproot_info.scriptPubKey + + # Test 4.1: Taproot key-path spend WITHOUT annex (valid baseline) + self.log.info(" Test 4.1: Taproot key-path spend without annex (should be valid)") + + # Create funding transaction with Taproot output + funding_tx = self.create_test_transaction(taproot_spk) + funding_txid = funding_tx.rehash() + + # Mine the funding transaction in a block + block_height = node.getblockcount() + 1 + block = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block.vtx.append(funding_tx) + add_witness_commitment(block) + block.solve() + node.submitblock(block.serialize().hex()) + + # Create spending transaction (key-path, no annex) + spending_tx = CTransaction() + 
spending_tx.vin = [CTxIn(COutPoint(int(funding_txid, 16), 0), nSequence=0)] + # Use the actual output value from funding_tx minus a small fee + output_value = funding_tx.vout[0].nValue - 1000 # 1000 sats fee + spending_tx.vout = [CTxOut(output_value, CScript([OP_1, bytes(20)]))] # P2WPKH output + + # Sign with Schnorr signature for Taproot key-path spend + sighash = TaprootSignatureHash(spending_tx, [funding_tx.vout[0]], SIGHASH_DEFAULT, 0) + tweaked_privkey = tweak_add_privkey(privkey, taproot_info.tweak) + sig = sign_schnorr(tweaked_privkey, sighash) + + # Witness for key-path: just the signature + spending_tx.wit.vtxinwit.append(CTxInWitness()) + spending_tx.wit.vtxinwit[0].scriptWitness.stack = [sig] + + # This should be accepted (no annex) + result = node.testmempoolaccept([spending_tx.serialize().hex()])[0] + if not result['allowed']: + self.log.info(f" DEBUG: Taproot spend rejection: {result}") + assert_equal(result['allowed'], True) + self.log.info(" ✓ Taproot key-path spend without annex: ACCEPTED") + + # Test 4.2: Taproot key-path spend WITH annex (invalid after DEPLOYMENT_REDUCED_DATA) + self.log.info(" Test 4.2: Taproot key-path spend with annex (should be rejected)") + + # Create another funding transaction + funding_tx2 = self.create_test_transaction(taproot_spk) + funding_txid2 = funding_tx2.rehash() + + # Mine the funding transaction in a block + block_height2 = node.getblockcount() + 1 + block2 = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height2), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block2.vtx.append(funding_tx2) + add_witness_commitment(block2) + block2.solve() + node.submitblock(block2.serialize().hex()) + + # Create spending transaction with annex + spending_tx2 = CTransaction() + spending_tx2.vin = [CTxIn(COutPoint(int(funding_txid2, 16), 0), nSequence=0)] + output_value2 = funding_tx2.vout[0].nValue - 1000 + spending_tx2.vout = [CTxOut(output_value2, CScript([OP_1, bytes(20)]))] + + # 
Sign the transaction (annex affects sighash) + annex = bytes([ANNEX_TAG]) + b'\x00' * 10 # Annex must start with 0x50 + sighash2 = TaprootSignatureHash(spending_tx2, [funding_tx2.vout[0]], SIGHASH_DEFAULT, 0, annex=annex) + sig2 = sign_schnorr(tweaked_privkey, sighash2) + + # Witness for key-path with annex: [signature, annex] + spending_tx2.wit.vtxinwit.append(CTxInWitness()) + spending_tx2.wit.vtxinwit[0].scriptWitness.stack = [sig2, annex] + + # This should be rejected (annex present) + result2 = node.testmempoolaccept([spending_tx2.serialize().hex()])[0] + if result2['allowed']: + self.log.info(f" DEBUG: Taproot spend with annex was unexpectedly accepted: {result2}") + assert_equal(result2['allowed'], False) + self.log.info(f" ✓ Taproot spend with annex: REJECTED ({result2['reject-reason']})") + + def test_taproot_control_block_size(self): + """Test spec 5: Taproot control blocks > 257 bytes are invalid.""" + self.log.info("Testing Taproot control block size limits...") + node = self.nodes[0] + + # Control block size = 33 + 32 * num_nodes + # Max allowed: 7 nodes = 33 + 32*7 = 257 bytes (depth 7, 128 leaves) + # Invalid: 8 nodes = 33 + 32*8 = 289 bytes (depth 8, 256 leaves) + + max_valid_size = TAPROOT_CONTROL_MAX_SIZE_REDUCED + assert_equal(max_valid_size, 257) + self.log.info(f" Max valid control block size: {max_valid_size} bytes (7 nodes)") + + # Helper function to build a balanced binary tree of given depth + def build_tree(depth, leaf_prefix="leaf"): + """Build a balanced binary tree for Taproot script tree.""" + if depth == 0: + # At leaf level, return a simple script + return (f"{leaf_prefix}", CScript([OP_TRUE])) + else: + # Recursively build left and right subtrees + left = build_tree(depth - 1, f"{leaf_prefix}_L") + right = build_tree(depth - 1, f"{leaf_prefix}_R") + return [left, right] + + # Generate a Taproot key pair + privkey = generate_privkey() + internal_pubkey, _ = compute_xonly_pubkey(privkey) + + # Test 5.1: Control block with 7 merkle 
nodes (valid, 257 bytes) + self.log.info(" Test 5.1: Control block with 7 nodes / depth 7 (should be valid)") + + # Build a balanced tree of depth 7 (128 leaves) + tree_valid = build_tree(7) + taproot_info_valid = taproot_construct(internal_pubkey, [tree_valid]) + taproot_spk_valid = taproot_info_valid.scriptPubKey + + # Create and mine funding transaction + funding_tx_valid = self.create_test_transaction(taproot_spk_valid) + funding_txid_valid = funding_tx_valid.rehash() + + block_height = node.getblockcount() + 1 + block = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block.vtx.append(funding_tx_valid) + add_witness_commitment(block) + block.solve() + node.submitblock(block.serialize().hex()) + + # Spend using the deepest leaf (which will have the longest control block) + # The deepest leaf should be at path L_L_L_L_L_L_L (all left) + deepest_leaf_name = "leaf" + "_L" * 7 + leaf_info_valid = taproot_info_valid.leaves[deepest_leaf_name] + control_block_valid = bytes([leaf_info_valid.version + taproot_info_valid.negflag]) + internal_pubkey + leaf_info_valid.merklebranch + + # Verify control block size + assert_equal(len(control_block_valid), 257) + self.log.info(f" Control block size: {len(control_block_valid)} bytes ✓") + + # Create spending transaction + spending_tx_valid = CTransaction() + spending_tx_valid.vin = [CTxIn(COutPoint(int(funding_txid_valid, 16), 0), nSequence=0)] + output_value = funding_tx_valid.vout[0].nValue - 1000 + spending_tx_valid.vout = [CTxOut(output_value, CScript([OP_1, bytes(20)]))] + + spending_tx_valid.wit.vtxinwit.append(CTxInWitness()) + spending_tx_valid.wit.vtxinwit[0].scriptWitness.stack = [leaf_info_valid.script, control_block_valid] + + result_valid = node.testmempoolaccept([spending_tx_valid.serialize().hex()])[0] + if not result_valid['allowed']: + self.log.info(f" DEBUG: Depth 7 rejection: {result_valid}") + 
assert_equal(result_valid['allowed'], True) + self.log.info(" ✓ Control block with 7 nodes (257 bytes): ACCEPTED") + + # Test 5.2: Control block with 8 merkle nodes (invalid, 289 bytes) + self.log.info(" Test 5.2: Control block with 8 nodes / depth 8 (should be rejected)") + + # Build a balanced tree of depth 8 (256 leaves) + tree_invalid = build_tree(8) + taproot_info_invalid = taproot_construct(internal_pubkey, [tree_invalid]) + taproot_spk_invalid = taproot_info_invalid.scriptPubKey + + # Create and mine funding transaction + funding_tx_invalid = self.create_test_transaction(taproot_spk_invalid) + funding_txid_invalid = funding_tx_invalid.rehash() + + block_height = node.getblockcount() + 1 + block = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block.vtx.append(funding_tx_invalid) + add_witness_commitment(block) + block.solve() + node.submitblock(block.serialize().hex()) + + # Spend using the deepest leaf + deepest_leaf_name_invalid = "leaf" + "_L" * 8 + leaf_info_invalid = taproot_info_invalid.leaves[deepest_leaf_name_invalid] + control_block_invalid = bytes([leaf_info_invalid.version + taproot_info_invalid.negflag]) + internal_pubkey + leaf_info_invalid.merklebranch + + # Verify control block size + assert_equal(len(control_block_invalid), 289) + self.log.info(f" Control block size: {len(control_block_invalid)} bytes (exceeds 257)") + + # Create spending transaction + spending_tx_invalid = CTransaction() + spending_tx_invalid.vin = [CTxIn(COutPoint(int(funding_txid_invalid, 16), 0), nSequence=0)] + output_value = funding_tx_invalid.vout[0].nValue - 1000 + spending_tx_invalid.vout = [CTxOut(output_value, CScript([OP_1, bytes(20)]))] + + spending_tx_invalid.wit.vtxinwit.append(CTxInWitness()) + spending_tx_invalid.wit.vtxinwit[0].scriptWitness.stack = [leaf_info_invalid.script, control_block_invalid] + + result_invalid = 
node.testmempoolaccept([spending_tx_invalid.serialize().hex()])[0] + if result_invalid['allowed']: + self.log.info(f" DEBUG: Depth 8 was unexpectedly accepted: {result_invalid}") + assert_equal(result_invalid['allowed'], False) + self.log.info(f" ✓ Control block with 8 nodes (289 bytes): REJECTED ({result_invalid['reject-reason']})") + + def test_op_success_rejection(self): + """Test spec 6: Tapscripts including OP_SUCCESS* opcodes are invalid.""" + self.log.info("Testing OP_SUCCESS opcode rejection...") + node = self.nodes[0] + + # Generate a Taproot key pair + privkey = generate_privkey() + internal_pubkey, _ = compute_xonly_pubkey(privkey) + + # Test 6.1: Tapscript without OP_SUCCESS (valid baseline) + self.log.info(" Test 6.1: Tapscript without OP_SUCCESS (should be valid)") + + # Create a simple Tapscript: OP_TRUE (always valid) + tapscript_valid = CScript([OP_TRUE]) + taproot_info_valid = taproot_construct(internal_pubkey, [("valid", tapscript_valid)]) + taproot_spk_valid = taproot_info_valid.scriptPubKey + + # Create and mine funding transaction + funding_tx_valid = self.create_test_transaction(taproot_spk_valid) + funding_txid_valid = funding_tx_valid.rehash() + + block_height = node.getblockcount() + 1 + block = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block.vtx.append(funding_tx_valid) + add_witness_commitment(block) + block.solve() + node.submitblock(block.serialize().hex()) + + # Create spending transaction (script-path) + spending_tx_valid = CTransaction() + spending_tx_valid.vin = [CTxIn(COutPoint(int(funding_txid_valid, 16), 0), nSequence=0)] + output_value = funding_tx_valid.vout[0].nValue - 1000 + spending_tx_valid.vout = [CTxOut(output_value, CScript([OP_1, bytes(20)]))] + + # Build witness for script-path spend + leaf_info = taproot_info_valid.leaves["valid"] + control_block = bytes([leaf_info.version + taproot_info_valid.negflag]) + 
internal_pubkey + leaf_info.merklebranch + spending_tx_valid.wit.vtxinwit.append(CTxInWitness()) + spending_tx_valid.wit.vtxinwit[0].scriptWitness.stack = [tapscript_valid, control_block] + + result_valid = node.testmempoolaccept([spending_tx_valid.serialize().hex()])[0] + if not result_valid['allowed']: + self.log.info(f" DEBUG: Valid Tapscript rejection: {result_valid}") + assert_equal(result_valid['allowed'], True) + self.log.info(" ✓ Tapscript without OP_SUCCESS: ACCEPTED") + + # Test 6.2: Tapscript with OP_SUCCESS (invalid) + self.log.info(" Test 6.2: Tapscript with OP_SUCCESS (should be rejected)") + + # Create a Tapscript with OP_SUCCESS: opcodes 0x50, 0x62, etc. + # IMPORTANT: Use CScriptOp to create the actual opcode, not PUSHDATA + # Testing 0x50 (which is also ANNEX_TAG but different context) + for op_success in [0x50, 0x62, 0x89]: + tapscript_invalid = CScript([CScriptOp(op_success)]) + taproot_info_invalid = taproot_construct(internal_pubkey, [("invalid", tapscript_invalid)]) + taproot_spk_invalid = taproot_info_invalid.scriptPubKey + + # Create and mine funding transaction + funding_tx_invalid = self.create_test_transaction(taproot_spk_invalid) + funding_txid_invalid = funding_tx_invalid.rehash() + + block_height = node.getblockcount() + 1 + block = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block.vtx.append(funding_tx_invalid) + add_witness_commitment(block) + block.solve() + node.submitblock(block.serialize().hex()) + + # Create spending transaction + spending_tx_invalid = CTransaction() + spending_tx_invalid.vin = [CTxIn(COutPoint(int(funding_txid_invalid, 16), 0), nSequence=0)] + output_value = funding_tx_invalid.vout[0].nValue - 1000 + spending_tx_invalid.vout = [CTxOut(output_value, CScript([OP_1, bytes(20)]))] + + # Build witness for script-path spend + leaf_info_invalid = taproot_info_invalid.leaves["invalid"] + control_block_invalid = 
bytes([leaf_info_invalid.version + taproot_info_invalid.negflag]) + internal_pubkey + leaf_info_invalid.merklebranch + spending_tx_invalid.wit.vtxinwit.append(CTxInWitness()) + spending_tx_invalid.wit.vtxinwit[0].scriptWitness.stack = [tapscript_invalid, control_block_invalid] + + result_invalid = node.testmempoolaccept([spending_tx_invalid.serialize().hex()])[0] + if result_invalid['allowed']: + self.log.info(f" DEBUG: OP_SUCCESS 0x{op_success:02x} was unexpectedly accepted") + assert_equal(result_invalid['allowed'], False) + self.log.info(f" ✓ Tapscript with OP_SUCCESS (0x{op_success:02x}): REJECTED ({result_invalid['reject-reason']})") + + def test_op_if_notif_rejection(self): + """Test spec 7: Tapscripts executing OP_IF or OP_NOTIF are invalid.""" + self.log.info("Testing OP_IF/OP_NOTIF rejection in Tapscript...") + node = self.nodes[0] + + # Generate a Taproot key pair + privkey = generate_privkey() + internal_pubkey, _ = compute_xonly_pubkey(privkey) + + # Test 7.1: Tapscript with OP_IF (invalid in Tapscript under DEPLOYMENT_REDUCED_DATA) + self.log.info(" Test 7.1: Tapscript with OP_IF (should be rejected)") + + # Create a Tapscript with OP_IF: OP_1 OP_IF OP_1 OP_ENDIF + tapscript_if = CScript([OP_1, OP_IF, OP_1, OP_ENDIF]) + taproot_info_if = taproot_construct(internal_pubkey, [("with_if", tapscript_if)]) + taproot_spk_if = taproot_info_if.scriptPubKey + + # Create and mine funding transaction + funding_tx_if = self.create_test_transaction(taproot_spk_if) + funding_txid_if = funding_tx_if.rehash() + + block_height = node.getblockcount() + 1 + block = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block.vtx.append(funding_tx_if) + add_witness_commitment(block) + block.solve() + node.submitblock(block.serialize().hex()) + + # Create spending transaction + spending_tx_if = CTransaction() + spending_tx_if.vin = [CTxIn(COutPoint(int(funding_txid_if, 16), 0), 
nSequence=0)] + output_value = funding_tx_if.vout[0].nValue - 1000 + spending_tx_if.vout = [CTxOut(output_value, CScript([OP_1, bytes(20)]))] + + # Build witness for script-path spend + leaf_info_if = taproot_info_if.leaves["with_if"] + control_block_if = bytes([leaf_info_if.version + taproot_info_if.negflag]) + internal_pubkey + leaf_info_if.merklebranch + spending_tx_if.wit.vtxinwit.append(CTxInWitness()) + spending_tx_if.wit.vtxinwit[0].scriptWitness.stack = [tapscript_if, control_block_if] + + result_if = node.testmempoolaccept([spending_tx_if.serialize().hex()])[0] + if result_if['allowed']: + self.log.info(f" DEBUG: OP_IF was unexpectedly accepted: {result_if}") + assert_equal(result_if['allowed'], False) + self.log.info(f" ✓ Tapscript with OP_IF: REJECTED ({result_if['reject-reason']})") + + # Test 7.2: Tapscript with OP_NOTIF (invalid in Tapscript under DEPLOYMENT_REDUCED_DATA) + self.log.info(" Test 7.2: Tapscript with OP_NOTIF (should be rejected)") + + # Create a Tapscript with OP_NOTIF: OP_0 OP_NOTIF OP_1 OP_ENDIF + tapscript_notif = CScript([OP_0, OP_NOTIF, OP_1, OP_ENDIF]) + taproot_info_notif = taproot_construct(internal_pubkey, [("with_notif", tapscript_notif)]) + taproot_spk_notif = taproot_info_notif.scriptPubKey + + # Create and mine funding transaction + funding_tx_notif = self.create_test_transaction(taproot_spk_notif) + funding_txid_notif = funding_tx_notif.rehash() + + block_height = node.getblockcount() + 1 + block = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block.vtx.append(funding_tx_notif) + add_witness_commitment(block) + block.solve() + node.submitblock(block.serialize().hex()) + + # Create spending transaction + spending_tx_notif = CTransaction() + spending_tx_notif.vin = [CTxIn(COutPoint(int(funding_txid_notif, 16), 0), nSequence=0)] + output_value = funding_tx_notif.vout[0].nValue - 1000 + spending_tx_notif.vout = 
[CTxOut(output_value, CScript([OP_1, bytes(20)]))] + + # Build witness for script-path spend + leaf_info_notif = taproot_info_notif.leaves["with_notif"] + control_block_notif = bytes([leaf_info_notif.version + taproot_info_notif.negflag]) + internal_pubkey + leaf_info_notif.merklebranch + spending_tx_notif.wit.vtxinwit.append(CTxInWitness()) + spending_tx_notif.wit.vtxinwit[0].scriptWitness.stack = [tapscript_notif, control_block_notif] + + result_notif = node.testmempoolaccept([spending_tx_notif.serialize().hex()])[0] + if result_notif['allowed']: + self.log.info(f" DEBUG: OP_NOTIF was unexpectedly accepted: {result_notif}") + assert_equal(result_notif['allowed'], False) + self.log.info(f" ✓ Tapscript with OP_NOTIF: REJECTED ({result_notif['reject-reason']})") + + def test_mandatory_flags_cannot_be_bypassed(self): + """Test that REDUCED_DATA consensus-mandatory flags cannot be bypassed via ignore_rejects. + + This test verifies that even though PolicyScriptChecks can be bypassed via ignore_rejects, + the subsequent ConsensusScriptChecks enforces consensus rules and prevents invalid transactions + from entering the mempool. 
+ """ + self.log.info("Testing that REDUCED_DATA rules are enforced despite ignore_rejects...") + node = self.nodes[0] + + # Test case: Create a witness script with a 257-byte PUSHDATA (violates REDUCED_DATA) + self.log.info(" Test: 257-byte PUSHDATA in witness script") + + # Create a P2WSH output with a witness script containing 257-byte data push + witness_script_257 = CScript([b'\x00' * 257, OP_DROP, OP_TRUE]) + script_pubkey_257 = script_to_p2wsh_script(witness_script_257) + + # Create and fund the output + funding_tx_257 = self.create_test_transaction(script_pubkey_257) + txid_257 = node.sendrawtransaction(funding_tx_257.serialize().hex()) + self.generate(node, 1) + output_value_257 = funding_tx_257.vout[0].nValue + + # Create spending transaction that reveals the 257-byte PUSHDATA + spending_tx_257 = CTransaction() + spending_tx_257.vin = [CTxIn(COutPoint(int(txid_257, 16), 0))] + # Add padding to output to ensure tx meets minimum size requirements (82 bytes non-witness) + spending_tx_257.vout = [CTxOut(output_value_257 - 1000, CScript([OP_TRUE, OP_DROP] + [OP_TRUE] * 30))] + spending_tx_257.wit.vtxinwit.append(CTxInWitness()) + spending_tx_257.wit.vtxinwit[0].scriptWitness.stack = [witness_script_257] + spending_tx_257.rehash() + + # Test 1: Normal testmempoolaccept should reject + self.log.info(" Test 1a: Normal testmempoolaccept (should reject)") + result_normal = node.testmempoolaccept([spending_tx_257.serialize().hex()])[0] + assert_equal(result_normal['allowed'], False) + assert 'mempool-script-verify-flag' in result_normal['reject-reason'] + self.log.info(f" ✓ Normal testmempoolaccept correctly rejected: {result_normal['reject-reason']}") + + # Test 2: Try to bypass with ignore_rejects=["non-mandatory-script-verify-flag"] + # Expected: Transaction is STILL REJECTED because ConsensusScriptChecks enforces consensus rules + self.log.info(" Test 1b: testmempoolaccept with ignore_rejects") + self.log.info(" This bypasses PolicyScriptChecks but NOT 
ConsensusScriptChecks") + result_bypass = node.testmempoolaccept( + rawtxs=[spending_tx_257.serialize().hex()], + ignore_rejects=["non-mandatory-script-verify-flag"] + )[0] + + # The transaction should still be rejected because ConsensusScriptChecks + # uses GetBlockScriptFlags() which includes REDUCED_DATA consensus rules + self.log.info(f" Result: allowed={result_bypass['allowed']}") + assert_equal(result_bypass['allowed'], False) + self.log.info(f" ✓ Transaction correctly rejected: {result_bypass['reject-reason']}") + self.log.info(" ✓ ConsensusScriptChecks prevents bypass of REDUCED_DATA consensus rules") + + def test_p2wsh_multisig_witness_script_exemption(self): + """Test that a large P2WSH witness script (>256 bytes) is exempted from the element size limit. + + Inspired by mainnet tx a0032427454536006263d237819df5e72fe539a38cb26264ea45a1019fb53bee, + which is a 9-input transaction where each input spends an 11-of-15 P2WSH multisig. + + The witness script for 11-of-15 multisig is ~513 bytes, which exceeds the 256-byte + MAX_SCRIPT_ELEMENT_SIZE_REDUCED limit. However, for P2WSH spends, the witness script + is popped from the stack BEFORE the element size check runs in ExecuteWitnessScript, + so it is implicitly exempted. + """ + self.log.info("Testing 11-of-15 P2WSH multisig witness script exemption...") + + node = self.nodes[0] + + # Generate 15 key pairs + privkeys = [generate_privkey() for _ in range(15)] + pubkeys = [] + for priv in privkeys: + k = ECKey() + k.set(priv, compressed=True) + pubkeys.append(k.get_pubkey().get_bytes()) + + # Build 11-of-15 multisig witness script: + # OP_11 ... 
OP_15 OP_CHECKMULTISIG + witness_script = CScript([OP_11] + pubkeys + [OP_15, OP_CHECKMULTISIG]) + self.log.info(f" Witness script size: {len(witness_script)} bytes") + assert len(witness_script) > MAX_SCRIPT_ELEMENT_SIZE_REDUCED, \ + f"Witness script should exceed 256 bytes, got {len(witness_script)}" + + # Create P2WSH output + script_pubkey = script_to_p2wsh_script(witness_script) + + # Fund the P2WSH output + funding_tx = self.create_test_transaction(script_pubkey) + txid = node.sendrawtransaction(funding_tx.serialize().hex()) + self.generate(node, 1) + + # Create spending transaction + spending_tx = CTransaction() + spending_tx.vin = [CTxIn(COutPoint(int(txid, 16), 0))] + output_value = funding_tx.vout[0].nValue - 10000 + spending_tx.vout = [CTxOut(output_value, CScript([OP_0, hash160(b'\x01' * 33)]))] + + # Sign with 11 of the 15 keys + spending_tx.wit.vtxinwit = [CTxInWitness()] + sighash = SegwitV0SignatureHash( + witness_script, spending_tx, 0, SIGHASH_ALL, funding_tx.vout[0].nValue + ) + + sigs = [] + for i in range(11): + k = ECKey() + k.set(privkeys[i], compressed=True) + sig = k.sign_ecdsa(sighash) + b'\x01' # SIGHASH_ALL + sigs.append(sig) + + # Witness stack: [OP_0_dummy, sig1, ..., sig11, witness_script] + spending_tx.wit.vtxinwit[0].scriptWitness.stack = [b''] + sigs + [witness_script] + spending_tx.rehash() + + # Should be ACCEPTED: witness script is popped before size check + result = node.testmempoolaccept([spending_tx.serialize().hex()])[0] + assert_equal(result['allowed'], True) + self.log.info(" PASS: 11-of-15 P2WSH multisig accepted under reduced_data") + + def test_tapscript_script_exemption(self): + """Test that a large tapleaf script (>256 bytes) is exempted from the element size limit. + + Similar to P2WSH, for tapscript spends the tapleaf script is popped from the + witness stack BEFORE the element size check runs in ExecuteWitnessScript, + so it is implicitly exempted. 
+ """ + self.log.info("Testing tapleaf script size exemption...") + + node = self.nodes[0] + + # Build a tapscript >256 bytes using repeated OP_DROP, ending with OP_TRUE + # Each data push is ≤256 bytes (valid), but the total script exceeds 256 bytes. + large_tapscript = CScript([b'\x00' * 200, OP_DROP, b'\x00' * 200, OP_DROP, OP_TRUE]) + assert len(large_tapscript) > MAX_SCRIPT_ELEMENT_SIZE_REDUCED, \ + f"Tapscript should exceed 256 bytes, got {len(large_tapscript)}" + self.log.info(f"  Tapleaf script size: {len(large_tapscript)} bytes") + + # Construct taproot output with this script as a leaf + privkey = generate_privkey() + internal_pubkey, _ = compute_xonly_pubkey(privkey) + taproot_info = taproot_construct(internal_pubkey, [("large_script", large_tapscript)]) + taproot_spk = taproot_info.scriptPubKey + + # Fund the taproot output + funding_tx = self.create_test_transaction(taproot_spk) + funding_txid = funding_tx.rehash() + + block_height = node.getblockcount() + 1 + block = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block.vtx.append(funding_tx) + add_witness_commitment(block) + block.solve() + node.submitblock(block.serialize().hex()) + + # Spend via script path + leaf_info = taproot_info.leaves["large_script"] + control_block = bytes([leaf_info.version + taproot_info.negflag]) + internal_pubkey + leaf_info.merklebranch + + spending_tx = CTransaction() + spending_tx.vin = [CTxIn(COutPoint(int(funding_txid, 16), 0), nSequence=0)] + output_value = funding_tx.vout[0].nValue - 1000 + spending_tx.vout = [CTxOut(output_value, CScript([OP_1, bytes(20)]))] + + # Witness stack: [script, control_block] + # The script just does DROP DROP TRUE, so no stack inputs needed + spending_tx.wit.vtxinwit.append(CTxInWitness()) + spending_tx.wit.vtxinwit[0].scriptWitness.stack = [large_tapscript, control_block] + + spending_tx.rehash() + + # Should be ACCEPTED: tapleaf script is
popped before size check + result = node.testmempoolaccept([spending_tx.serialize().hex()])[0] + assert_equal(result['allowed'], True) + self.log.info(" PASS: >256-byte tapleaf script accepted under reduced_data") + + def run_test(self): + self.init_test() + + # Run all spec tests + self.test_output_script_size_limit() + self.test_pushdata_size_limit() + self.test_undefined_witness_versions() + self.test_taproot_annex_rejection() + self.test_taproot_control_block_size() + self.test_op_success_rejection() + self.test_op_if_notif_rejection() + self.test_mandatory_flags_cannot_be_bypassed() + self.test_p2wsh_multisig_witness_script_exemption() + self.test_tapscript_script_exemption() + + self.log.info("All ReducedData tests completed") + + +if __name__ == '__main__': + ReducedDataTest(__file__).main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index bf65aa903a..d69f4772d4 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -97,6 +97,7 @@ 'feature_taproot.py', 'feature_reduced_data_temporary_deployment.py', 'feature_bip9_max_activation_height.py', + 'feature_rdts.py', 'feature_block.py', 'mempool_ephemeral_dust.py', 'wallet_conflicts.py --legacy-wallet', From ea4f5770ea7714a826c69306ce5d0423bae506a5 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Fri, 7 Nov 2025 12:58:01 -0600 Subject: [PATCH 318/356] test: Add UTXO height-based REDUCED_DATA enforcement test --- .../feature_reduced_data_utxo_height.py | 471 ++++++++++++++++++ test/functional/test_runner.py | 1 + 2 files changed, 472 insertions(+) create mode 100644 test/functional/feature_reduced_data_utxo_height.py diff --git a/test/functional/feature_reduced_data_utxo_height.py b/test/functional/feature_reduced_data_utxo_height.py new file mode 100644 index 0000000000..e24e7ae7fc --- /dev/null +++ b/test/functional/feature_reduced_data_utxo_height.py @@ -0,0 +1,471 @@ +#!/usr/bin/env python3 +# Copyright (c) 2025 The Bitcoin Knots developers +# 
Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test REDUCED_DATA soft fork UTXO height checking. + +This test verifies that the REDUCED_DATA deployment correctly exempts UTXOs +created before ReducedDataHeightBegin from reduced_data script validation rules, +as implemented in validation.cpp. + +Test scenarios: +1. Old UTXO (created before activation) spent during active period with violation - should be ACCEPTED (EXEMPT) +2. New UTXO (created during active period) spent with violation - should be REJECTED +3. Mixed inputs (old + new UTXOs) in same transaction +4. Boundary test: UTXO created at exactly ReducedDataHeightBegin +""" + +from io import BytesIO + +from test_framework.blocktools import ( + COINBASE_MATURITY, + create_block, + create_coinbase, + add_witness_commitment, +) +from test_framework.messages import ( + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxInWitness, + CTxOut, +) +from test_framework.p2p import P2PDataStore +from test_framework.script import ( + CScript, + OP_TRUE, + OP_DROP, + hash256, +) +from test_framework.script_util import ( + script_to_p2wsh_script, +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, +) +from test_framework.wallet import MiniWallet + + +# BIP9 constants for regtest +BIP9_PERIOD = 144 # blocks per period in regtest +BIP9_THRESHOLD = 108 # 75% of 144 +VERSIONBITS_TOP_BITS = 0x20000000 +REDUCED_DATA_BIT = 4 + +# REDUCED_DATA enforces MAX_SCRIPT_ELEMENT_SIZE_REDUCED (256) instead of MAX_SCRIPT_ELEMENT_SIZE (520) +MAX_ELEMENT_SIZE_STANDARD = 520 +MAX_ELEMENT_SIZE_REDUCED = 256 +VIOLATION_SIZE = 300 # Violates reduced (256) but OK for standard (520) + + +class ReducedDataUTXOHeightTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + # Activate REDUCED_DATA using BIP9 with min_activation_height=288 + # Due 
to BIP9 design, period 0 is always DEFINED, so signaling happens in period 1 + # This activates at height 432 (start of period 3) + # Format: deployment:start:timeout:min_activation_height:max_activation_height:active_duration + # start_time=0, timeout=999999999999 (never), min_activation_height=288, max=2147483647 (INT_MAX, disabled), active_duration=2147483647 (permanent) + self.extra_args = [[ + '-vbparams=reduced_data:0:999999999999:288:2147483647:2147483647', + ]] + + def create_p2wsh_funding_and_spending_tx(self, wallet, node, witness_element_size): + """Create a P2WSH output, then a transaction spending it with custom witness size. + + Returns: + tuple: (funding_tx, spending_tx) where funding_tx creates P2WSH output, + spending_tx spends it with witness element of specified size + """ + # Create a simple witness script: OP_DROP OP_TRUE + # This allows us to put arbitrary data in the witness + witness_script = CScript([OP_DROP, OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) + + # Use MiniWallet to create funding transaction to P2WSH output + funding_txid = wallet.send_to(from_node=node, scriptPubKey=script_pubkey, amount=100000)['txid'] + funding_tx_hex = node.getrawtransaction(funding_txid) + funding_tx = CTransaction() + funding_tx.deserialize(BytesIO(bytes.fromhex(funding_tx_hex))) + funding_tx.rehash() # Calculate sha256 hash after deserializing + + # Find the P2WSH output + p2wsh_vout = None + for i, vout in enumerate(funding_tx.vout): + if vout.scriptPubKey == script_pubkey: + p2wsh_vout = i + break + assert p2wsh_vout is not None, "P2WSH output not found" + + # Spending transaction: spend P2WSH output with custom witness + spending_tx = CTransaction() + spending_tx.vin = [CTxIn(COutPoint(funding_tx.sha256, p2wsh_vout))] + spending_tx.vout = [CTxOut(funding_tx.vout[p2wsh_vout].nValue - 1000, CScript([OP_TRUE]))] + + # Create witness with element of specified size + spending_tx.wit.vtxinwit.append(CTxInWitness()) + 
spending_tx.wit.vtxinwit[0].scriptWitness.stack = [ + b'\x42' * witness_element_size, # Data element of specified size + witness_script # Witness script + ] + spending_tx.rehash() + + return funding_tx, spending_tx + + def create_test_block(self, txs, signal=False): + """Create a block with the given transactions.""" + # Always get fresh tip and height to ensure blocks chain correctly + tip = self.nodes[0].getbestblockhash() + height = self.nodes[0].getblockcount() + 1 + tip_header = self.nodes[0].getblockheader(tip) + block_time = tip_header['time'] + 1 + block = create_block(int(tip, 16), create_coinbase(height), ntime=block_time, txlist=txs) + if signal: + block.nVersion = VERSIONBITS_TOP_BITS | (1 << REDUCED_DATA_BIT) + add_witness_commitment(block) + block.solve() + return block + + def mine_blocks(self, count, signal=False): + """Mine blocks with optional BIP9 signaling for REDUCED_DATA.""" + for _ in range(count): + block = self.create_test_block([], signal=signal) + result = self.nodes[0].submitblock(block.serialize().hex()) + if result is not None: + raise AssertionError(f"submitblock failed: {result}") + # Verify block was accepted + assert self.nodes[0].getbestblockhash() == block.hash + + def run_test(self): + node = self.nodes[0] + self.peer = node.add_p2p_connection(P2PDataStore()) + + # Use MiniWallet for easy UTXO management + wallet = MiniWallet(node) + + self.log.info("Mining blocks to activate REDUCED_DATA via BIP9...") + + # BIP9 state timeline with start_time=0: + # - Period 0 (blocks 0-143): DEFINED (cannot signal yet) + # - Period 1 (blocks 144-287): STARTED (signal here with 108/144 threshold) + # - Period 2 (blocks 288-431): LOCKED_IN (if threshold met in period 1) + # - Period 3 (blocks 432-575): ACTIVE + + # Mine through period 0 (DEFINED state) + self.log.info("Mining through period 0 (DEFINED)...") + self.generate(wallet, 144) + self.log.info(f"DEBUG: After period 0, height = {node.getblockcount()}") + + # Mine 108 signaling blocks in 
period 1 (STARTED state) + self.log.info("Mining 108 signaling blocks in period 1 (blocks 144-251)...") + self.mine_blocks(108, signal=True) + self.log.info(f"DEBUG: After 108 signaling blocks, height = {node.getblockcount()}") + + # Mine to end of period 1 (block 287) + self.log.info("Mining to end of period 1 (block 287)...") + self.mine_blocks(287 - 144 - 108, signal=False) + self.log.info(f"DEBUG: After period 1, height = {node.getblockcount()}") + + # Check that we're LOCKED_IN at start of period 2 + self.generate(wallet, 1) # Mine block 288 + self.log.info(f"DEBUG: After mining block 288, height = {node.getblockcount()}") + deployment_info = node.getdeploymentinfo() + rd_info = deployment_info['deployments']['reduced_data'] + if 'bip9' in rd_info: + status = rd_info['bip9']['status'] + self.log.info(f"At height {node.getblockcount()}, REDUCED_DATA status: {status}") + assert status == 'locked_in', f"Expected LOCKED_IN at block 288, got {status}" + else: + raise AssertionError("REDUCED_DATA deployment not found") + + # Mine to block 432 (start of period 3) where activation occurs + self.log.info("Mining to block 432 for activation...") + self.generate(wallet, 432 - 288) + + current_height = node.getblockcount() + + # Check activation status + deployment_info = node.getdeploymentinfo() + rd_info = deployment_info['deployments']['reduced_data'] + if 'bip9' in rd_info: + status = rd_info['bip9']['status'] + self.log.info(f"At height {current_height}, REDUCED_DATA status: {status}") + if status == 'active': + ACTIVATION_HEIGHT = rd_info['bip9']['since'] + else: + raise AssertionError(f"REDUCED_DATA not active at height {current_height}, status: {status}") + else: + raise AssertionError("REDUCED_DATA deployment not found") + + self.log.info(f"✓ REDUCED_DATA activated at height {ACTIVATION_HEIGHT}") + assert ACTIVATION_HEIGHT == 432, f"Expected activation at 432, got {ACTIVATION_HEIGHT}" + + # Initialize wallet with some coins + self.generate(wallet, 
COINBASE_MATURITY + 10) + current_height = node.getblockcount() + + # Now rewind to before activation to create test UTXOs + # Save the tip so we can restore later + activation_tip = node.getbestblockhash() + + # Rewind to 20 blocks before activation + target_height = ACTIVATION_HEIGHT - 20 + blocks_to_invalidate = current_height - target_height + self.log.info(f"Rewinding {blocks_to_invalidate} blocks to height {target_height}...") + for _ in range(blocks_to_invalidate): + node.invalidateblock(node.getbestblockhash()) + + assert_equal(node.getblockcount(), target_height) + + # ====================================================================== + # Test 1: Create OLD UTXO before activation + # ====================================================================== + self.log.info("Test 1: Creating P2WSH UTXO before activation height...") + + # Create P2WSH funding transaction for old UTXO + old_funding_tx, old_spending_tx = self.create_p2wsh_funding_and_spending_tx( + wallet, node, VIOLATION_SIZE + ) + + # Confirm the funding transaction in a block + block = self.create_test_block([old_funding_tx], signal=False) + node.submitblock(block.serialize().hex()) + old_utxo_height = node.getblockcount() + + self.log.info(f"Created old P2WSH UTXO at height {old_utxo_height} (< {ACTIVATION_HEIGHT})") + + # ====================================================================== + # Test 2: Mine to activation height + # ====================================================================== + self.log.info("Test 2: Mining to activation height...") + + current_height = node.getblockcount() + blocks_to_activation = ACTIVATION_HEIGHT - current_height + if blocks_to_activation > 0: + self.mine_blocks(blocks_to_activation, signal=False) + + current_height = node.getblockcount() + assert_equal(current_height, ACTIVATION_HEIGHT) + self.log.info(f"At activation height: {current_height}") + + # Verify REDUCED_DATA is active + deployment_info = node.getdeploymentinfo() + rd_info = 
deployment_info['deployments']['reduced_data'] + if 'bip9' in rd_info: + status = rd_info['bip9']['status'] + else: + status = 'active' if rd_info.get('active') else 'unknown' + assert status == 'active', f"Expected 'active' at height {current_height}, got '{status}'" + + # ====================================================================== + # Test 3: Create NEW UTXO at/after activation + # ====================================================================== + self.log.info("Test 3: Creating P2WSH UTXO at activation height...") + + # Create P2WSH funding transaction for new UTXO + new_funding_tx, new_spending_tx = self.create_p2wsh_funding_and_spending_tx( + wallet, node, VIOLATION_SIZE + ) + + # Confirm the funding transaction in a block + block = self.create_test_block([new_funding_tx], signal=False) + node.submitblock(block.serialize().hex()) + new_utxo_height = node.getblockcount() + + self.log.info(f"Created new P2WSH UTXO at height {new_utxo_height} (>= {ACTIVATION_HEIGHT})") + + # Mine a few more blocks + self.mine_blocks(5, signal=False) + current_height = node.getblockcount() + self.log.info(f"Current height: {current_height}") + + # ====================================================================== + # Test 4: Spend OLD UTXO with oversized witness - should be ACCEPTED + # ====================================================================== + self.log.info(f"Test 4: Spending old UTXO (height {old_utxo_height}) with {VIOLATION_SIZE}-byte witness element...") + self.log.info(f" This violates REDUCED_DATA ({MAX_ELEMENT_SIZE_REDUCED} limit) but old UTXOs should be EXEMPT") + + # Try to mine block with old_spending_tx (has 300-byte witness element) + block = self.create_test_block([old_spending_tx], signal=False) + result = node.submitblock(block.serialize().hex()) + assert result is None, f"Expected success, got: {result}" + + self.log.info(f"✓ SUCCESS: Old UTXO with {VIOLATION_SIZE}-byte witness element was ACCEPTED (correctly exempt)") + + # 
====================================================================== + # Test 5: Spend NEW UTXO with oversized witness - should be REJECTED + # ====================================================================== + self.log.info(f"Test 5: Spending new UTXO (height {new_utxo_height}) with {VIOLATION_SIZE}-byte witness element...") + self.log.info(f" This violates REDUCED_DATA ({MAX_ELEMENT_SIZE_REDUCED} limit) and should be REJECTED") + + # Try to mine block with new_spending_tx (has 300-byte witness element) + block = self.create_test_block([new_spending_tx], signal=False) + result = node.submitblock(block.serialize().hex()) + assert result is not None and 'mandatory-script-verify-flag-failed' in result, f"Expected rejection, got: {result}" + + self.log.info(f"✓ SUCCESS: New UTXO with {VIOLATION_SIZE}-byte witness element was REJECTED (correctly enforced)") + + # ====================================================================== + # Test 6: Boundary test - UTXO at exactly ReducedDataHeightBegin + # ====================================================================== + self.log.info(f"Test 6: Boundary test - verifying UTXO at activation height {ACTIVATION_HEIGHT}...") + + # The new_funding_tx was confirmed at height ACTIVATION_HEIGHT+1, but let's create one AT height ACTIVATION_HEIGHT + # First, invalidate back to height ACTIVATION_HEIGHT-1 + current_tip = node.getbestblockhash() + blocks_to_invalidate = node.getblockcount() - (ACTIVATION_HEIGHT - 1) + for _ in range(blocks_to_invalidate): + node.invalidateblock(node.getbestblockhash()) + + assert_equal(node.getblockcount(), ACTIVATION_HEIGHT - 1) + self.log.info(f" Rewound to height {node.getblockcount()}") + + # Create UTXO exactly at activation height + boundary_funding_tx, boundary_spending_tx = self.create_p2wsh_funding_and_spending_tx( + wallet, node, VIOLATION_SIZE + ) + block = self.create_test_block([boundary_funding_tx], signal=False) + result = node.submitblock(block.serialize().hex()) + assert 
result is None, f"Expected success, got: {result}" + boundary_height = node.getblockcount() + assert_equal(boundary_height, ACTIVATION_HEIGHT) + + self.log.info(f" Created boundary UTXO at height {boundary_height} (exactly at activation)") + + # Mine a few blocks past activation + self.mine_blocks(5, signal=False) + + # Try to spend boundary UTXO - should be REJECTED (height ACTIVATION_HEIGHT >= ACTIVATION_HEIGHT) + self.log.info(f" Spending boundary UTXO with {VIOLATION_SIZE}-byte witness (should be REJECTED)") + block = self.create_test_block([boundary_spending_tx], signal=False) + result = node.submitblock(block.serialize().hex()) + assert result is not None and 'mandatory-script-verify-flag-failed' in result, f"Expected rejection, got: {result}" + + self.log.info(f"✓ SUCCESS: UTXO at exactly activation height {ACTIVATION_HEIGHT} is SUBJECT to rules (not exempt)") + + # Restore chain to where we were + node.reconsiderblock(current_tip) + + # ====================================================================== + # Test 7: Mixed inputs - one old (exempt) + one new (subject to rules) + # ====================================================================== + self.log.info("Test 7: Creating transaction with mixed inputs (old + new UTXOs)...") + + # We need fresh old and new UTXOs. 
Rewind to before activation again + current_tip2 = node.getbestblockhash() + blocks_to_invalidate = node.getblockcount() - (ACTIVATION_HEIGHT - 20) + for _ in range(blocks_to_invalidate): + node.invalidateblock(node.getbestblockhash()) + + # Create OLD UTXO at height before activation + old_mixed_funding, old_mixed_spending = self.create_p2wsh_funding_and_spending_tx( + wallet, node, VIOLATION_SIZE + ) + block = self.create_test_block([old_mixed_funding], signal=False) + node.submitblock(block.serialize().hex()) + old_mixed_height = node.getblockcount() + self.log.info(f" Created old UTXO at height {old_mixed_height}") + + # Mine to after activation + blocks_to_mine = ACTIVATION_HEIGHT - node.getblockcount() + 5 + self.mine_blocks(blocks_to_mine, signal=False) + + # Create NEW UTXO at height after activation + new_mixed_funding, new_mixed_spending = self.create_p2wsh_funding_and_spending_tx( + wallet, node, VIOLATION_SIZE + ) + block = self.create_test_block([new_mixed_funding], signal=False) + node.submitblock(block.serialize().hex()) + new_mixed_height = node.getblockcount() + self.log.info(f" Created new UTXO at height {new_mixed_height}") + + # Find P2WSH outputs in funding transactions + witness_script = CScript([OP_DROP, OP_TRUE]) + script_pubkey = script_to_p2wsh_script(witness_script) + + old_p2wsh_vout = None + for i, vout in enumerate(old_mixed_funding.vout): + if vout.scriptPubKey == script_pubkey: + old_p2wsh_vout = i + break + + new_p2wsh_vout = None + for i, vout in enumerate(new_mixed_funding.vout): + if vout.scriptPubKey == script_pubkey: + new_p2wsh_vout = i + break + + # Create transaction with BOTH inputs + mixed_tx = CTransaction() + mixed_tx.vin = [ + CTxIn(COutPoint(old_mixed_funding.sha256, old_p2wsh_vout)), # Old UTXO (exempt) + CTxIn(COutPoint(new_mixed_funding.sha256, new_p2wsh_vout)), # New UTXO (subject to rules) + ] + total_value = (old_mixed_funding.vout[old_p2wsh_vout].nValue + + new_mixed_funding.vout[new_p2wsh_vout].nValue - 2000) + 
mixed_tx.vout = [CTxOut(total_value, CScript([OP_TRUE]))] + + # Add witness for both inputs - both with 300-byte elements + mixed_tx.wit.vtxinwit = [] + + # Input 0: old UTXO (would pass alone) + wit0 = CTxInWitness() + wit0.scriptWitness.stack = [b'\x42' * VIOLATION_SIZE, witness_script] + mixed_tx.wit.vtxinwit.append(wit0) + + # Input 1: new UTXO (would fail) + wit1 = CTxInWitness() + wit1.scriptWitness.stack = [b'\x42' * VIOLATION_SIZE, witness_script] + mixed_tx.wit.vtxinwit.append(wit1) + + mixed_tx.rehash() + + self.log.info(f" Mixed tx: old UTXO (height {old_mixed_height}, exempt) + new UTXO (height {new_mixed_height}, subject)") + self.log.info(f" Both inputs have {VIOLATION_SIZE}-byte witness elements") + + # Try to mine block - should REJECT because new input violates + self.mine_blocks(2, signal=False) + block = self.create_test_block([mixed_tx], signal=False) + result = node.submitblock(block.serialize().hex()) + assert result is not None and 'mandatory-script-verify-flag-failed' in result, f"Expected rejection, got: {result}" + + self.log.info(f"✓ SUCCESS: Mixed transaction REJECTED (new input violated rules, even though old input was exempt)") + + # Restore chain + node.reconsiderblock(current_tip2) + + # ====================================================================== + # Summary + # ====================================================================== + self.log.info(f""" + ============================================================ + TEST SUMMARY - UTXO Height-Based REDUCED_DATA Enforcement + ============================================================ + + ✓ Test 1-3: Setup old and new UTXOs at correct heights + ✓ Test 4: Old UTXO (height < {ACTIVATION_HEIGHT}) is EXEMPT - 300-byte witness ACCEPTED + ✓ Test 5: New UTXO (height >= {ACTIVATION_HEIGHT}) is SUBJECT - 300-byte witness REJECTED + ✓ Test 6: Boundary condition - UTXO at exactly height {ACTIVATION_HEIGHT} is SUBJECT + ✓ Test 7: Mixed inputs - transaction rejected if ANY input 
violates + + Key validations: + • REDUCED_DATA activated via BIP9 signaling at height {ACTIVATION_HEIGHT} + • UTXOs created before activation height are EXEMPT from rules + • UTXOs created at/after activation height are SUBJECT to rules + • Per-input validation flags work correctly (validation.cpp) + • Boundary at activation height uses >= operator (not >) + + This confirms the implementation of UTXO height exemption: + "Exempt inputs spending UTXOs prior to ReducedDataHeightBegin from + reduced_data script validation rules" + + All 7 tests passed! + ============================================================ + """) + + +if __name__ == '__main__': + ReducedDataUTXOHeightTest(__file__).main() diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index d69f4772d4..15d95b02d5 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -152,6 +152,7 @@ 'p2p_headers_sync_with_minchainwork.py', 'p2p_feefilter.py', 'feature_csv_activation.py', + 'feature_reduced_data_utxo_height.py', 'p2p_sendheaders.py', 'feature_config_args.py', 'wallet_listtransactions.py --legacy-wallet', From 44904f004f2c7f4b079993ce6b3d5a8f0c3be809 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Fri, 7 Nov 2025 17:33:04 -0600 Subject: [PATCH 319/356] test: Update tests for REDUCED_DATA consensus limits Adapt unit tests to comply with REDUCED_DATA restrictions: - Add REDUCED_DATA flag to mapFlagNames in transaction_tests - Update witness test from 520-byte to 256-byte push limit - Accept SCRIPT_ERR_PUSH_SIZE in miniscript satisfaction tests - Update Taproot tree depth tests from 128 to 7 levels - Fix descriptor error message to report correct nesting limit (7) REDUCED_DATA enforces MAX_SCRIPT_ELEMENT_SIZE_REDUCED (256 bytes) and TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED (7 levels) at the policy level via STANDARD_SCRIPT_VERIFY_FLAGS. 
--- src/script/descriptor.cpp | 2 +- src/test/data/tx_valid.json | 6 +++--- src/test/miniscript_tests.cpp | 31 ++++++++++++++++++++++++------ src/test/script_standard_tests.cpp | 7 ++++--- src/test/transaction_tests.cpp | 1 + 5 files changed, 34 insertions(+), 13 deletions(-) diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index a1615fbdcc..391780034b 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -1968,7 +1968,7 @@ std::vector> ParseScript(uint32_t& key_exp_index while (Const("{", expr)) { branches.push_back(false); // new left branch if (branches.size() > TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED) { - error = strprintf("tr() supports at most %i nesting levels", TAPROOT_CONTROL_MAX_NODE_COUNT); + error = strprintf("tr() supports at most %i nesting levels", TAPROOT_CONTROL_MAX_NODE_COUNT_REDUCED); return {}; } } diff --git a/src/test/data/tx_valid.json b/src/test/data/tx_valid.json index 70df0d0f69..547deefe2c 100644 --- a/src/test/data/tx_valid.json +++ b/src/test/data/tx_valid.json @@ -414,9 +414,9 @@ ["0000000000000000000000000000000000000000000000000000000000000100", 2, "0x51", 3000]], "0100000000010300010000000000000000000000000000000000000000000000000000000000000000000000ffffffff00010000000000000000000000000000000000000000000000000000000000000100000000ffffffff00010000000000000000000000000000000000000000000000000000000000000200000000ffffffff03e8030000000000000151d0070000000000000151b80b00000000000001510002483045022100a3cec69b52cba2d2de623ffffffffff1606184ea55476c0f8189fda231bc9cbb022003181ad597f7c380a7d1c740286b1d022b8b04ded028b833282e055e03b8efef812103596d3451025c19dbbdeb932d6bf8bfb4ad499b95b6f88db8899efac102e5fc710000000000", "DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM"], -["Witness with a push of 520 bytes"], -[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x20 0x33198a9bfef674ebddb9ffaa52928017b8472791e54c609cb95f278ac6b1e349", 1000]], 
-"0100000000010100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff010000000000000000015102fd08020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002755100000000", "NONE"], +["Witness with a push of 256 bytes (REDUCED_DATA limit)"], +[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x20 0xa57e25ffadd285772f5627ec6fa613bc8fb49b4db475c371dfd4eb76f25c5073", 1000]], 
+"0100000000010100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff010000000000000000015101fd05014d000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000755100000000", "NONE"], ["Transaction mixing all SigHash, segwit and normal inputs"], [[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x14 0x4c9c3dfac4207d5d8cb89df5722cb3d712385e3f", 1000], diff --git a/src/test/miniscript_tests.cpp b/src/test/miniscript_tests.cpp index 47fc45df4a..e7e32ea2b4 100644 --- a/src/test/miniscript_tests.cpp +++ b/src/test/miniscript_tests.cpp @@ -393,13 +393,25 @@ void TestSatisfy(const KeyConverter& converter, const std::string& testcase, con // Test non-malleable satisfaction. ScriptError serror; bool res = VerifyScript(CScript(), script_pubkey, &witness_nonmal, STANDARD_SCRIPT_VERIFY_FLAGS, checker, &serror); - // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions(). - if (node->ValidSatisfactions()) BOOST_CHECK(res); + // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions(), unless REDUCED_DATA rules are violated. 
+ if (node->ValidSatisfactions()) { + BOOST_CHECK(res || + serror == ScriptError::SCRIPT_ERR_PUSH_SIZE || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS || + serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF); + } // More detailed: non-malleable satisfactions must be valid, or could fail with ops count error (if CheckOpsLimit failed), - // or with a stack size error (if CheckStackSize check fails). + // or with a stack size error (if CheckStackSize check fails), or with REDUCED_DATA-related errors. BOOST_CHECK(res || (!node->CheckOpsLimit() && serror == ScriptError::SCRIPT_ERR_OP_COUNT) || - (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE)); + (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE) || + (serror == ScriptError::SCRIPT_ERR_PUSH_SIZE) || + (serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM) || + (serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION) || + (serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS) || + (serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF)); } if (mal_success && (!nonmal_success || witness_mal.stack != witness_nonmal.stack)) { @@ -407,8 +419,15 @@ void TestSatisfy(const KeyConverter& converter, const std::string& testcase, con ScriptError serror; bool res = VerifyScript(CScript(), script_pubkey, &witness_mal, STANDARD_SCRIPT_VERIFY_FLAGS, checker, &serror); // Malleable satisfactions are not guaranteed to be valid under any conditions, but they can only - // fail due to stack or ops limits. - BOOST_CHECK(res || serror == ScriptError::SCRIPT_ERR_OP_COUNT || serror == ScriptError::SCRIPT_ERR_STACK_SIZE); + // fail due to stack or ops limits, or REDUCED_DATA-related errors. 
+ BOOST_CHECK(res || + serror == ScriptError::SCRIPT_ERR_OP_COUNT || + serror == ScriptError::SCRIPT_ERR_STACK_SIZE || + serror == ScriptError::SCRIPT_ERR_PUSH_SIZE || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS || + serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF); } if (node->IsSane()) { diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp index e9ce82ca8a..042a6c6275 100644 --- a/src/test/script_standard_tests.cpp +++ b/src/test/script_standard_tests.cpp @@ -385,9 +385,10 @@ BOOST_AUTO_TEST_CASE(script_standard_taproot_builder) BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({2,2,0}), false); BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({2,2,1}), true); BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({2,2,2}), false); - BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({2,2,2,3,4,5,6,7,8,9,10,11,12,14,14,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,31,31,31,31,31,31,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,128}), true); - BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({128,128,127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1}), true); - 
BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({129,129,128,127,126,125,124,123,122,121,120,119,118,117,116,115,114,113,112,111,110,109,108,107,106,105,104,103,102,101,100,99,98,97,96,95,94,93,92,91,90,89,88,87,86,85,84,83,82,81,80,79,78,77,76,75,74,73,72,71,70,69,68,67,66,65,64,63,62,61,60,59,58,57,56,55,54,53,52,51,50,49,48,47,46,45,44,43,42,41,40,39,38,37,36,35,34,33,32,31,30,29,28,27,26,25,24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1}), false); + // REDUCED_DATA limits Taproot tree depth to 7 instead of 128 + BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({2,2,2,3,4,5,6,7,7}), true); + BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({7,7,6,5,4,3,2,1}), true); + BOOST_CHECK_EQUAL(TaprootBuilder::ValidDepths({8,8,7,6,5,4,3,2,1}), false); XOnlyPubKey key_inner{"79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798"_hex_u8}; XOnlyPubKey key_1{"c6047f9441ed7d6d3045406e95c07cd85c778e4b8cef3ca7abac09b95c709ee5"_hex_u8}; diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index f21f1f2ca2..58ed2a9c1b 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -71,6 +71,7 @@ static std::map mapFlagNames = { {std::string("DISCOURAGE_UPGRADABLE_PUBKEYTYPE"), (unsigned int)SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_PUBKEYTYPE}, {std::string("DISCOURAGE_OP_SUCCESS"), (unsigned int)SCRIPT_VERIFY_DISCOURAGE_OP_SUCCESS}, {std::string("DISCOURAGE_UPGRADABLE_TAPROOT_VERSION"), (unsigned int)SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION}, + {std::string("REDUCED_DATA"), (unsigned int)SCRIPT_VERIFY_REDUCED_DATA}, }; unsigned int ParseScriptFlags(std::string strFlags) From eb4a48597f080d03070de27cda38b511a477d933 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Fri, 7 Nov 2025 17:33:26 -0600 Subject: [PATCH 320/356] test: Replace thresh() with and_v() in descriptor test Replace thresh(2,pk(...),s:pk(...),adv:older(42)) with and_v(v:pk(...),pk(...)) because thresh() uses OP_IF opcodes which are completely 
forbidden in Tapscript when REDUCED_DATA is active (see src/script/interpreter.cpp:621-623). The and_v() construction provides equivalent 2-of-2 multisig functionality without conditional branching, making it compatible with REDUCED_DATA restrictions. Also update line 1010 test to expect "tr() supports at most 7 nesting levels" error instead of multi() error, as the test's 22 opening braces exceed REDUCED_DATA's 7-level limit before the parser can discover the multi() error. --- src/test/descriptor_tests.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 63c53a842c..223d2934ac 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -1006,7 +1006,8 @@ BOOST_AUTO_TEST_CASE(descriptor_test) CheckUnparsable("sh(and_v(vc:andor(pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),pk_k(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn),and_v(v:older(1),pk_k(L4o2kDvXXDRH2VS9uBnouScLduWt4dZnM25se7kvEjJeQ285en2A))),after(10)))", "sh(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10)))", "Miniscript expressions can only be used in wsh or tr."); CheckUnparsable("tr(and_v(vc:andor(pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),pk_k(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn),and_v(v:older(1),pk_k(L4o2kDvXXDRH2VS9uBnouScLduWt4dZnM25se7kvEjJeQ285en2A))),after(10)))", "tr(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10)))", "tr(): key 
'and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10))' is not valid"); CheckUnparsable("raw(and_v(vc:andor(pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),pk_k(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn),and_v(v:older(1),pk_k(L4o2kDvXXDRH2VS9uBnouScLduWt4dZnM25se7kvEjJeQ285en2A))),after(10)))", "sh(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(02aa27e5eb2c185e87cd1dbc3e0efc9cb1175235e0259df1713424941c3cb40402))),after(10)))", "Miniscript expressions can only be used in wsh or tr."); - CheckUnparsable("", "tr(034D2224bbbbbbbbbbcbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb40,{{{{{{{{{{{{{{{{{{{{{{multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/967808'/9,xprvA1RpRA33e1JQ7ifknakTFNpgXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/968/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/585/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/2/0/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/5/8/5/8/24/5/58/52/5/8/5/2/8/24/5/58/588/246/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/5/4/5/58/55/58/2/5/8/55/2/5/8/58/555/58/2/5/8/4//2/5/58/5w/2/5/8/5/2/4/5/58/5558'/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/8/58/2/5/58/58/2/5/8/9/588/2/58/2/5/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/82/5/8/5/5/58/52/6/8/5/2/8/{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{
{{{{{{{{{{{{{{}{{{{{{{{{DDD2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8588/246/8/5/2DLDDDDDDDbbD3DDDD/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8D)/5/2/5/58/58/2/5/58/58/58/588/2/58/2/5/8/5/25/58/58/2/5/58/58/2/5/8/9/588/2/58/2/6780,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFW/8/5/2/5/58678008')", "'multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/967808'/9,xprvA1RpRA33e1JQ7ifknakTFNpgXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/968/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/585/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/2/0/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/5/8/5/8/24/5/58/52/5/8/5/2/8/24/5/58/588/246/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/5/4/5/58/55/58/2/5/8/55/2/5/8/58/555/58/2/5/8/4//2/5/58/5w/2/5/8/5/2/4/5/58/5558'/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/8/58/2/5/58/58/2/5/8/9/588/2/58/2/5/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/82/5/8/5/5/58/52/6/8/5/2/8/{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}{{{{{{{{{DDD2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8588/246/8/5/2DLDDDDDDDbbD3DDDD/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8D)/5/2/5/58/58/2/5/58/58/58/588/2/58/2/5/8/5/25/58/58/2/5/58/58/2/5/8/9/588/2/58/2/6780,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFW/8/5/2/5/58678008'' is 
not a valid descriptor function"); + // REDUCED_DATA limits Taproot nesting to 7 levels, so this test now hits that limit before the multi() error + CheckUnparsable("", "tr(034D2224bbbbbbbbbbcbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb40,{{{{{{{{{{{{{{{{{{{{{{multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/967808'/9,xprvA1RpRA33e1JQ7ifknakTFNpgXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/968/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/585/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/2/0/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/5/8/5/8/24/5/58/52/5/8/5/2/8/24/5/58/588/246/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/5/4/5/58/55/58/2/5/8/55/2/5/8/58/555/58/2/5/8/4//2/5/58/5w/2/5/8/5/2/4/5/58/5558'/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/8/2/5/8/5/5/8/58/2/5/58/58/2/5/8/9/588/2/58/2/5/8/5/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8/5/2/5/58/58/2/5/5/58/588/2/58/2/5/8/5/2/82/5/8/5/5/58/52/6/8/5/2/8/{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{{}{{{{{{{{{DDD2/5/8/5/2/5/58/58/2/5/58/58/588/2/58/2/8/5/8/5/4/5/58/588/2/6/8/5/2/8/2/5/8588/246/8/5/2DLDDDDDDDbbD3DDDD/8/2/5/8/5/2/5/58/58/2/5/5/5/58/588/2/6/8/5/2/8/2/5/8/2/58/2/5/8/5/2/8/5/8/3/4/5/58/55/2/5/58/58/2/5/5/5/8/5/2/8/5/85/2/8/2/5/8D)/5/2/5/58/58/2/5/58/58/58/588/2/58/2/5/8/5/25/58/58/2/5/58/58/2/5/8/9/588/2/58/2/6780,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFW/8/5/2/5/58678008')", "tr() supports at most 7 nesting levels"); // No uncompressed keys allowed CheckUnparsable("", 
"wsh(and_v(vc:andor(pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),pk_k(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0),and_v(v:older(1),pk_k(049228de6902abb4f541791f6d7f925b10e2078ccb1298856e5ea5cc5fd667f930eac37a00cc07f9a91ef3c2d17bf7a17db04552ff90ac312a5b8b4caca6c97aa4))),after(10)))", "Uncompressed keys are not allowed"); // No hybrid keys allowed @@ -1047,7 +1048,8 @@ BOOST_AUTO_TEST_CASE(descriptor_test) Check("wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE_FAILS, {{"0020cf62bf97baf977aec69cbc290c372899f913337a9093e8f066ab59b8657a365c"}}, OutputType::BECH32, /*op_desc_id=*/uint256{"8412ba3ac20ba3a30f81442d10d32e0468fa52814960d04e959bf84a9b813b88"}, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {}); Check("wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", 
SIGNABLE, {{"0020cf62bf97baf977aec69cbc290c372899f913337a9093e8f066ab59b8657a365c"}}, OutputType::BECH32, /*op_desc_id=*/uint256{"8412ba3ac20ba3a30f81442d10d32e0468fa52814960d04e959bf84a9b813b88"}, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {{"ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588"_hex_v_u8, "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"_hex_v_u8}}); // Can have a Miniscript expression under tr() if it's alone. - Check("tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,thresh(2,pk(L1NKM8dVA1h52mwDrmk1YreTWkAZZTu2vmKLpmLEbFRqGQYjHeEV),s:pk(Kz3iCBy3HNGP5CZWDsAMmnCMFNwqdDohudVN9fvkrN7tAkzKNtM7),adv:older(42)))", "tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,thresh(2,pk(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529),s:pk(9918d400c1b8c3c478340a40117ced4054b6b58f48cdb3c89b836bdfee1f5766),adv:older(42)))", "tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,thresh(2,pk(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529),s:pk(9918d400c1b8c3c478340a40117ced4054b6b58f48cdb3c89b836bdfee1f5766),adv:older(42)))", MISSING_PRIVKEYS | XONLY_KEYS | SIGNABLE, {{"512033982eebe204dc66508e4b19cfc31b5ffc6e1bfcbf6e5597dfc2521a52270795"}}, OutputType::BECH32M); + // Note: thresh() uses OP_IF which is forbidden with REDUCED_DATA, so using and_v() instead + Check("tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,and_v(v:pk(L1NKM8dVA1h52mwDrmk1YreTWkAZZTu2vmKLpmLEbFRqGQYjHeEV),pk(Kz3iCBy3HNGP5CZWDsAMmnCMFNwqdDohudVN9fvkrN7tAkzKNtM7)))", "tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,and_v(v:pk(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529),pk(9918d400c1b8c3c478340a40117ced4054b6b58f48cdb3c89b836bdfee1f5766)))", 
"tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,and_v(v:pk(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529),pk(9918d400c1b8c3c478340a40117ced4054b6b58f48cdb3c89b836bdfee1f5766)))", MISSING_PRIVKEYS | XONLY_KEYS | SIGNABLE, {{"51202aca0fdcbfbc513549e2c9490e60ba54e3c345ff01d667c4f846c802c0e7b8f4"}}, OutputType::BECH32M); // Can have a pkh() expression alone as tr() script path (because pkh() is valid Miniscript). Check("tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,pkh(L1NKM8dVA1h52mwDrmk1YreTWkAZZTu2vmKLpmLEbFRqGQYjHeEV))", "tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,pkh(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529))", "tr(a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,pkh(30a6069f344fb784a2b4c99540a91ee727c91e3a25ef6aae867d9c65b5f23529))", MISSING_PRIVKEYS | XONLY_KEYS | SIGNABLE, {{"51201e9875f690f5847404e4c5951e2f029887df0525691ee11a682afd37b608aad4"}}, OutputType::BECH32M); // Can have a Miniscript expression under tr() if it's part of a tree. 
From 5880e394bb920f57553d93328bf81992d1084f55 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Wed, 19 Nov 2025 18:49:20 -0600 Subject: [PATCH 321/356] Add mainnet configuration for REDUCED_DATA deployment --- src/kernel/chainparams.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index ca6adab18e..826a6562b3 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -104,7 +104,7 @@ class CMainParams : public CChainParams { consensus.fPowAllowMinDifficultyBlocks = false; consensus.enforce_BIP94 = false; consensus.fPowNoRetargeting = false; - consensus.nRuleChangeActivationThreshold = 1815; // 90% of 2016 + consensus.nRuleChangeActivationThreshold = 1109; // 55% of 2016 consensus.nMinerConfirmationWindow = 2016; // nPowTargetTimespan / nPowTargetSpacing consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 28; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; @@ -117,6 +117,14 @@ class CMainParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = 1628640000; // August 11th, 2021 consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 709632; // Approximately November 12th, 2021 + // ReducedData Temporary Softfork (RDTS) + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = 1764547200; // December 1st, 2025 + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].max_activation_height = 965664; // ~September 1st, 2026 + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].active_duration = 52416; // ~1 year + consensus.nMinimumChainWork = 
uint256{"0000000000000000000000000000000000000000dee8e2a309ad8a9820433c68"}; consensus.defaultAssumeValid = uint256{"00000000000000000000611fd22f2df7c8fbd0688745c3a6c3bb5109cc2a12cb"}; // 912683 From 6a042999bcb0b41f99208e9914d2c3ff9759f901 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20Haf?= Date: Sun, 14 Dec 2025 14:06:10 +0100 Subject: [PATCH 322/356] test: Fix fuzz for miniscript.cpp --- src/test/fuzz/miniscript.cpp | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/src/test/fuzz/miniscript.cpp b/src/test/fuzz/miniscript.cpp index 60d096bb5a..132f9f9d4d 100644 --- a/src/test/fuzz/miniscript.cpp +++ b/src/test/fuzz/miniscript.cpp @@ -1132,13 +1132,25 @@ void TestNode(const MsCtx script_ctx, const NodeRef& node, FuzzedDataProvider& p SatisfactionToWitness(script_ctx, witness_nonmal, script, builder); ScriptError serror; bool res = VerifyScript(DUMMY_SCRIPTSIG, script_pubkey, &witness_nonmal, STANDARD_SCRIPT_VERIFY_FLAGS, CHECKER_CTX, &serror); - // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions(). - if (node->ValidSatisfactions()) assert(res); + // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions(), unless REDUCED_DATA rules are violated. + if (node->ValidSatisfactions()) { + assert(res || + serror == ScriptError::SCRIPT_ERR_PUSH_SIZE || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS || + serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF); + } // More detailed: non-malleable satisfactions must be valid, or could fail with ops count error (if CheckOpsLimit failed), - // or with a stack size error (if CheckStackSize check failed). + // or with a stack size error (if CheckStackSize check failed), or with REDUCED_DATA-related errors. 
assert(res || (!node->CheckOpsLimit() && serror == ScriptError::SCRIPT_ERR_OP_COUNT) || - (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE)); + (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE) || + serror == ScriptError::SCRIPT_ERR_PUSH_SIZE || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS || + serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF); } if (mal_success && (!nonmal_success || witness_mal.stack != witness_nonmal.stack)) { @@ -1148,8 +1160,15 @@ void TestNode(const MsCtx script_ctx, const NodeRef& node, FuzzedDataProvider& p ScriptError serror; bool res = VerifyScript(DUMMY_SCRIPTSIG, script_pubkey, &witness_mal, STANDARD_SCRIPT_VERIFY_FLAGS, CHECKER_CTX, &serror); // Malleable satisfactions are not guaranteed to be valid under any conditions, but they can only - // fail due to stack or ops limits. - assert(res || serror == ScriptError::SCRIPT_ERR_OP_COUNT || serror == ScriptError::SCRIPT_ERR_STACK_SIZE); + // fail due to stack or ops limits, or REDUCED_DATA-related errors. 
+ assert(res || + serror == ScriptError::SCRIPT_ERR_OP_COUNT || + serror == ScriptError::SCRIPT_ERR_STACK_SIZE || + serror == ScriptError::SCRIPT_ERR_PUSH_SIZE || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_UPGRADABLE_TAPROOT_VERSION || + serror == ScriptError::SCRIPT_ERR_DISCOURAGE_OP_SUCCESS || + serror == ScriptError::SCRIPT_ERR_TAPSCRIPT_MINIMALIF); } if (node->IsSane()) { From e4252a0a5686453008c00f85d1e06e22d51e0e46 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20Haf?= Date: Mon, 15 Dec 2025 10:52:17 +0000 Subject: [PATCH 323/356] test: change permission and remove some f-string in logs --- test/functional/feature_bip9_max_activation_height.py | 2 +- test/functional/feature_rdts.py | 2 +- test/functional/feature_reduced_data_temporary_deployment.py | 0 test/functional/feature_reduced_data_utxo_height.py | 2 +- test/functional/mempool_dust.py | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) mode change 100644 => 100755 test/functional/feature_bip9_max_activation_height.py mode change 100644 => 100755 test/functional/feature_reduced_data_temporary_deployment.py mode change 100644 => 100755 test/functional/feature_reduced_data_utxo_height.py diff --git a/test/functional/feature_bip9_max_activation_height.py b/test/functional/feature_bip9_max_activation_height.py old mode 100644 new mode 100755 index 4b8cebba3c..ea68ca075e --- a/test/functional/feature_bip9_max_activation_height.py +++ b/test/functional/feature_bip9_max_activation_height.py @@ -396,7 +396,7 @@ def run_test(self): assert_equal(node.getblockcount(), 577) # Note: Status may still show 'active' but deployment should no longer be enforced # This matches the behavior in feature_temporary_deployment.py - self.log.info(f"Block 577: Deployment has expired (no longer enforced)") + self.log.info("Block 577: Deployment has expired (no longer enforced)") self.log.info("\n=== TEST 5 COMPLETE ===") self.log.info("SUCCESS: 
Temporary deployment with max_height activated and expired correctly") diff --git a/test/functional/feature_rdts.py b/test/functional/feature_rdts.py index 933fd9289a..b6cfa6f2f8 100755 --- a/test/functional/feature_rdts.py +++ b/test/functional/feature_rdts.py @@ -341,7 +341,7 @@ def test_undefined_witness_versions(self): self.log.info(f" ✓ Witness v{version} spending correctly rejected ({result['reject-reason']})") # All undefined versions (v2-v16) are validated identically - self.log.info(f" ✓ Witness versions v2-v16 are all similarly rejected") + self.log.info(" ✓ Witness versions v2-v16 are all similarly rejected") def test_taproot_annex_rejection(self): """Test spec 4: Witness stacks with a Taproot annex are invalid.""" diff --git a/test/functional/feature_reduced_data_temporary_deployment.py b/test/functional/feature_reduced_data_temporary_deployment.py old mode 100644 new mode 100755 diff --git a/test/functional/feature_reduced_data_utxo_height.py b/test/functional/feature_reduced_data_utxo_height.py old mode 100644 new mode 100755 index e24e7ae7fc..999c9c2811 --- a/test/functional/feature_reduced_data_utxo_height.py +++ b/test/functional/feature_reduced_data_utxo_height.py @@ -432,7 +432,7 @@ def run_test(self): result = node.submitblock(block.serialize().hex()) assert result is not None and 'mandatory-script-verify-flag-failed' in result, f"Expected rejection, got: {result}" - self.log.info(f"✓ SUCCESS: Mixed transaction REJECTED (new input violated rules, even though old input was exempt)") + self.log.info("✓ SUCCESS: Mixed transaction REJECTED (new input violated rules, even though old input was exempt)") # Restore chain node.reconsiderblock(current_tip2) diff --git a/test/functional/mempool_dust.py b/test/functional/mempool_dust.py index 557c2938f7..1a25838c85 100755 --- a/test/functional/mempool_dust.py +++ b/test/functional/mempool_dust.py @@ -163,7 +163,7 @@ def test_output_size_limit(self): tx.vout.append(CTxOut(nValue=1000, 
scriptPubKey=script_34)) res = node.testmempoolaccept([tx.serialize().hex()])[0] assert_equal(res['allowed'], True) - self.log.info(f" ✓ Exactly 34 bytes accepted (boundary)") + self.log.info(" ✓ Exactly 34 bytes accepted (boundary)") # 35 bytes should fail (create a witness program v0 with 33-byte data - invalid but tests size) script_35 = CScript([0, bytes(33)]) # OP_0 + 33 bytes = 35 bytes From 35a6d5a64b0af17c34aca8d9063ad5ed977c605a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20Haf?= Date: Mon, 15 Dec 2025 13:17:29 +0000 Subject: [PATCH 324/356] test: use the correct flag for ignore_rejects in feature_uasf_reduced_data.py --- test/functional/feature_rdts.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/feature_rdts.py b/test/functional/feature_rdts.py index b6cfa6f2f8..b360624592 100755 --- a/test/functional/feature_rdts.py +++ b/test/functional/feature_rdts.py @@ -761,7 +761,7 @@ def test_mandatory_flags_cannot_be_bypassed(self): self.log.info(" This bypasses PolicyScriptChecks but NOT ConsensusScriptChecks") result_bypass = node.testmempoolaccept( rawtxs=[spending_tx_257.serialize().hex()], - ignore_rejects=["non-mandatory-script-verify-flag"] + ignore_rejects=["mempool-script-verify-flag-failed"] )[0] # The transaction should still be rejected because ConsensusScriptChecks From 85e16e1478b43a318cbad66371f388e26fa26781 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Mon, 22 Dec 2025 10:42:57 -0600 Subject: [PATCH 325/356] test: Add retry logic to mempool_limit for i686 race condition --- test/functional/mempool_limit.py | 44 +++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/test/functional/mempool_limit.py b/test/functional/mempool_limit.py index 1398b2cd54..41ebd72b4c 100755 --- a/test/functional/mempool_limit.py +++ b/test/functional/mempool_limit.py @@ -5,7 +5,9 @@ """Test mempool limiting together/eviction with the wallet.""" from decimal import Decimal +import time 
+from test_framework.authproxy import JSONRPCException from test_framework.mempool_util import ( fill_mempool, ) @@ -200,12 +202,42 @@ def test_mid_package_eviction(self): # Mempool transaction which is evicted due to being at the "bottom" of the mempool when the # mempool overflows and evicts by descendant score. It's important that the eviction doesn't # happen in the middle of package evaluation, as it can invalidate the coins cache. - mempool_evicted_tx = self.wallet.send_self_transfer( - from_node=node, - fee_rate=mempoolmin_feerate, - target_vsize=evicted_vsize, - confirmed_only=True - ) + # + # NOTE: On 32-bit systems (i686), there's a race condition where concurrent transaction additions + # can cause the mempool to repeatedly exceed the limit, causing immediate eviction of low-fee + # transactions. We retry with exponential backoff to handle this scenario. + mempool_evicted_tx = None + max_retries = 20 + for attempt in range(max_retries): + try: + # Brief backoff on retries to let concurrent operations settle + if attempt > 0: + backoff = min(0.05 * (2 ** (attempt - 1)), 2.0) # Exponential backoff, max 2 seconds + self.log.info(f"Retry attempt {attempt + 1}/{max_retries} after {backoff:.2f}s backoff...") + time.sleep(backoff) + # Rescan UTXOs to recover any that failed to be added + self.wallet.rescan_utxos() + # Update minimum feerate as it may have increased during retries + mempoolmin_feerate = node.getmempoolinfo()["mempoolminfee"] + + mempool_evicted_tx = self.wallet.send_self_transfer( + from_node=node, + fee_rate=mempoolmin_feerate, + target_vsize=evicted_vsize, + confirmed_only=True + ) + if attempt > 0: + self.log.info(f"Successfully added transaction on attempt {attempt + 1}") + break + except JSONRPCException as e: + if e.error['code'] == -26: # mempool full or min fee not met + if attempt < max_retries - 1: + continue + else: + self.log.error(f"Failed to add transaction after {max_retries} attempts due to race condition") + raise + + assert 
mempool_evicted_tx is not None, "Failed to add transaction after retries" # Already in mempool when package is submitted. assert mempool_evicted_tx["txid"] in node.getrawmempool() From b90874c614e94d3868442c2f854dceefbd684d8f Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Mon, 29 Dec 2025 15:14:08 -0600 Subject: [PATCH 326/356] Add per-deployment BIP9 threshold; restore global to 90%, set reduced_data to 55% --- src/consensus/params.h | 3 +++ src/kernel/chainparams.cpp | 5 ++++- src/versionbits.cpp | 6 +++++- 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/src/consensus/params.h b/src/consensus/params.h index 0c765805bd..c2ac24bfb5 100644 --- a/src/consensus/params.h +++ b/src/consensus/params.h @@ -60,6 +60,9 @@ struct BIP9Deployment { /** For temporary softforks: number of blocks the deployment remains active after activation. * std::numeric_limits::max() means permanent (never expires). */ int active_duration{std::numeric_limits::max()}; + /** Per-deployment activation threshold. If 0, uses the global nRuleChangeActivationThreshold. + * Otherwise, specifies the number of blocks required for this specific deployment. */ + int threshold{0}; /** Constant for nTimeout very far in the future. 
*/ static constexpr int64_t NO_TIMEOUT = std::numeric_limits::max(); diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 826a6562b3..f9999d9af8 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -104,7 +104,7 @@ class CMainParams : public CChainParams { consensus.fPowAllowMinDifficultyBlocks = false; consensus.enforce_BIP94 = false; consensus.fPowNoRetargeting = false; - consensus.nRuleChangeActivationThreshold = 1109; // 55% of 2016 + consensus.nRuleChangeActivationThreshold = 1815; // 90% of 2016 consensus.nMinerConfirmationWindow = 2016; // nPowTargetTimespan / nPowTargetSpacing consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].bit = 28; consensus.vDeployments[Consensus::DEPLOYMENT_TESTDUMMY].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; @@ -124,6 +124,7 @@ class CMainParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].max_activation_height = 965664; // ~September 1st, 2026 consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].active_duration = 52416; // ~1 year + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].threshold = 1109; // 55% of 2016 consensus.nMinimumChainWork = uint256{"0000000000000000000000000000000000000000dee8e2a309ad8a9820433c68"}; consensus.defaultAssumeValid = uint256{"00000000000000000000611fd22f2df7c8fbd0688745c3a6c3bb5109cc2a12cb"}; // 912683 @@ -294,6 +295,7 @@ class CTestNetParams : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].active_duration = 52416; // ~1 year + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].threshold = 1109; // 55% of 2016 consensus.nMinimumChainWork = 
uint256{"0000000000000000000000000000000000000000000015f5e0c9f13455b0eb17"}; consensus.defaultAssumeValid = uint256{"00000000000003fc7967410ba2d0a8a8d50daedc318d43e8baf1a9782c236a57"}; // 3974606 @@ -660,6 +662,7 @@ class CRegTestParams : public CChainParams consensus.vDeployments[deployment_pos].min_activation_height = version_bits_params.min_activation_height; consensus.vDeployments[deployment_pos].max_activation_height = version_bits_params.max_activation_height; consensus.vDeployments[deployment_pos].active_duration = version_bits_params.active_duration; + consensus.vDeployments[deployment_pos].threshold = version_bits_params.threshold; } genesis = CreateGenesisBlock(1296688602, 2, 0x207fffff, 1, 50 * COIN); diff --git a/src/versionbits.cpp b/src/versionbits.cpp index d37c0139e7..bfd9152d3d 100644 --- a/src/versionbits.cpp +++ b/src/versionbits.cpp @@ -195,7 +195,11 @@ class VersionBitsConditionChecker : public AbstractThresholdConditionChecker { int MinActivationHeight(const Consensus::Params& params) const override { return params.vDeployments[id].min_activation_height; } int MaxActivationHeight(const Consensus::Params& params) const override { return params.vDeployments[id].max_activation_height; } int Period(const Consensus::Params& params) const override { return params.nMinerConfirmationWindow; } - int Threshold(const Consensus::Params& params) const override { return params.nRuleChangeActivationThreshold; } + int Threshold(const Consensus::Params& params) const override { + // Use per-deployment threshold if set, otherwise fall back to global + int deploymentThreshold = params.vDeployments[id].threshold; + return deploymentThreshold > 0 ? 
deploymentThreshold : params.nRuleChangeActivationThreshold; + } bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override { From f3db8b9fbf7e5e0e01715b8153fee6fa9843d1ca Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Mon, 29 Dec 2025 15:16:13 -0600 Subject: [PATCH 327/356] Support regtest vbparams for per-deployment threshold --- src/chainparams.cpp | 11 ++- src/kernel/chainparams.h | 1 + .../feature_bip9_max_activation_height.py | 68 ++++++++++++++++++- 3 files changed, 76 insertions(+), 4 deletions(-) diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 10b0947bed..c123fa5be5 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -77,8 +77,8 @@ void ReadRegTestArgs(const ArgsManager& args, CChainParams::RegTestOptions& opti for (const std::string& strDeployment : args.GetArgs("-vbparams")) { std::vector vDeploymentParams = SplitString(strDeployment, ':'); - if (vDeploymentParams.size() < 3 || 6 < vDeploymentParams.size()) { - throw std::runtime_error("Version bits parameters malformed, expecting deployment:start:end[:min_activation_height[:max_activation_height[:active_duration]]]"); + if (vDeploymentParams.size() < 3 || 7 < vDeploymentParams.size()) { + throw std::runtime_error("Version bits parameters malformed, expecting deployment:start:end[:min_activation_height[:max_activation_height[:active_duration[:threshold]]]]"); } CChainParams::VersionBitsParameters vbparams{}; if (!ParseInt64(vDeploymentParams[1], &vbparams.start_time)) { @@ -104,6 +104,11 @@ void ReadRegTestArgs(const ArgsManager& args, CChainParams::RegTestOptions& opti throw std::runtime_error(strprintf("Invalid active_duration (%s)", vDeploymentParams[5])); } } + if (vDeploymentParams.size() >= 7) { + if (!ParseInt32(vDeploymentParams[6], &vbparams.threshold)) { + throw std::runtime_error(strprintf("Invalid threshold (%s)", vDeploymentParams[6])); + } + } // Validate that timeout and max_activation_height are mutually exclusive if (vbparams.timeout 
!= Consensus::BIP9Deployment::NO_TIMEOUT && vbparams.max_activation_height < std::numeric_limits::max()) { throw std::runtime_error(strprintf("Cannot specify both timeout (%ld) and max_activation_height (%d) for deployment %s. Use timeout for BIP9 or max_activation_height for mandatory activation deadline, not both.", vbparams.timeout, vbparams.max_activation_height, vDeploymentParams[0])); @@ -113,7 +118,7 @@ void ReadRegTestArgs(const ArgsManager& args, CChainParams::RegTestOptions& opti if (vDeploymentParams[0] == VersionBitsDeploymentInfo[j].name) { options.version_bits_parameters[Consensus::DeploymentPos(j)] = vbparams; found = true; - LogPrintf("Setting version bits activation parameters for %s to start=%ld, timeout=%ld, min_activation_height=%d, max_activation_height=%d, active_duration=%d\n", vDeploymentParams[0], vbparams.start_time, vbparams.timeout, vbparams.min_activation_height, vbparams.max_activation_height, vbparams.active_duration); + LogPrintf("Setting version bits activation parameters for %s to start=%ld, timeout=%ld, min_activation_height=%d, max_activation_height=%d, active_duration=%d, threshold=%d\n", vDeploymentParams[0], vbparams.start_time, vbparams.timeout, vbparams.min_activation_height, vbparams.max_activation_height, vbparams.active_duration, vbparams.threshold); break; } } diff --git a/src/kernel/chainparams.h b/src/kernel/chainparams.h index 4e70a3392e..09d2ddfee5 100644 --- a/src/kernel/chainparams.h +++ b/src/kernel/chainparams.h @@ -155,6 +155,7 @@ class CChainParams int min_activation_height; int max_activation_height{std::numeric_limits::max()}; int active_duration{std::numeric_limits::max()}; + int threshold{0}; // 0 means use global nRuleChangeActivationThreshold }; /** diff --git a/test/functional/feature_bip9_max_activation_height.py b/test/functional/feature_bip9_max_activation_height.py index ea68ca075e..257a3175d3 100755 --- a/test/functional/feature_bip9_max_activation_height.py +++ 
b/test/functional/feature_bip9_max_activation_height.py @@ -35,16 +35,19 @@ class MaxActivationHeightTest(BitcoinTestFramework): def set_test_params(self): - self.num_nodes = 5 # 5 nodes for tests 1-5 (test 0 validation is done separately) + self.num_nodes = 6 # 6 nodes for tests 1-6 (test 0 validation is done separately) self.setup_clean_chain = True # NO_TIMEOUT = std::numeric_limits::max() = 9223372036854775807 NO_TIMEOUT = '9223372036854775807' + # INT_MAX = std::numeric_limits::max() = 2147483647 + INT_MAX = '2147483647' self.extra_args = [ [f'-vbparams=testdummy:0:{NO_TIMEOUT}:0:576'], # Test 1: max_height=576 (shows full flow) ['-vbparams=testdummy:0:999999999999'], # Test 2: no max_height (uses timeout) [f'-vbparams=testdummy:0:{NO_TIMEOUT}:0:576'], # Test 3: max_height=576 (early activation) [f'-vbparams=testdummy:0:{NO_TIMEOUT}:0:432'], # Test 4: verify permanent ACTIVE [f'-vbparams=testdummy:0:{NO_TIMEOUT}:0:432:144'], # Test 5: max_height + active_duration + [f'-vbparams=testdummy:0:999999999999:0:{INT_MAX}:{INT_MAX}:72'], # Test 6: custom 50% threshold (72/144) ] def setup_network(self): @@ -401,6 +404,69 @@ def run_test(self): self.log.info("\n=== TEST 5 COMPLETE ===") self.log.info("SUCCESS: Temporary deployment with max_height activated and expired correctly") + # Test 6: Custom per-deployment threshold + self.log.info("\n\n=== TEST 6: Custom per-deployment threshold (50% = 72/144 blocks) ===") + node = self.nodes[5] + + # This node has threshold=72 (50% of 144 blocks) + # Default regtest threshold is 108 (75%), but this deployment should activate at 72 + + # Period 0 (0-143): DEFINED + self.log.info("Mining period 0 (blocks 0-143) without signaling...") + self.mine_blocks(node, 143, signal=False) + assert_equal(node.getblockcount(), 143) + status, _ = self.get_status(node) + assert_equal(status, 'defined') + + # Block 144: Transition to STARTED + self.log.info("Mining block 144 to transition to STARTED...") + self.mine_blocks(node, 1, signal=False) 
+ assert_equal(node.getblockcount(), 144) + status, since = self.get_status(node) + self.log.info(f"Block 144: Status={status}, Since={since}") + assert_equal(status, 'started') + assert_equal(since, 144) + + # Period 1 (144-287): Mine exactly 72 signaling blocks (50%) + # With custom threshold of 72, this should be enough to lock in + self.log.info("Mining period 1 with exactly 72 signaling blocks (50%)...") + self.mine_blocks(node, 72, signal=True) # 72 signaling blocks + self.mine_blocks(node, 71, signal=False) # 71 non-signaling blocks + assert_equal(node.getblockcount(), 287) + status, since = self.get_status(node) + self.log.info(f"Block 287: Status={status}") + assert_equal(status, 'started') # Still started until next period boundary + + # Block 288: Should transition to LOCKED_IN (threshold met in previous period) + self.log.info("Mining block 288 to check lock-in...") + self.mine_blocks(node, 1, signal=False) + assert_equal(node.getblockcount(), 288) + status, since = self.get_status(node) + self.log.info(f"Block 288: Status={status}, Since={since}") + assert_equal(status, 'locked_in') + assert_equal(since, 288) + + # Mine through locked_in period to activate + self.log.info("Mining through locked_in period (289-431)...") + self.mine_blocks(node, 143, signal=False) + assert_equal(node.getblockcount(), 431) + status, since = self.get_status(node) + assert_equal(status, 'locked_in') + + # Block 432: Should transition to ACTIVE + self.log.info("Mining block 432 to activate...") + self.mine_blocks(node, 1, signal=False) + assert_equal(node.getblockcount(), 432) + status, since = self.get_status(node) + self.log.info(f"Block 432: Status={status}, Since={since}") + assert_equal(status, 'active') + assert_equal(since, 432) + + self.log.info("\n=== TEST 6 COMPLETE ===") + self.log.info("SUCCESS: Deployment activated with custom 50% threshold (72/144 blocks)") + self.log.info("- Custom threshold overrode default 75% threshold") + self.log.info("- Lock-in occurred 
with only 50% signaling support") + if __name__ == '__main__': MaxActivationHeightTest(__file__).main() From 66cbf5deb3989746647b78b1eb88bb33adbe3f03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C3=A9o=20Haf?= Date: Wed, 31 Dec 2025 12:32:49 +0100 Subject: [PATCH 328/356] net: ask DNS seed for x8000009 --- src/kernel/chainparams.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index f9999d9af8..158d1d2c6d 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -155,8 +155,8 @@ class CMainParams : public CChainParams { // release ASAP to avoid it where possible. vSeeds.emplace_back("seed.bitcoin.sipa.be."); // Pieter Wuille, only supports x1, x5, x9, and xd vSeeds.emplace_back("dnsseed.bluematt.me."); // Matt Corallo, only supports x9 - vSeeds.emplace_back("dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us."); // Luke Dashjr - vSeeds.emplace_back("seed.bitcoin.haf.ovh."); // Léo Haf + vSeeds.emplace_back("dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us."); // Luke Dashjr, support BIP110 seeding (x8000009) + vSeeds.emplace_back("seed.bitcoin.haf.ovh."); // Léo Haf, support BIP110 seeding (x8000009) vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch."); // Jonas Schnelli, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.btc.petertodd.net."); // Peter Todd, only supports x1, x5, x9, and xd vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors Provoost From 336c5389e6b7d2c07beac1336ac20ae66cf2cb58 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Tue, 13 Jan 2026 14:53:47 -0600 Subject: [PATCH 329/356] net: allow up to 2 non-BIP110 outbound peers --- src/net.cpp | 14 +++++---- src/net.h | 8 +++-- src/net_processing.cpp | 26 ++++++++++++++-- src/test/peerman_tests.cpp | 15 ++++----- test/functional/p2p_handshake.py | 53 +++++++++++++++++++++++++++----- 5 files changed, 91 insertions(+), 25 deletions(-) diff --git a/src/net.cpp b/src/net.cpp index f5413dbfc9..6ba9c033ae 
100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -2324,7 +2324,7 @@ void CConnman::ThreadDNSAddressSeed() break; } - outbound_connection_count = GetFullOutboundConnCount(); + outbound_connection_count = GetBIP110FullOutboundConnCount(); if (outbound_connection_count >= SEED_OUTBOUND_CONNECTION_THRESHOLD) { LogPrintf("P2P peers available. Finished fetching data from seed nodes.\n"); break; @@ -2379,7 +2379,7 @@ void CConnman::ThreadDNSAddressSeed() if (!interruptNet.sleep_for(w)) return; to_wait -= w; - if (GetFullOutboundConnCount() >= SEED_OUTBOUND_CONNECTION_THRESHOLD) { + if (GetBIP110FullOutboundConnCount() >= SEED_OUTBOUND_CONNECTION_THRESHOLD) { if (found > 0) { LogPrintf("%d addresses found from DNS seeds\n", found); LogPrintf("P2P peers available. Finished DNS seeding.\n"); @@ -2492,14 +2492,15 @@ void CConnman::StartExtraBlockRelayPeers() m_start_extra_block_relay_peers = true; } -// Return the number of outbound connections that are full relay (not blocks only) -int CConnman::GetFullOutboundConnCount() const +// Return the number of BIP110 outbound connections that are full relay (not blocks only). +// Non-BIP110 outbound peers are excluded as they are "additional" and don't count toward limits. 
+int CConnman::GetBIP110FullOutboundConnCount() const { int nRelevant = 0; { LOCK(m_nodes_mutex); for (const CNode* pnode : m_nodes) { - if (pnode->fSuccessfullyConnected && pnode->IsFullOutboundConn()) ++nRelevant; + if (pnode->fSuccessfullyConnected && pnode->IsFullOutboundConn() && !pnode->m_is_non_bip110_outbound) ++nRelevant; } } return nRelevant; @@ -2707,7 +2708,8 @@ void CConnman::ThreadOpenConnections(const std::vector connect, Spa { LOCK(m_nodes_mutex); for (const CNode* pnode : m_nodes) { - if (pnode->IsFullOutboundConn()) nOutboundFullRelay++; + // Non-BIP110 outbound peers are "additional" - don't count toward limits + if (pnode->IsFullOutboundConn() && !pnode->m_is_non_bip110_outbound) nOutboundFullRelay++; if (pnode->IsBlockOnlyConn()) nOutboundBlockRelay++; // Make sure our persistent outbound slots to ipv4/ipv6 peers belong to different netgroups. diff --git a/src/net.h b/src/net.h index f913c4e07b..7184567d2a 100644 --- a/src/net.h +++ b/src/net.h @@ -865,6 +865,10 @@ class CNode /** Whether this peer provides all services that we want. Used for eviction decisions */ std::atomic_bool m_has_all_wanted_services{false}; + /** Whether this is a non-BIP110 outbound peer (lacks NODE_REDUCED_DATA). + * Used to exclude from outbound connection counts. Limited to 2 such peers. */ + std::atomic_bool m_is_non_bip110_outbound{false}; + /** Whether we should relay transactions to this peer. This only changes * from false to true. It will never change back to false. */ std::atomic_bool m_relays_txs{false}; @@ -1240,8 +1244,8 @@ class CConnman void StartExtraBlockRelayPeers(); - // Count the number of full-relay peer we have. - int GetFullOutboundConnCount() const; + // Count the number of BIP110 full-relay peers we have (excludes non-BIP110 peers). 
+ int GetBIP110FullOutboundConnCount() const; // Return the number of outbound peers we have in excess of our target (eg, // if we previously called SetTryNewOutboundPeer(true), and have since set // to false, we may have extra peers that we wish to disconnect). This may diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 3747b820c8..ff15ad22bd 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -799,6 +799,9 @@ class PeerManagerImpl final : public PeerManager /** Number of peers with wtxid relay. */ std::atomic m_wtxid_relay_peers{0}; + /** Number of outbound peers without NODE_REDUCED_DATA (BIP-110). Limited to 2. */ + std::atomic m_num_non_bip110_outbound{0}; + /** Number of outbound peers with m_chain_sync.m_protect. */ int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; @@ -1594,6 +1597,11 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) assert(peer != nullptr); m_wtxid_relay_peers -= peer->m_wtxid_relay; assert(m_wtxid_relay_peers >= 0); + // Decrement non-BIP110 counter if this was a non-BIP110 outbound peer + if (node.m_is_non_bip110_outbound) { + --m_num_non_bip110_outbound; + assert(m_num_non_bip110_outbound >= 0); + } } CNodeState *state = State(nodeid); assert(state != nullptr); @@ -1657,14 +1665,13 @@ bool PeerManagerImpl::HasAllDesirableServiceFlags(ServiceFlags services) const ServiceFlags PeerManagerImpl::GetDesirableServiceFlags(ServiceFlags services) const { - // We want to preferentially peer with other nodes that enforce UASF-ReducedData, in case of a chain split if (services & NODE_NETWORK_LIMITED) { // Limited peers are desirable when we are close to the tip. 
if (ApproximateBestBlockDepth() < NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS) { - return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA); + return ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS); } } - return ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA); + return ServiceFlags(NODE_NETWORK | NODE_WITNESS); } PeerRef PeerManagerImpl::GetPeerRef(NodeId id) const @@ -3536,6 +3543,19 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } pfrom.m_has_all_wanted_services = HasAllDesirableServiceFlags(nServices); + // BIP-110: Allow up to 2 non-BIP110 outbound peers. + if (pfrom.ExpectServicesFromConn() && !(nServices & NODE_REDUCED_DATA)) { + if (m_num_non_bip110_outbound >= 2) { + LogDebug(BCLog::NET, "peer lacks NODE_REDUCED_DATA and already have 2 non-BIP110 outbound peers, %s\n", + pfrom.DisconnectMsg(fLogIPs)); + pfrom.fDisconnect = true; + return; + } + ++m_num_non_bip110_outbound; + pfrom.m_is_non_bip110_outbound = true; + LogDebug(BCLog::NET, "connected to non-BIP110 outbound peer (%d/2), %s\n", + m_num_non_bip110_outbound.load(), pfrom.ConnectionTypeAsString()); + } peer->m_their_services = nServices; pfrom.SetAddrLocal(addrMe); peer->m_starting_height = starting_height; diff --git a/src/test/peerman_tests.cpp b/src/test/peerman_tests.cpp index eaa0d2e905..052e8bf354 100644 --- a/src/test/peerman_tests.cpp +++ b/src/test/peerman_tests.cpp @@ -35,8 +35,9 @@ BOOST_AUTO_TEST_CASE(connections_desirable_service_flags) auto consensus = m_node.chainman->GetParams().GetConsensus(); // Check we start connecting to full nodes + // Note: NODE_REDUCED_DATA requirement is enforced separately in VERSION processing ServiceFlags peer_flags{NODE_WITNESS | NODE_NETWORK_LIMITED}; - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS)); // 
Make peerman aware of the initial best block and verify we accept limited peers when we start close to the tip time. auto tip = WITH_LOCK(::cs_main, return m_node.chainman->ActiveChain().Tip()); @@ -45,15 +46,15 @@ BOOST_AUTO_TEST_CASE(connections_desirable_service_flags) peerman->SetBestBlock(tip_block_height, std::chrono::seconds{tip_block_time}); SetMockTime(tip_block_time + 1); // Set node time to tip time - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS)); // Check we don't disallow limited peers connections when we are behind but still recoverable (below the connection safety window) SetMockTime(GetTime() + std::chrono::seconds{consensus.nPowTargetSpacing * (NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS - 1)}); - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS)); // Check we disallow limited peers connections when we are further than the limited peers safety window SetMockTime(GetTime() + std::chrono::seconds{consensus.nPowTargetSpacing * 2}); - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS)); // By now, we tested that the connections desirable services flags change based on the node's time proximity to the tip. // Now, perform the same tests for when the node receives a block. 
@@ -62,15 +63,15 @@ BOOST_AUTO_TEST_CASE(connections_desirable_service_flags) // First, verify a block in the past doesn't enable limited peers connections // At this point, our time is (NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS + 1) * 10 minutes ahead the tip's time. mineBlock(m_node, /*block_time=*/std::chrono::seconds{tip_block_time + 1}); - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS)); // Verify a block close to the tip enables limited peers connections mineBlock(m_node, /*block_time=*/GetTime()); - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK_LIMITED | NODE_WITNESS)); // Lastly, verify the stale tip checks can disallow limited peers connections after not receiving blocks for a prolonged period. SetMockTime(GetTime() + std::chrono::seconds{consensus.nPowTargetSpacing * NODE_NETWORK_LIMITED_ALLOW_CONN_BLOCKS + 1}); - BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA)); + BOOST_CHECK(peerman->GetDesirableServiceFlags(peer_flags) == ServiceFlags(NODE_NETWORK | NODE_WITNESS)); } BOOST_AUTO_TEST_SUITE_END() diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py index 257003859e..9bf054b982 100755 --- a/test/functional/p2p_handshake.py +++ b/test/functional/p2p_handshake.py @@ -35,8 +35,14 @@ # the desirable service flags for pruned peers are dynamic and only apply if # 1. the peer's service flag NODE_NETWORK_LIMITED is set *and* # 2. 
the local chain is close to the tip (<24h) -DESIRABLE_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA -DESIRABLE_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA + +# Base service flags (without BIP-110) +BASE_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS +BASE_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS + +# Full service flags (with BIP-110) +FULL_SERVICE_FLAGS_FULL = NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA +FULL_SERVICE_FLAGS_PRUNED = NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA class P2PHandshakeTest(BitcoinTestFramework): @@ -96,19 +102,52 @@ def generate_at_mocktime(self, time): def run_test(self): node = self.nodes[0] - self.log.info("Check that lacking desired service flags leads to disconnect (non-pruned peers)") + + self.log.info("Check that peers lacking base service flags are disconnected") + # These should always be disconnected regardless of BIP-110 counter self.test_desirable_service_flags(node, [NODE_NONE, NODE_NETWORK, NODE_WITNESS], - DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True) + BASE_SERVICE_FLAGS_FULL, expect_disconnect=True) + + self.log.info("Check that first 2 non-BIP110 peers connect, 3rd is rejected") + # Connect first 2 non-BIP110 peers and keep them connected + non_bip110_services = NODE_NETWORK | NODE_WITNESS + if self.options.v2transport: + non_bip110_services |= NODE_P2P_V2 + peer1 = node.add_outbound_p2p_connection( + P2PInterface(), p2p_idx=0, wait_for_disconnect=False, + connection_type="outbound-full-relay", services=non_bip110_services, + supports_v2_p2p=self.options.v2transport, advertise_v2_p2p=self.options.v2transport) + peer1.sync_with_ping() + peer2 = node.add_outbound_p2p_connection( + P2PInterface(), p2p_idx=1, wait_for_disconnect=False, + connection_type="outbound-full-relay", services=non_bip110_services, + supports_v2_p2p=self.options.v2transport, advertise_v2_p2p=self.options.v2transport) + peer2.sync_with_ping() + assert 
len(node.getpeerinfo()) == 2 + # Third non-BIP110 peer should be rejected + with node.assert_debug_log(["peer lacks NODE_REDUCED_DATA and already have 2 non-BIP110 outbound peers"]): + node.add_outbound_p2p_connection( + P2PInterface(), p2p_idx=2, wait_for_disconnect=True, + connection_type="outbound-full-relay", services=non_bip110_services, + supports_v2_p2p=self.options.v2transport, advertise_v2_p2p=self.options.v2transport) + # Clean up - disconnect the 2 non-BIP110 peers + peer1.peer_disconnect() + peer2.peer_disconnect() + peer1.wait_for_disconnect() + peer2.wait_for_disconnect() + self.wait_until(lambda: len(node.getpeerinfo()) == 0) + + self.log.info("Check that BIP110 peers always connect") self.test_desirable_service_flags(node, [NODE_NETWORK | NODE_WITNESS | NODE_REDUCED_DATA], - DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=False) + BASE_SERVICE_FLAGS_FULL, expect_disconnect=False) self.log.info("Check that limited peers are only desired if the local chain is close to the tip (<24h)") self.generate_at_mocktime(int(time.time()) - 25 * 3600) # tip outside the 24h window, should fail self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA], - DESIRABLE_SERVICE_FLAGS_FULL, expect_disconnect=True) + BASE_SERVICE_FLAGS_FULL, expect_disconnect=True) self.generate_at_mocktime(int(time.time()) - 23 * 3600) # tip inside the 24h window, should succeed self.test_desirable_service_flags(node, [NODE_NETWORK_LIMITED | NODE_WITNESS | NODE_REDUCED_DATA], - DESIRABLE_SERVICE_FLAGS_PRUNED, expect_disconnect=False) + BASE_SERVICE_FLAGS_PRUNED, expect_disconnect=False) self.log.info("Check that feeler connections get disconnected immediately") with node.assert_debug_log(["feeler connection completed"]): From 2146661f6b7f6505f24195ad5bcdb6d53fca8e2e Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Tue, 13 Jan 2026 21:34:02 -0600 Subject: [PATCH 330/356] test: add two-node chain split and reorg tests for temporary deployment --- 
...ature_reduced_data_temporary_deployment.py | 332 +++++++++++------- 1 file changed, 204 insertions(+), 128 deletions(-) diff --git a/test/functional/feature_reduced_data_temporary_deployment.py b/test/functional/feature_reduced_data_temporary_deployment.py index c89b679562..f6a4641ab8 100755 --- a/test/functional/feature_reduced_data_temporary_deployment.py +++ b/test/functional/feature_reduced_data_temporary_deployment.py @@ -8,16 +8,24 @@ after the specified number of blocks. We use REDUCED_DATA as the test deployment with active_duration=144 blocks. -The test verifies two critical behaviors: -1. Consensus rules ARE enforced during the active period (blocks 432-575) -2. Consensus rules STOP being enforced after expiry (block 576+) +The test uses two nodes: +- Node 0: BIP-110 enforcing (active_duration=144) +- Node 1: Non-BIP-110 (never active, simulates Bitcoin Core) + +The test verifies: +1. BIP9 state transitions: DEFINED -> STARTED -> LOCKED_IN -> ACTIVE +2. Consensus rules ARE enforced during the active period (blocks 432-575) +3. Chain split: BIP-110 node rejects invalid blocks, non-BIP-110 accepts +4. Reorg: Longer valid chain wins when nodes reconnect +5. Consensus rules STOP being enforced after expiry (block 576+) +6. 
Post-expiry convergence: Both nodes accept the same blocks Expected timeline: - Period 0 (blocks 0-143): DEFINED - Period 1 (blocks 144-287): STARTED (signaling happens here) - Period 2 (blocks 288-431): LOCKED_IN -- Period 3 (blocks 432-575): ACTIVE (144 blocks total, from activation_height 432 to 575 inclusive) -- Block 576+: EXPIRED (deployment no longer active, rules no longer enforced) +- Period 3 (blocks 432-575): ACTIVE (144 blocks, rules enforced on node0 only) +- Block 576+: EXPIRED (rules no longer enforced, nodes converge) """ from test_framework.blocktools import ( @@ -42,22 +50,28 @@ class TemporaryDeploymentTest(BitcoinTestFramework): def set_test_params(self): - self.num_nodes = 1 + self.num_nodes = 2 self.setup_clean_chain = True - # Set active_duration to 144 blocks (1 period) for REDUCED_DATA - # Format: deployment:start:end:min_activation_height:max_activation_height:active_duration - # start=0, timeout=999999999999, min_activation_height=0, max_activation_height=2147483647 (INT_MAX, disabled), active_duration=144 - self.extra_args = [[ - '-vbparams=reduced_data:0:999999999999:0:2147483647:144', - '-acceptnonstdtxn=1', - ]] - - def create_test_block(self, txs, signal=False): - """Create a block with the given transactions.""" - tip = self.nodes[0].getbestblockhash() - height = self.nodes[0].getblockcount() + 1 - tip_header = self.nodes[0].getblockheader(tip) - block_time = tip_header['time'] + 1 + # Node 0: BIP-110 with active_duration=144 blocks + # Node 1: BIP-110 never active (simulates Bitcoin Core) + # NEVER_ACTIVE = -2 for start_time prevents deployment from ever leaving DEFINED state + self.extra_args = [ + ['-vbparams=reduced_data:0:999999999999:0:2147483647:144', '-acceptnonstdtxn=1'], + ['-vbparams=reduced_data:-2:-1', '-acceptnonstdtxn=1'], + ] + + def setup_network(self): + self.setup_nodes() + self.connect_nodes(0, 1) + + def create_block_for_node(self, node, txs=None, signal=False, time_offset=0): + """Create a block for a specific 
node.""" + if txs is None: + txs = [] + tip = node.getbestblockhash() + height = node.getblockcount() + 1 + tip_header = node.getblockheader(tip) + block_time = tip_header['time'] + 1 + time_offset block = create_block(int(tip, 16), create_coinbase(height), ntime=block_time, txlist=txs) if signal: block.nVersion = VERSIONBITS_TOP_BITS | (1 << REDUCED_DATA_BIT) @@ -65,149 +79,211 @@ def create_test_block(self, txs, signal=False): block.solve() return block - def mine_blocks(self, count, signal=False): - """Mine count blocks, optionally signaling for REDUCED_DATA.""" + def mine_blocks_on_node(self, node, count, signal=False): + """Mine count blocks on a specific node.""" for _ in range(count): - block = self.create_test_block([], signal=signal) - self.nodes[0].submitblock(block.serialize().hex()) + block = self.create_block_for_node(node, signal=signal) + node.submitblock(block.serialize().hex()) - def create_tx_with_data(self, data_size): - """Create a transaction with OP_RETURN output of specified size.""" - # Start with a valid transaction from the wallet - tx_dict = self.wallet.create_self_transfer() + def create_tx_with_large_output(self, wallet): + """Create a transaction with 84-byte OP_RETURN (violates BIP-110's 83-byte limit).""" + tx_dict = wallet.create_self_transfer() tx = tx_dict['tx'] - - # Add an OP_RETURN output with specified data size - tx.vout.append(CTxOut(0, CScript([OP_RETURN, b'x' * data_size]))) + # 81 bytes data = 84-byte script (OP_RETURN + OP_PUSHDATA1 + len + data) + tx.vout.append(CTxOut(0, CScript([OP_RETURN, b'x' * 81]))) tx.rehash() - return tx - def get_deployment_status(self, deployment_info, deployment_name): - """Helper to get deployment status from getdeploymentinfo().""" - rd = deployment_info['deployments'][deployment_name] + def get_deployment_status(self, node): + """Get reduced_data deployment status.""" + info = node.getdeploymentinfo() + rd = info['deployments']['reduced_data'] if 'bip9' in rd: return rd['bip9']['status'], 
rd['bip9'].get('since', 'N/A') return rd.get('status'), rd.get('since', 'N/A') def run_test(self): - node = self.nodes[0] + node_bip110 = self.nodes[0] + node_core = self.nodes[1] - # MiniWallet provides a simple wallet for test transactions - self.wallet = MiniWallet(node) + wallet = MiniWallet(node_bip110) - self.log.info("Mining initial blocks to get spendable coins...") - self.generate(self.wallet, 101) + # ===================================================================== + # Phase 1: Build common chain through BIP9 state transitions + # ===================================================================== + self.log.info("Phase 1: Building common chain through BIP9 states") - # Get deployment info at genesis - info = node.getdeploymentinfo() - status, since = self.get_deployment_status(info, 'reduced_data') - self.log.info(f"Block 101 - Status: {status}, Since: {since}") - assert_equal(status, 'defined') + self.log.info("Mining initial blocks for spendable coins...") + self.generate(wallet, 101) + self.sync_all() - # Mine through period 0 (blocks 102-143) - should remain DEFINED - self.log.info("Mining through period 0 (blocks 102-143)...") - self.generate(node, 42) # Get to block 143 - info = node.getdeploymentinfo() - status, since = self.get_deployment_status(info, 'reduced_data') - self.log.info(f"Block 143 - Status: {status}") + status, _ = self.get_deployment_status(node_bip110) assert_equal(status, 'defined') - # Mine period 1 (blocks 144-287) with signaling - should transition to STARTED - self.log.info("Mining period 1 (blocks 144-287) with 100% signaling...") - self.mine_blocks(144, signal=True) - assert_equal(node.getblockcount(), 287) - info = node.getdeploymentinfo() - status, since = self.get_deployment_status(info, 'reduced_data') - self.log.info(f"Block 287 - Status: {status}") + # Mine to end of period 0 + self.log.info("Mining through period 0 (DEFINED)...") + self.generate(node_bip110, 42) + self.sync_all() + 
assert_equal(node_bip110.getblockcount(), 143) + + # Period 1: Signal for activation + self.log.info("Mining period 1 with signaling (STARTED)...") + self.mine_blocks_on_node(node_bip110, 144, signal=True) + self.sync_all() + assert_equal(node_bip110.getblockcount(), 287) + status, _ = self.get_deployment_status(node_bip110) assert_equal(status, 'started') - # Mine period 2 (blocks 288-431) - should transition to LOCKED_IN - self.log.info("Mining period 2 (blocks 288-431)...") - self.mine_blocks(144, signal=True) - assert_equal(node.getblockcount(), 431) - info = node.getdeploymentinfo() - status, since = self.get_deployment_status(info, 'reduced_data') - self.log.info(f"Block 431 - Status: {status}, Since: {since}") + # Period 2: Lock in + self.log.info("Mining period 2 (LOCKED_IN)...") + self.mine_blocks_on_node(node_bip110, 144, signal=True) + self.sync_all() + assert_equal(node_bip110.getblockcount(), 431) + status, since = self.get_deployment_status(node_bip110) assert_equal(status, 'locked_in') assert_equal(since, 288) - # Mine one more block to activate (block 432 starts period 3) - self.log.info("Mining block 432 (activation block)...") - self.mine_blocks(1) - assert_equal(node.getblockcount(), 432) - info = node.getdeploymentinfo() - status, since = self.get_deployment_status(info, 'reduced_data') + # ===================================================================== + # Phase 2: Test activation and chain split + # ===================================================================== + self.log.info("Phase 2: Testing activation and chain split behavior") + + # Mine block 432 (activation) + self.mine_blocks_on_node(node_bip110, 1) + self.sync_all() + assert_equal(node_bip110.getblockcount(), 432) + status, since = self.get_deployment_status(node_bip110) self.log.info(f"Block 432 - Status: {status}, Since: {since}") assert_equal(status, 'active') assert_equal(since, 432) - # Test that REDUCED_DATA rules are enforced at block 432 (first active block) - 
self.log.info("Testing REDUCED_DATA rules are enforced at block 432...") - tx_large_data = self.create_tx_with_data(81) - block_invalid = self.create_test_block([tx_large_data]) - result = node.submitblock(block_invalid.serialize().hex()) - self.log.info(f"Submitting block with 81-byte OP_RETURN at height 432: {result}") - # 81 bytes data becomes 84-byte script (OP_RETURN + OP_PUSHDATA1 + len + data), exceeds 83-byte limit - assert_equal(result, 'bad-txns-vout-script-toolarge') + # Disconnect nodes BEFORE creating invalid block to prevent P2P relay + # (Bitcoin Core relays blocks via compact blocks before full validation completes) + self.log.info("Disconnecting nodes for chain split test...") + self.disconnect_nodes(0, 1) - # Mine a valid block instead - tx_valid = self.create_tx_with_data(80) - block_valid = self.create_test_block([tx_valid]) - assert_equal(node.submitblock(block_valid.serialize().hex()), None) - assert_equal(node.getblockcount(), 433) + # Create the invalid block (84-byte OP_RETURN violates BIP-110's 83-byte limit) + self.log.info("Test: BIP-110 node rejects block with 84-byte OP_RETURN output") + tx_invalid = self.create_tx_with_large_output(wallet) + block_invalid = self.create_block_for_node(node_bip110, [tx_invalid]) - # Mine through most of the active period (blocks 434-574) - self.log.info("Mining through active period to block 574...") - self.generate(node, 141) # 434 to 574 - assert_equal(node.getblockcount(), 574) - info = node.getdeploymentinfo() - status, since = self.get_deployment_status(info, 'reduced_data') - self.log.info(f"Block 574 - Status: {status}") - assert_equal(status, 'active') + # Submit to BIP-110 node - should be rejected + result_bip110 = node_bip110.submitblock(block_invalid.serialize().hex()) + assert_equal(result_bip110, 'bad-txns-vout-script-toolarge') + assert_equal(node_bip110.getblockcount(), 432) + + # Submit to non-BIP-110 node - should be accepted + self.log.info("Test: Non-BIP-110 node accepts the same 
block") + result_core = node_core.submitblock(block_invalid.serialize().hex()) + assert_equal(result_core, None) + assert_equal(node_core.getblockcount(), 433) - # Test that REDUCED_DATA rules are still enforced at block 575 (last active block, 432 + 144 - 1) - self.log.info("Testing REDUCED_DATA rules are still enforced at block 575 (last active block)...") - tx_large_data = self.create_tx_with_data(81) - block_invalid = self.create_test_block([tx_large_data]) - result = node.submitblock(block_invalid.serialize().hex()) - self.log.info(f"Submitting block with 81-byte OP_RETURN at height 575: {result}") + # Chain split confirmed + self.log.info(f"Chain split: BIP-110={node_bip110.getblockcount()}, Core={node_core.getblockcount()}") + + # ===================================================================== + # Phase 3: Test reorg behavior + # ===================================================================== + self.log.info("Phase 3: Testing reorg behavior") + + # Non-BIP-110 extends its chain + self.log.info("Non-BIP-110 node extends chain with 3 more blocks...") + for i in range(3): + block = self.create_block_for_node(node_core, time_offset=i) + node_core.submitblock(block.serialize().hex()) + assert_equal(node_core.getblockcount(), 436) + + # BIP-110 node builds longer valid chain + self.log.info("BIP-110 node builds longer valid chain (5 blocks)...") + for i in range(5): + block = self.create_block_for_node(node_bip110, time_offset=i+10) + node_bip110.submitblock(block.serialize().hex()) + assert_equal(node_bip110.getblockcount(), 437) + + # Reconnect - non-BIP-110 should reorg to BIP-110's chain + self.log.info("Reconnecting nodes - expecting reorg...") + self.connect_nodes(0, 1) + self.sync_blocks() + + assert_equal(node_core.getbestblockhash(), node_bip110.getbestblockhash()) + assert_equal(node_core.getblockcount(), 437) + self.log.info(f"Reorg complete: both nodes at height {node_core.getblockcount()}") + + # 
===================================================================== + # Phase 4: Test rules enforced until expiry + # ===================================================================== + self.log.info("Phase 4: Testing rules enforced until expiry") + + # Mine to block 574 (one before last active block) + # active_duration=144, activation at 432, so last active block is 432+144-1=575 + blocks_to_574 = 574 - node_bip110.getblockcount() + self.log.info(f"Mining {blocks_to_574} blocks to reach block 574...") + self.generate(node_bip110, blocks_to_574) + self.sync_all() + assert_equal(node_bip110.getblockcount(), 574) + + # Disconnect nodes to prevent compact block relay of invalid block + self.disconnect_nodes(0, 1) + + # Verify rules still enforced at block 575 (last active block) + self.log.info("Test: Rules still enforced at block 575 (last active block)") + tx_invalid = self.create_tx_with_large_output(wallet) + block_invalid = self.create_block_for_node(node_bip110, [tx_invalid]) + result = node_bip110.submitblock(block_invalid.serialize().hex()) assert_equal(result, 'bad-txns-vout-script-toolarge') # Mine valid block 575 (last active block) - tx_valid = self.create_tx_with_data(80) - block_valid = self.create_test_block([tx_valid]) - assert_equal(node.submitblock(block_valid.serialize().hex()), None) - assert_equal(node.getblockcount(), 575) - info = node.getdeploymentinfo() - status, since = self.get_deployment_status(info, 'reduced_data') - self.log.info(f"Block 575 - Status: {status}") - assert_equal(status, 'active') + block_valid = self.create_block_for_node(node_bip110) + node_bip110.submitblock(block_valid.serialize().hex()) + assert_equal(node_bip110.getblockcount(), 575) + + # Reconnect and sync + self.connect_nodes(0, 1) + self.sync_all() - # Test that REDUCED_DATA rules are NO LONGER enforced at block 576 (first expired block, 432 + 144) - self.log.info("Testing REDUCED_DATA rules are NOT enforced at block 576 (first expired block, 432 + 144)...") 
- tx_large_data = self.create_tx_with_data(81) - block_after_expiry = self.create_test_block([tx_large_data]) - result = node.submitblock(block_after_expiry.serialize().hex()) - self.log.info(f"Submitting block with 81-byte OP_RETURN at height 576: {result}") + # ===================================================================== + # Phase 5: Test expiry - rules no longer enforced + # ===================================================================== + self.log.info("Phase 5: Testing expiry - rules no longer enforced") + + # At block 576, deployment has expired (first expired block = 432 + 144) + self.log.info("Test: BIP-110 node accepts 'invalid' block at height 576 (expired)") + tx_invalid = self.create_tx_with_large_output(wallet) + block_after_expiry = self.create_block_for_node(node_bip110, [tx_invalid]) + result = node_bip110.submitblock(block_after_expiry.serialize().hex()) assert_equal(result, None) - assert_equal(node.getblockcount(), 576) + self.sync_all() + assert_equal(node_bip110.getblockcount(), 576) + + # ===================================================================== + # Phase 6: Test post-expiry convergence + # ===================================================================== + self.log.info("Phase 6: Testing post-expiry convergence") + + # Both nodes should accept the same "invalid" blocks now + self.log.info("Test: Both nodes accept 'invalid' blocks after expiry") + for i in range(5): + tx = self.create_tx_with_large_output(wallet) + block = self.create_block_for_node(node_bip110, [tx], time_offset=i) + result_bip110 = node_bip110.submitblock(block.serialize().hex()) + assert_equal(result_bip110, None) + self.sync_all() + assert_equal(node_core.getbestblockhash(), node_bip110.getbestblockhash()) + + final_height = node_bip110.getblockcount() + self.log.info(f"Final height: {final_height}, both nodes synced") + + # ===================================================================== + # Summary + # 
===================================================================== + self.log.info("All tests passed:") + self.log.info(" - BIP9 state transitions (DEFINED -> STARTED -> LOCKED_IN -> ACTIVE)") + self.log.info(" - Chain split at activation (BIP-110 rejects, Core accepts)") + self.log.info(" - Reorg to longer valid chain on reconnect") + self.log.info(" - Rules enforced during active period (432-575)") + self.log.info(" - Rules not enforced after expiry (576+)") + self.log.info(" - Post-expiry convergence (both nodes accept same blocks)") - # Check deployment status after expiry - # Note: BIP9 status may still show 'active' but rules are no longer enforced - info = node.getdeploymentinfo() - status, since = self.get_deployment_status(info, 'reduced_data') - self.log.info(f"Block 576 - Status: {status}, Since: {since}") - - # Verify rules remain unenforced for several more blocks - self.log.info("Verifying REDUCED_DATA rules remain unenforced after expiry...") - for i in range(10): - tx_large = self.create_tx_with_data(81) - block = self.create_test_block([tx_large]) - result = node.submitblock(block.serialize().hex()) - assert_equal(result, None) - - self.log.info(f"Final block height: {node.getblockcount()}") if __name__ == '__main__': TemporaryDeploymentTest(__file__).main() From 42e70b4b62b182cde96ee2a63842608d7f856ef0 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Wed, 14 Jan 2026 12:48:15 -0600 Subject: [PATCH 331/356] consensus: apply output size limit to generation transactions --- src/consensus/tx_verify.cpp | 20 +++++--- src/consensus/tx_verify.h | 7 +++ src/validation.cpp | 12 ++++- test/functional/feature_rdts.py | 87 +++++++++++++++++++++++++++++++++ 4 files changed, 118 insertions(+), 8 deletions(-) diff --git a/src/consensus/tx_verify.cpp b/src/consensus/tx_verify.cpp index 91215f5a1d..84558503c7 100644 --- a/src/consensus/tx_verify.cpp +++ b/src/consensus/tx_verify.cpp @@ -161,6 +161,17 @@ int64_t GetTransactionSigOpCost(const CTransaction& tx, 
const CCoinsViewCache& i return nSigOps; } +bool Consensus::CheckOutputSizes(const CTransaction& tx, TxValidationState& state) +{ + for (const auto& txout : tx.vout) { + if (txout.scriptPubKey.empty()) continue; + if (txout.scriptPubKey.size() > ((txout.scriptPubKey[0] == OP_RETURN) ? MAX_OUTPUT_DATA_SIZE : MAX_OUTPUT_SCRIPT_SIZE)) { + return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "bad-txns-vout-script-toolarge"); + } + } + return true; +} + bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& inputs, int nSpendHeight, CAmount& txfee, const CheckTxInputsRules rules) { // are the actual inputs available? @@ -170,13 +181,8 @@ bool Consensus::CheckTxInputs(const CTransaction& tx, TxValidationState& state, } // NOTE: CheckTransaction is arguably the more logical place to do this, but it's context-independent, so this is probably the next best place for now - if (rules.test(CheckTxInputsRules::OutputSizeLimit)) { - for (const auto& txout : tx.vout) { - if (txout.scriptPubKey.empty()) continue; - if (txout.scriptPubKey.size() > ((txout.scriptPubKey[0] == OP_RETURN) ? MAX_OUTPUT_DATA_SIZE : MAX_OUTPUT_SCRIPT_SIZE)) { - return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "bad-txns-vout-script-toolarge"); - } - } + if (rules.test(CheckTxInputsRules::OutputSizeLimit) && !CheckOutputSizes(tx, state)) { + return false; } CAmount nValueIn = 0; diff --git a/src/consensus/tx_verify.h b/src/consensus/tx_verify.h index 65e705abd4..8111a5b77a 100644 --- a/src/consensus/tx_verify.h +++ b/src/consensus/tx_verify.h @@ -42,6 +42,13 @@ class CheckTxInputsRules { }; namespace Consensus { +/** + * Check whether all outputs of this transaction satisfy size limits. + * Regular outputs must be <= MAX_OUTPUT_SCRIPT_SIZE (34 bytes). + * OP_RETURN outputs must be <= MAX_OUTPUT_DATA_SIZE (83 bytes). 
+ */ +bool CheckOutputSizes(const CTransaction& tx, TxValidationState& state); + /** * Check whether all inputs of this transaction are valid (no double spends and amounts) * This does not modify the UTXO set. This does not check scripts and sigs. diff --git a/src/validation.cpp b/src/validation.cpp index 44298f9f9e..5aa8694558 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2903,7 +2903,17 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, ? m_chainman.m_versionbitscache.StateSinceHeight(pindex->pprev, params.GetConsensus(), Consensus::DEPLOYMENT_REDUCED_DATA) : std::numeric_limits::max(); - const auto chk_input_rules{DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_REDUCED_DATA) ? CheckTxInputsRules::OutputSizeLimit : CheckTxInputsRules::None}; + const CheckTxInputsRules chk_input_rules{DeploymentActiveAt(*pindex, m_chainman, Consensus::DEPLOYMENT_REDUCED_DATA) ? CheckTxInputsRules::OutputSizeLimit : CheckTxInputsRules::None}; + + // Check generation tx output sizes if REDUCED_DATA is active + if (chk_input_rules.test(CheckTxInputsRules::OutputSizeLimit)) { + TxValidationState tx_state; + if (!Consensus::CheckOutputSizes(*block.vtx[0], tx_state)) { + return state.Invalid(BlockValidationResult::BLOCK_CONSENSUS, + tx_state.GetRejectReason(), + tx_state.GetDebugMessage() + " in generation tx " + block.vtx[0]->GetHash().ToString()); + } + } std::vector prevheights; CAmount nFees = 0; diff --git a/test/functional/feature_rdts.py b/test/functional/feature_rdts.py index b360624592..b753f199dd 100755 --- a/test/functional/feature_rdts.py +++ b/test/functional/feature_rdts.py @@ -893,11 +893,98 @@ def test_tapscript_script_exemption(self): assert_equal(result['allowed'], True) self.log.info(" PASS: >256-byte tapleaf script accepted under reduced_data") + def test_generation_output_size_limit(self): + """Test that generation tx outputs are also subject to output size limits.""" + self.log.info("Testing generation tx 
output scriptPubKey size limits...") + + node = self.nodes[0] + + def create_block_with_generation_output(script_pubkey): + """Helper to create a block with a custom generation tx output script.""" + tip = node.getbestblockhash() + height = node.getblockcount() + 1 + tip_header = node.getblockheader(tip) + block_time = tip_header['time'] + 1 + coinbase = create_coinbase(height, script_pubkey=script_pubkey) + block = create_block(int(tip, 16), coinbase, ntime=block_time) + add_witness_commitment(block) + block.solve() + return block + + # Test 1: 34-byte P2WSH generation tx output (exactly at limit - should pass) + self.log.info(" Test: 34-byte P2WSH generation tx output (at limit)") + witness_program_32 = b'\x00' * 32 + script_p2wsh = CScript([OP_0, witness_program_32]) + assert_equal(len(script_p2wsh), 34) + + block_valid = create_block_with_generation_output(script_p2wsh) + result = node.submitblock(block_valid.serialize().hex()) + assert_equal(result, None) + self.log.info(" ✓ 34-byte P2WSH generation tx output accepted") + + # Test 2: 35-byte P2PK generation tx output (exceeds limit - should fail) + self.log.info(" Test: 35-byte P2PK generation tx output (exceeds limit)") + pubkey_33 = b'\x02' + b'\x00' * 32 # Compressed pubkey format + script_p2pk = CScript([pubkey_33, OP_CHECKSIG]) + assert_equal(len(script_p2pk), 35) + + block_invalid = create_block_with_generation_output(script_p2pk) + result = node.submitblock(block_invalid.serialize().hex()) + assert_equal(result, 'bad-txns-vout-script-toolarge') + self.log.info(" ✓ 35-byte P2PK generation tx output rejected") + + # Test 3: Generation tx with OP_RETURN at 83 bytes (at OP_RETURN limit - should pass) + self.log.info(" Test: Generation tx with 83-byte OP_RETURN extra output (at limit)") + # 80 bytes data = OP_RETURN (1) + push opcode (1) + data (80) = 82 bytes + # We need 83 bytes, so use 81 bytes of data with PUSHDATA1 + # OP_RETURN (1) + OP_PUSHDATA1 (1) + len (1) + data (80) = 83 bytes + data_80 = b'\x00' 
* 80 + script_opreturn_83 = CScript([OP_RETURN, data_80]) + # Verify we're at exactly 83 bytes (with CScript's encoding) + self.log.info(f" OP_RETURN script length: {len(script_opreturn_83)} bytes") + + # Create block with valid main output and OP_RETURN extra output + tip = node.getbestblockhash() + height = node.getblockcount() + 1 + tip_header = node.getblockheader(tip) + block_time = tip_header['time'] + 1 + coinbase = create_coinbase(height, extra_output_script=script_opreturn_83) + block_opreturn_valid = create_block(int(tip, 16), coinbase, ntime=block_time) + add_witness_commitment(block_opreturn_valid) + block_opreturn_valid.solve() + + result = node.submitblock(block_opreturn_valid.serialize().hex()) + if result is None: + self.log.info(" ✓ Generation tx with 83-byte OP_RETURN output accepted") + else: + self.log.info(f" Note: Generation tx OP_RETURN result: {result}") + + # Test 4: Generation tx with OP_RETURN at 84 bytes (exceeds limit - should fail) + self.log.info(" Test: Generation tx with 84-byte OP_RETURN extra output (exceeds limit)") + # 81 bytes data = OP_RETURN (1) + OP_PUSHDATA1 (1) + len (1) + data (81) = 84 bytes + data_81 = b'\x00' * 81 + script_opreturn_84 = CScript([OP_RETURN, data_81]) + self.log.info(f" OP_RETURN script length: {len(script_opreturn_84)} bytes") + + tip = node.getbestblockhash() + height = node.getblockcount() + 1 + tip_header = node.getblockheader(tip) + block_time = tip_header['time'] + 1 + coinbase = create_coinbase(height, extra_output_script=script_opreturn_84) + block_opreturn_invalid = create_block(int(tip, 16), coinbase, ntime=block_time) + add_witness_commitment(block_opreturn_invalid) + block_opreturn_invalid.solve() + + result = node.submitblock(block_opreturn_invalid.serialize().hex()) + assert_equal(result, 'bad-txns-vout-script-toolarge') + self.log.info(" ✓ Generation tx with 84-byte OP_RETURN output rejected") + def run_test(self): self.init_test() # Run all spec tests self.test_output_script_size_limit() 
+ self.test_generation_output_size_limit() self.test_pushdata_size_limit() self.test_undefined_witness_versions() self.test_taproot_annex_rejection() From 2d9ca10a3fe39dc7d3d238e30d15b0e00aecbf98 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Mon, 26 Jan 2026 22:37:45 -0600 Subject: [PATCH 332/356] test: add versionbits unit tests for max_activation_height and active_duration --- src/test/versionbits_tests.cpp | 260 +++++++++++++++++++++++++++++++++ 1 file changed, 260 insertions(+) diff --git a/src/test/versionbits_tests.cpp b/src/test/versionbits_tests.cpp index 29240a45f0..7028a3d2c6 100644 --- a/src/test/versionbits_tests.cpp +++ b/src/test/versionbits_tests.cpp @@ -461,4 +461,264 @@ BOOST_FIXTURE_TEST_CASE(versionbits_computeblockversion, BlockVersionTest) } } +/** + * Test condition checker with max_activation_height for mandatory activation deadline. + * When max_activation_height is set, the deployment forces LOCKED_IN one period before + * max_activation_height, even if threshold signaling was not met. 
+ */ +class TestMaxActivationHeightConditionChecker : public AbstractThresholdConditionChecker +{ +private: + mutable ThresholdConditionCache cache; + int m_max_activation_height; + +public: + explicit TestMaxActivationHeightConditionChecker(int max_height) : m_max_activation_height(max_height) {} + + int64_t BeginTime(const Consensus::Params& params) const override { return 0; } // Start immediately + int64_t EndTime(const Consensus::Params& params) const override { return Consensus::BIP9Deployment::NO_TIMEOUT; } + int Period(const Consensus::Params& params) const override { return 144; } + int Threshold(const Consensus::Params& params) const override { return 108; } // 75% + int MaxActivationHeight(const Consensus::Params& params) const override { return m_max_activation_height; } + bool Condition(const CBlockIndex* pindex, const Consensus::Params& params) const override { return (pindex->nVersion & 0x100); } + + ThresholdState GetStateFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateFor(pindexPrev, paramsDummy, cache); } + int GetStateSinceHeightFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateSinceHeightFor(pindexPrev, paramsDummy, cache); } + void ClearCache() { cache.clear(); } +}; + +BOOST_AUTO_TEST_CASE(versionbits_max_activation_height) +{ + // Test that max_activation_height forces LOCKED_IN one period before max_activation_height + // even without sufficient signaling. 
+ // + // Timeline with period=144, max_activation_height=432: + // - Period 0 (0-143): DEFINED + // - Period 1 (144-287): STARTED (no signaling -> normally would stay STARTED) + // - Period 2 (288-431): LOCKED_IN (forced because 288 >= 432 - 144) + // - Period 3 (432+): ACTIVE + + std::vector<CBlockIndex*> blocks; + auto cleanup = [&blocks]() { + for (auto* b : blocks) delete b; + blocks.clear(); + }; + + // max_activation_height = 432 (period 3 start) + TestMaxActivationHeightConditionChecker checker(432); + + // Helper to create blocks + auto mine_block = [&blocks](int32_t nVersion) -> CBlockIndex* { + CBlockIndex* pindex = new CBlockIndex(); + pindex->nHeight = blocks.size(); + pindex->pprev = blocks.empty() ? nullptr : blocks.back(); + pindex->nTime = 1415926536 + 600 * pindex->nHeight; + pindex->nVersion = nVersion; + pindex->BuildSkip(); + blocks.push_back(pindex); + return pindex; + }; + + // Mine through period 0 (DEFINED) - 144 blocks (0-143) + for (int i = 0; i < 144; i++) { + mine_block(0); // No signaling + } + BOOST_CHECK_EQUAL(blocks.back()->nHeight, 143); + // At tip 143, next block (144) would be STARTED + BOOST_CHECK(checker.GetStateFor(blocks.back()) == ThresholdState::STARTED); + BOOST_CHECK_EQUAL(checker.GetStateSinceHeightFor(blocks.back()), 144); + + // Mine through period 1 (STARTED) without signaling - blocks 144-287 + for (int i = 0; i < 144; i++) { + mine_block(0); // No signaling + } + BOOST_CHECK_EQUAL(blocks.back()->nHeight, 287); + // At tip 287, next block (288) would be LOCKED_IN due to max_activation_height + // 288 >= 432 - 144, so forced LOCKED_IN + BOOST_CHECK(checker.GetStateFor(blocks.back()) == ThresholdState::LOCKED_IN); + BOOST_CHECK_EQUAL(checker.GetStateSinceHeightFor(blocks.back()), 288); + + // Mine through period 2 (LOCKED_IN) - blocks 288-431 + for (int i = 0; i < 144; i++) { + mine_block(0); + } + BOOST_CHECK_EQUAL(blocks.back()->nHeight, 431); + // At tip 431, next block (432) would be ACTIVE +
BOOST_CHECK(checker.GetStateFor(blocks.back()) == ThresholdState::ACTIVE); + BOOST_CHECK_EQUAL(checker.GetStateSinceHeightFor(blocks.back()), 432); + + // Mine into period 3 (ACTIVE) - blocks 432+ + for (int i = 0; i < 10; i++) { + mine_block(0); + } + BOOST_CHECK_EQUAL(blocks.back()->nHeight, 441); + BOOST_CHECK(checker.GetStateFor(blocks.back()) == ThresholdState::ACTIVE); + BOOST_CHECK_EQUAL(checker.GetStateSinceHeightFor(blocks.back()), 432); + + cleanup(); + + // Test 2: Verify that signaling still works to activate earlier than max_activation_height + TestMaxActivationHeightConditionChecker checker2(1000); // max_activation_height far in future + + // Period 0: DEFINED + for (int i = 0; i < 144; i++) { + mine_block(0); + } + BOOST_CHECK(checker2.GetStateFor(blocks.back()) == ThresholdState::STARTED); + + // Period 1: Signal 108+ blocks (threshold) + for (int i = 0; i < 108; i++) { + mine_block(0x100); // Signal + } + for (int i = 0; i < 36; i++) { + mine_block(0); // No signal + } + BOOST_CHECK_EQUAL(blocks.back()->nHeight, 287); + // Should be LOCKED_IN via signaling, not via max_activation_height + BOOST_CHECK(checker2.GetStateFor(blocks.back()) == ThresholdState::LOCKED_IN); + BOOST_CHECK_EQUAL(checker2.GetStateSinceHeightFor(blocks.back()), 288); + + // Period 2: LOCKED_IN -> ACTIVE + for (int i = 0; i < 144; i++) { + mine_block(0); + } + BOOST_CHECK(checker2.GetStateFor(blocks.back()) == ThresholdState::ACTIVE); + BOOST_CHECK_EQUAL(checker2.GetStateSinceHeightFor(blocks.back()), 432); + + cleanup(); +} + +BOOST_AUTO_TEST_CASE(versionbits_max_activation_height_boundary) +{ + // Test edge case: verify exact boundary where LOCKED_IN is forced + // With period=144 and max_activation_height=432: + // - At height 287, next block is 288, which is >= 432-144=288, so LOCKED_IN + // - At height 286, next block is 287, which is < 288, so would stay STARTED + + std::vector<CBlockIndex*> blocks; + auto cleanup = [&blocks]() { + for (auto* b : blocks) delete b; + blocks.clear(); +
}; + + TestMaxActivationHeightConditionChecker checker(432); + + auto mine_block = [&blocks](int32_t nVersion) -> CBlockIndex* { + CBlockIndex* pindex = new CBlockIndex(); + pindex->nHeight = blocks.size(); + pindex->pprev = blocks.empty() ? nullptr : blocks.back(); + pindex->nTime = 1415926536 + 600 * pindex->nHeight; + pindex->nVersion = nVersion; + pindex->BuildSkip(); + blocks.push_back(pindex); + return pindex; + }; + + // Mine to height 143 (end of period 0) + for (int i = 0; i < 144; i++) { + mine_block(0); + } + BOOST_CHECK_EQUAL(blocks.back()->nHeight, 143); + // State for block 144 is STARTED + BOOST_CHECK(checker.GetStateFor(blocks.back()) == ThresholdState::STARTED); + + // Mine period 1 without signaling (blocks 144-287) + // But stop at block 286 first to check boundary + for (int i = 0; i < 143; i++) { + mine_block(0); + } + BOOST_CHECK_EQUAL(blocks.back()->nHeight, 286); + // At tip 286, next block 287 is still in STARTED period + // State is still STARTED + BOOST_CHECK(checker.GetStateFor(blocks.back()) == ThresholdState::STARTED); + + // Mine block 287 (last block of period 1) + mine_block(0); + BOOST_CHECK_EQUAL(blocks.back()->nHeight, 287); + // At tip 287, state for next block (288) is computed + // 288 >= 432 - 144 = 288, so LOCKED_IN + BOOST_CHECK(checker.GetStateFor(blocks.back()) == ThresholdState::LOCKED_IN); + BOOST_CHECK_EQUAL(checker.GetStateSinceHeightFor(blocks.back()), 288); + + cleanup(); +} + +BOOST_FIXTURE_TEST_CASE(versionbits_active_duration, BasicTestingSetup) +{ + // Test active_duration parameter via -vbparams + // Format: deployment:start:timeout:min_activation_height:max_activation_height:active_duration + // + // This tests that the parameter is parsed correctly. The actual expiry logic + // is tested in DeploymentActiveAt/DeploymentActiveAfter which use active_duration. 
+ + { + ArgsManager args; + // start=0, timeout=never, min_height=0, max_height=INT_MAX (disabled), active_duration=144 + args.ForceSetArg("-vbparams", "testdummy:0:999999999999:0:2147483647:144"); + const auto chainParams = CreateChainParams(args, ChainType::REGTEST); + const auto& deployment = chainParams->GetConsensus().vDeployments[Consensus::DEPLOYMENT_TESTDUMMY]; + + BOOST_CHECK_EQUAL(deployment.nStartTime, 0); + BOOST_CHECK_EQUAL(deployment.nTimeout, 999999999999); + BOOST_CHECK_EQUAL(deployment.min_activation_height, 0); + BOOST_CHECK_EQUAL(deployment.max_activation_height, std::numeric_limits<int>::max()); + BOOST_CHECK_EQUAL(deployment.active_duration, 144); + } + + { + ArgsManager args; + // Test with max_activation_height set + // start=0, timeout=NO_TIMEOUT, min_height=288, max_height=432, active_duration=1000 + // NO_TIMEOUT = INT64_MAX = 9223372036854775807 + args.ForceSetArg("-vbparams", "testdummy:0:9223372036854775807:288:432:1000"); + const auto chainParams = CreateChainParams(args, ChainType::REGTEST); + const auto& deployment = chainParams->GetConsensus().vDeployments[Consensus::DEPLOYMENT_TESTDUMMY]; + + BOOST_CHECK_EQUAL(deployment.min_activation_height, 288); + BOOST_CHECK_EQUAL(deployment.max_activation_height, 432); + BOOST_CHECK_EQUAL(deployment.active_duration, 1000); + } + + { + ArgsManager args; + // Test permanent deployment (active_duration = INT_MAX) + args.ForceSetArg("-vbparams", "testdummy:0:999999999999:0:2147483647:2147483647"); + const auto chainParams = CreateChainParams(args, ChainType::REGTEST); + const auto& deployment = chainParams->GetConsensus().vDeployments[Consensus::DEPLOYMENT_TESTDUMMY]; + + BOOST_CHECK_EQUAL(deployment.active_duration, std::numeric_limits<int>::max()); + } +} + +BOOST_FIXTURE_TEST_CASE(versionbits_max_activation_height_parsing, BasicTestingSetup) +{ + // Test max_activation_height parameter via -vbparams + + { + ArgsManager args; + // Test with max_activation_height=432 (mandatory activation deadline) + //
NO_TIMEOUT = INT64_MAX = 9223372036854775807 + args.ForceSetArg("-vbparams", "testdummy:0:9223372036854775807:0:432:2147483647"); + const auto chainParams = CreateChainParams(args, ChainType::REGTEST); + const auto& deployment = chainParams->GetConsensus().vDeployments[Consensus::DEPLOYMENT_TESTDUMMY]; + + BOOST_CHECK_EQUAL(deployment.max_activation_height, 432); + // active_duration should be permanent when not specified differently + BOOST_CHECK_EQUAL(deployment.active_duration, std::numeric_limits<int>::max()); + } + + { + ArgsManager args; + // Test combined: max_activation_height + active_duration (RDTS) + // NO_TIMEOUT = INT64_MAX = 9223372036854775807 + args.ForceSetArg("-vbparams", "testdummy:0:9223372036854775807:288:576:144"); + const auto chainParams = CreateChainParams(args, ChainType::REGTEST); + const auto& deployment = chainParams->GetConsensus().vDeployments[Consensus::DEPLOYMENT_TESTDUMMY]; + + BOOST_CHECK_EQUAL(deployment.min_activation_height, 288); + BOOST_CHECK_EQUAL(deployment.max_activation_height, 576); + BOOST_CHECK_EQUAL(deployment.active_duration, 144); + } +} + BOOST_AUTO_TEST_SUITE_END() From 5724abb6f9b2a86db8739d88f0e439edababd112 Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Thu, 12 Feb 2026 00:28:04 -0600 Subject: [PATCH 333/356] script: require empty witness for P2A spends --- src/script/interpreter.cpp | 2 +- test/functional/feature_rdts.py | 61 +++++++++++++++++++++++++++++++++ 2 files changed, 62 insertions(+), 1 deletion(-) diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index c823a615e3..eab89bbdbc 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -2001,7 +2001,7 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, } return set_success(serror); } - } else if (!is_p2sh && CScript::IsPayToAnchor(witversion, program)) { + } else if (stack.empty() && !is_p2sh && CScript::IsPayToAnchor(witversion, program)) { return true; } else { if (flags &
SCRIPT_VERIFY_DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM) { diff --git a/test/functional/feature_rdts.py b/test/functional/feature_rdts.py index b753f199dd..81e8495328 100755 --- a/test/functional/feature_rdts.py +++ b/test/functional/feature_rdts.py @@ -80,6 +80,7 @@ add_witness_commitment, ) from test_framework.script_util import ( + PAY_TO_ANCHOR, script_to_p2wsh_script, script_to_p2sh_script, ) @@ -979,6 +980,65 @@ def create_block_with_generation_output(script_pubkey): assert_equal(result, 'bad-txns-vout-script-toolarge') self.log.info(" ✓ Generation tx with 84-byte OP_RETURN output rejected") + def test_p2a_witness_rejected(self): + """Test that P2A (PayToAnchor) spends with non-empty witness are rejected.""" + self.log.info("Testing P2A non-empty witness rejection...") + node = self.nodes[0] + + # Create a P2A output (4 bytes, within the 34-byte limit) + p2a_funding = self.create_test_transaction(PAY_TO_ANCHOR) + p2a_funding.rehash() + p2a_value = p2a_funding.vout[0].nValue + + block_height = node.getblockcount() + 1 + block = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block.vtx.append(p2a_funding) + add_witness_commitment(block) + block.solve() + assert_equal(node.submitblock(block.serialize().hex()), None) + self.log.info(" P2A output created") + + # Test 1: Spend with 100 KB of arbitrary witness data (must be rejected) + self.log.info(" Test: P2A spend with large arbitrary witness (should be rejected)") + arbitrary_data = b'\xab' * 100_000 + + p2a_spend = CTransaction() + p2a_spend.vin = [CTxIn(COutPoint(int(p2a_funding.rehash(), 16), 0))] + p2a_spend.vout = [CTxOut(p2a_value - 1000, CScript([OP_0, hash160(b'\x01' * 33)]))] + p2a_spend.wit.vtxinwit = [CTxInWitness()] + p2a_spend.wit.vtxinwit[0].scriptWitness.stack = [arbitrary_data] + p2a_spend.rehash() + + block_height = node.getblockcount() + 1 + block_bad = create_block(int(node.getbestblockhash(), 16), 
create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block_bad.vtx.append(p2a_spend) + add_witness_commitment(block_bad) + block_bad.solve() + + result = node.submitblock(block_bad.serialize().hex()) + assert result is not None + assert_equal(node.getblockcount(), block_height - 1) + self.log.info(f" ✓ P2A spend with 100 KB witness rejected ({result})") + + # Test 2: Spend with empty witness (must still be accepted) + self.log.info(" Test: P2A spend with empty witness (should be accepted)") + p2a_spend_empty = CTransaction() + p2a_spend_empty.vin = [CTxIn(COutPoint(int(p2a_funding.rehash(), 16), 0))] + p2a_spend_empty.vout = [CTxOut(p2a_value - 1000, CScript([OP_0, hash160(b'\x01' * 33)]))] + p2a_spend_empty.wit.vtxinwit = [CTxInWitness()] + p2a_spend_empty.wit.vtxinwit[0].scriptWitness.stack = [] + p2a_spend_empty.rehash() + + block_height = node.getblockcount() + 1 + block_good = create_block(int(node.getbestblockhash(), 16), create_coinbase(block_height), int(node.getblockheader(node.getbestblockhash())['time']) + 1) + block_good.vtx.append(p2a_spend_empty) + add_witness_commitment(block_good) + block_good.solve() + + assert_equal(node.submitblock(block_good.serialize().hex()), None) + assert_equal(node.getblockcount(), block_height) + self.log.info(" ✓ P2A spend with empty witness accepted") + def run_test(self): self.init_test() @@ -992,6 +1052,7 @@ def run_test(self): self.test_op_success_rejection() self.test_op_if_notif_rejection() self.test_mandatory_flags_cannot_be_bypassed() + self.test_p2a_witness_rejected() self.test_p2wsh_multisig_witness_script_exemption() self.test_tapscript_script_exemption() From 1f69e534a9231c5f54416b8295bd9a5782249ddc Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Fri, 6 Feb 2026 19:12:52 -0600 Subject: [PATCH 334/356] chainparams: enable BIP-110 deployment on testnet4 --- src/kernel/chainparams.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp index 158d1d2c6d..abd69ecda3 100644 --- a/src/kernel/chainparams.cpp +++ b/src/kernel/chainparams.cpp @@ -396,10 +396,13 @@ class CTestNet4Params : public CChainParams { consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_TAPROOT].min_activation_height = 0; // No activation delay + // ReducedData Temporary Softfork (RDTS) consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].bit = 4; - consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = Consensus::BIP9Deployment::NEVER_ACTIVE; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nStartTime = 1764547200; // December 1st, 2025 consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].nTimeout = Consensus::BIP9Deployment::NO_TIMEOUT; consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].min_activation_height = 0; + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].active_duration = 52416; // ~1 year + consensus.vDeployments[Consensus::DEPLOYMENT_REDUCED_DATA].threshold = 1109; // 55% of 2016 consensus.nMinimumChainWork = uint256{"0000000000000000000000000000000000000000000001d6dce8651b6094e4c1"}; consensus.defaultAssumeValid = uint256{"0000000000003ed4f08dbdf6f7d6b271a6bcffce25675cb40aa9fa43179a89f3"}; // 72600 From f426b61172b894fe3eb3af54ca12e0234a3846f0 Mon Sep 17 00:00:00 2001 From: Luke Dashjr Date: Fri, 13 Feb 2026 19:49:15 +0000 Subject: [PATCH 335/356] Bugfix: validation: Do not cache the result of CheckInputScripts if flags_per_input is used (and avoid using it when unnecessary) --- src/validation.cpp | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/validation.cpp b/src/validation.cpp index 5aa8694558..3a0459971a 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2407,9 +2407,9 @@ ValidationCache::ValidationCache(const size_t script_execution_cache_bytes, cons * 
This involves ECDSA signature checks so can be computationally intensive. This function should * only be called after the cheap sanity checks in CheckTxInputs passed. * - * WARNING: flags_per_input deviations from flags must be handled with care. Under no - * circumstances should they allow a script to pass that might not pass with the same - * `flags` parameter (which is used for the cache). + * WARNING: flags_per_input deviations from flags must be handled with care. It should only be more + * relaxed than flags, never stricter (or a cached result could be wrong). Do not provide + * flags_per_input if every input uses the same flags, or the result will not be cached. * * If pvChecks is not nullptr, script checks are pushed onto it instead of being performed inline. Any * script checks which are not necessary (eg due to script execution cache hits) are, obviously, @@ -2493,7 +2493,7 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, } } - if (cacheFullScriptStore && !pvChecks) { + if (cacheFullScriptStore && (!pvChecks) && flags_per_input.empty()) { // We executed all of the provided scripts, and were told to // cache the result. Do so now. validation_cache.m_script_execution_cache.insert(hashCacheEntry); @@ -2950,10 +2950,13 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, // BIP68 lock checks (as opposed to nLockTime checks) must // be in ConnectBlock because they require the UTXO set prevheights.resize(tx.vin.size()); - flags_per_input.resize(tx.vin.size()); + flags_per_input.clear(); for (size_t j = 0; j < tx.vin.size(); j++) { prevheights[j] = view.AccessCoin(tx.vin[j].prevout).nHeight; - flags_per_input[j] = (prevheights[j] < reduced_data_start_height) ? 
(flags & ~REDUCED_DATA_MANDATORY_VERIFY_FLAGS) : flags; + if (prevheights[j] < reduced_data_start_height) { + flags_per_input.resize(tx.vin.size(), flags); + flags_per_input[j] = flags & ~REDUCED_DATA_MANDATORY_VERIFY_FLAGS; + } } if (!SequenceLocks(tx, nLockTimeFlags, prevheights, *pindex)) { From 4e863a5aeae9e101cba45199331652d75c543d0f Mon Sep 17 00:00:00 2001 From: Dathon Ohm Date: Fri, 13 Feb 2026 15:04:54 -0600 Subject: [PATCH 336/356] test: add unit and functional tests for CheckInputScripts cache-poisoning via activation-boundary reorg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-Authored-By: Lőrinc --- src/test/txvalidationcache_tests.cpp | 74 +++++++++++++++++++ .../feature_reduced_data_utxo_height.py | 54 +++++++++++++- 2 files changed, 127 insertions(+), 1 deletion(-) diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index 11e48b9f53..cf93d4702c 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -2,9 +2,11 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. +#include #include #include #include +#include