Version in base suite: 4.9.7-1 Base version: pdns_4.9.7-1 Target version: pdns_4.9.14-0+deb13u1 Base file: /srv/ftp-master.debian.org/ftp/pool/main/p/pdns/pdns_4.9.7-1.dsc Target file: /srv/ftp-master.debian.org/policy/pool/main/p/pdns/pdns_4.9.14-0+deb13u1.dsc configure | 34 - configure.ac | 2 debian/changelog | 10 debian/gbp.conf | 2 debian/watch | 2 docs/Makefile.am | 2 docs/Makefile.in | 2 docs/calidns.1 | 2 docs/dnsbulktest.1 | 2 docs/dnsgram.1 | 2 docs/dnspcap2calidns.1 | 2 docs/dnspcap2protobuf.1 | 2 docs/dnsreplay.1 | 2 docs/dnsscan.1 | 2 docs/dnsscope.1 | 2 docs/dnstcpbench.1 | 2 docs/dnswasher.1 | 2 docs/dumresp.1 | 2 docs/ixfrdist.1 | 2 docs/ixfrdist.yml.5 | 2 docs/ixplore.1 | 2 docs/nproxy.1 | 2 docs/nsec3dig.1 | 2 docs/pdns_control.1 | 2 docs/pdns_notify.1 | 2 docs/pdns_server.1 | 2 docs/pdnsutil.1 | 2 docs/saxfr.1 | 2 docs/sdig.1 | 2 docs/zone2json.1 | 2 docs/zone2ldap.1 | 2 docs/zone2sql.1 | 2 ext/yahttp/yahttp/reqresp.cpp | 29 + ext/yahttp/yahttp/reqresp.hpp | 16 ext/yahttp/yahttp/router.cpp | 8 m4/pdns_check_libcrypto.m4 | 2 m4/pdns_check_libcrypto_ecdsa.m4 | 4 m4/pdns_check_libcrypto_eddsa.m4 | 4 modules/bindbackend/bindbackend2.cc | 36 + modules/geoipbackend/geoipinterface-mmdb.cc | 3 modules/gmysqlbackend/gmysqlbackend.cc | 22 - modules/godbcbackend/godbcbackend.cc | 2 modules/godbcbackend/sodbc.cc | 39 + modules/gpgsqlbackend/gpgsqlbackend.cc | 14 modules/gsqlite3backend/gsqlite3backend.cc | 6 modules/ldapbackend/native.cc | 2 modules/ldapbackend/powerldap.cc | 74 +++ modules/lmdbbackend/lmdbbackend.cc | 353 +++++++++++------ modules/lmdbbackend/lmdbbackend.hh | 31 + pdns/Makefile.am | 2 pdns/Makefile.in | 2 pdns/api-swagger.json | 10 pdns/api-swagger.yaml | 9 pdns/auth-caches.cc | 9 pdns/backends/gsql/gsqlbackend.hh | 4 pdns/dns.hh | 2 pdns/dnspacket.cc | 2 pdns/dnswriter.cc | 13 pdns/lua-record.cc | 17 pdns/opensslsigners.cc | 2 pdns/rcpgenerator.cc | 18 pdns/requirements.txt | 1 pdns/rfc2136handler.cc | 132 ++---- pdns/test-dnsrecords_cc.cc 
| 1 pdns/unix_utility.cc | 3 pdns/ws-auth.cc | 555 +++++++++++++++++++--------- 66 files changed, 1029 insertions(+), 502 deletions(-) dpkg-source: warning: cannot verify inline signature for /srv/release.debian.org/tmp/tmp8gmf6mpa/pdns_4.9.7-1.dsc: no acceptable signature found dpkg-source: warning: cannot verify inline signature for /srv/release.debian.org/tmp/tmp8gmf6mpa/pdns_4.9.14-0+deb13u1.dsc: no acceptable signature found diff -Nru pdns-4.9.7/configure pdns-4.9.14/configure --- pdns-4.9.7/configure 2025-07-07 07:42:38.000000000 +0000 +++ pdns-4.9.14/configure 2026-04-08 09:58:43.000000000 +0000 @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.71 for pdns 4.9.7. +# Generated by GNU Autoconf 2.71 for pdns 4.9.14. # # # Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation, @@ -618,8 +618,8 @@ # Identity of this package. PACKAGE_NAME='pdns' PACKAGE_TARNAME='pdns' -PACKAGE_VERSION='4.9.7' -PACKAGE_STRING='pdns 4.9.7' +PACKAGE_VERSION='4.9.14' +PACKAGE_STRING='pdns 4.9.14' PACKAGE_BUGREPORT='' PACKAGE_URL='' @@ -1698,7 +1698,7 @@ # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures pdns 4.9.7 to adapt to many kinds of systems. +\`configure' configures pdns 4.9.14 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1769,7 +1769,7 @@ if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of pdns 4.9.7:";; + short | recursive ) echo "Configuration of pdns 4.9.14:";; esac cat <<\_ACEOF @@ -2040,7 +2040,7 @@ test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -pdns configure 4.9.7 +pdns configure 4.9.14 generated by GNU Autoconf 2.71 Copyright (C) 2021 Free Software Foundation, Inc. 
@@ -2529,7 +2529,7 @@ This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by pdns $as_me 4.9.7, which was +It was created by pdns $as_me 4.9.14, which was generated by GNU Autoconf 2.71. Invocation command line was $ $0$ac_configure_args_raw @@ -4027,7 +4027,7 @@ # Define the identity of the package. PACKAGE='pdns' - VERSION='4.9.7' + VERSION='4.9.14' printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h @@ -23339,6 +23339,7 @@ LIBCRYPTO_LIBS=`$PKG_CONFIG libcrypto --libs-only-l 2>/dev/null` LIBCRYPTO_INCLUDES=`$PKG_CONFIG libcrypto --cflags-only-I 2>/dev/null` ssldir=`$PKG_CONFIG libcrypto --variable=prefix 2>/dev/null` + sslincdir=`$PKG_CONFIG libcrypto --variable=includedir 2>/dev/null` found=true fi fi @@ -23365,6 +23366,7 @@ LIBCRYPTO_INCLUDES="-I$ssldir/include" LIBCRYPTO_LDFLAGS="-L$ssldir/lib" LIBCRYPTO_LIBS="-lcrypto" + sslincdir="$ssldir/include" found=true { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5 printf "%s\n" "yes" >&6; } @@ -23516,13 +23518,13 @@ # Find the headers we need for ECDSA libcrypto_ecdsa=yes - as_ac_Header=`printf "%s\n" "ac_cv_header_$ssldir/include/openssl/ecdsa.h" | $as_tr_sh` -ac_fn_cxx_check_header_compile "$LINENO" "$ssldir/include/openssl/ecdsa.h" "$as_ac_Header" "$ac_includes_default" + as_ac_Header=`printf "%s\n" "ac_cv_header_$sslincdir/openssl/ecdsa.h" | $as_tr_sh` +ac_fn_cxx_check_header_compile "$LINENO" "$sslincdir/openssl/ecdsa.h" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes" then : ac_fn_check_decl "$LINENO" "NID_X9_62_prime256v1" "ac_cv_have_decl_NID_X9_62_prime256v1" "$ac_includes_default -#include <$ssldir/include/openssl/evp.h> +#include <$sslincdir/openssl/evp.h> " "$ac_cxx_undeclared_builtin_options" "CXXFLAGS" if test "x$ac_cv_have_decl_NID_X9_62_prime256v1" = xyes @@ -23541,7 +23543,7 @@ fi ac_fn_check_decl "$LINENO" "NID_secp384r1" "ac_cv_have_decl_NID_secp384r1" 
"$ac_includes_default -#include <$ssldir/include/openssl/evp.h> +#include <$sslincdir/openssl/evp.h> " "$ac_cxx_undeclared_builtin_options" "CXXFLAGS" if test "x$ac_cv_have_decl_NID_secp384r1" = xyes @@ -23597,7 +23599,7 @@ libcrypto_ed25519=no libcrypto_ed448=no ac_fn_check_decl "$LINENO" "NID_ED25519" "ac_cv_have_decl_NID_ED25519" "$ac_includes_default - #include <$ssldir/include/openssl/evp.h> + #include <$sslincdir/openssl/evp.h> " "$ac_cxx_undeclared_builtin_options" "CXXFLAGS" if test "x$ac_cv_have_decl_NID_ED25519" = xyes then : @@ -23619,7 +23621,7 @@ fi ac_fn_check_decl "$LINENO" "NID_ED448" "ac_cv_have_decl_NID_ED448" "$ac_includes_default - #include <$ssldir/include/openssl/evp.h> + #include <$sslincdir/openssl/evp.h> " "$ac_cxx_undeclared_builtin_options" "CXXFLAGS" if test "x$ac_cv_have_decl_NID_ED448" = xyes then : @@ -32467,7 +32469,7 @@ # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by pdns $as_me 4.9.7, which was +This file was extended by pdns $as_me 4.9.14, which was generated by GNU Autoconf 2.71. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -32535,7 +32537,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config='$ac_cs_config_escaped' ac_cs_version="\\ -pdns config.status 4.9.7 +pdns config.status 4.9.14 configured by $0, generated by GNU Autoconf 2.71, with options \\"\$ac_cs_config\\" diff -Nru pdns-4.9.7/configure.ac pdns-4.9.14/configure.ac --- pdns-4.9.7/configure.ac 2025-07-07 07:42:29.000000000 +0000 +++ pdns-4.9.14/configure.ac 2026-04-08 09:58:31.000000000 +0000 @@ -1,6 +1,6 @@ AC_PREREQ([2.69]) -AC_INIT([pdns], [4.9.7]) +AC_INIT([pdns], [4.9.14]) AC_CONFIG_AUX_DIR([build-aux]) AM_INIT_AUTOMAKE([foreign dist-bzip2 no-dist-gzip tar-ustar -Wno-portability subdir-objects parallel-tests 1.11]) AM_SILENT_RULES([yes]) diff -Nru pdns-4.9.7/debian/changelog pdns-4.9.14/debian/changelog --- pdns-4.9.7/debian/changelog 2025-07-07 10:15:52.000000000 +0000 +++ pdns-4.9.14/debian/changelog 2026-04-26 19:40:16.000000000 +0000 @@ -1,3 +1,13 @@ +pdns (4.9.14-0+deb13u1) trixie-security; urgency=medium + + * New upstream version 4.9.14, fixing security issues CVE-2026-33257, + CVE-2026-33260, CVE-2026-33611, CVE-2026-33610, CVE-2026-33609, + CVE-2026-33608. 
+ * d/gbp.conf: setup for trixie + * d/watch: restrict to 4.x series + + -- Chris Hofstaedtler Sun, 26 Apr 2026 21:40:16 +0200 + pdns (4.9.7-1) unstable; urgency=medium * New upstream version 4.9.7 diff -Nru pdns-4.9.7/debian/gbp.conf pdns-4.9.14/debian/gbp.conf --- pdns-4.9.7/debian/gbp.conf 2025-03-21 12:23:34.000000000 +0000 +++ pdns-4.9.14/debian/gbp.conf 2026-04-26 19:39:09.000000000 +0000 @@ -1,2 +1,4 @@ [DEFAULT] pristine-tar = True +debian-branch = debian/trixie +upstream-branch = upstream/trixie diff -Nru pdns-4.9.7/debian/watch pdns-4.9.14/debian/watch --- pdns-4.9.7/debian/watch 2025-03-21 12:23:34.000000000 +0000 +++ pdns-4.9.14/debian/watch 2026-04-26 19:39:35.000000000 +0000 @@ -1,3 +1,3 @@ # Site Directory Pattern Version Script version=3 -opts="versionmangle=s/-(alpha|beta|rc)/~$1/" https://downloads.powerdns.com/releases/ pdns-(\d+.*)\.tar\.bz2 debian uupdate +opts="versionmangle=s/-(alpha|beta|rc)/~$1/" https://downloads.powerdns.com/releases/ pdns-(4\.\d+.*)\.tar\.bz2 debian uupdate diff -Nru pdns-4.9.7/docs/Makefile.am pdns-4.9.14/docs/Makefile.am --- pdns-4.9.7/docs/Makefile.am 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/docs/Makefile.am 2026-04-08 09:58:17.000000000 +0000 @@ -68,7 +68,7 @@ .venv: requirements.txt $(PYTHON) -m venv .venv - .venv/bin/pip install -U pip setuptools setuptools-git wheel + .venv/bin/pip install -U pip "setuptools<82" setuptools-git wheel .venv/bin/pip install -r ${srcdir}/requirements.txt .NOTPARALLEL: \ diff -Nru pdns-4.9.7/docs/Makefile.in pdns-4.9.14/docs/Makefile.in --- pdns-4.9.7/docs/Makefile.in 2025-07-07 07:42:40.000000000 +0000 +++ pdns-4.9.14/docs/Makefile.in 2026-04-08 09:58:46.000000000 +0000 @@ -787,7 +787,7 @@ @HAVE_VENV_TRUE@.venv: requirements.txt @HAVE_VENV_TRUE@ $(PYTHON) -m venv .venv -@HAVE_VENV_TRUE@ .venv/bin/pip install -U pip setuptools setuptools-git wheel +@HAVE_VENV_TRUE@ .venv/bin/pip install -U pip "setuptools<82" setuptools-git wheel @HAVE_VENV_TRUE@ .venv/bin/pip install -r 
${srcdir}/requirements.txt @HAVE_VENV_TRUE@.NOTPARALLEL: \ diff -Nru pdns-4.9.7/docs/calidns.1 pdns-4.9.14/docs/calidns.1 --- pdns-4.9.7/docs/calidns.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/calidns.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "CALIDNS" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "CALIDNS" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME calidns \- A DNS recursor testing tool .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/dnsbulktest.1 pdns-4.9.14/docs/dnsbulktest.1 --- pdns-4.9.7/docs/dnsbulktest.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/dnsbulktest.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "DNSBULKTEST" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "DNSBULKTEST" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME dnsbulktest \- A debugging tool for intermittent resolver failures .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/dnsgram.1 pdns-4.9.14/docs/dnsgram.1 --- pdns-4.9.7/docs/dnsgram.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/dnsgram.1 2026-04-08 09:59:47.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "DNSGRAM" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "DNSGRAM" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME dnsgram \- A debugging tool for intermittent resolver failures .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/dnspcap2calidns.1 pdns-4.9.14/docs/dnspcap2calidns.1 --- pdns-4.9.7/docs/dnspcap2calidns.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/dnspcap2calidns.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "DNSPCAP2CALIDNS" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "DNSPCAP2CALIDNS" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME dnspcap2calidns \- A tool to convert PCAPs of DNS traffic to calidns input .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/dnspcap2protobuf.1 pdns-4.9.14/docs/dnspcap2protobuf.1 --- pdns-4.9.7/docs/dnspcap2protobuf.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/dnspcap2protobuf.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "DNSPCAP2PROTOBUF" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "DNSPCAP2PROTOBUF" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME dnspcap2protobuf \- A tool to convert PCAPs of DNS traffic to PowerDNS Protobuf .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/dnsreplay.1 pdns-4.9.14/docs/dnsreplay.1 --- pdns-4.9.7/docs/dnsreplay.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/dnsreplay.1 2026-04-08 09:59:47.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "DNSREPLAY" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "DNSREPLAY" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME dnsreplay \- A PowerDNS nameserver debugging tool .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/dnsscan.1 pdns-4.9.14/docs/dnsscan.1 --- pdns-4.9.7/docs/dnsscan.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/dnsscan.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "DNSSCAN" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "DNSSCAN" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME dnsscan \- List the amount of queries per qtype in a pcap .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/dnsscope.1 pdns-4.9.14/docs/dnsscope.1 --- pdns-4.9.7/docs/dnsscope.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/dnsscope.1 2026-04-08 09:59:47.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "DNSSCOPE" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "DNSSCOPE" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME dnsscope \- A PowerDNS nameserver debugging tool .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/dnstcpbench.1 pdns-4.9.14/docs/dnstcpbench.1 --- pdns-4.9.7/docs/dnstcpbench.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/dnstcpbench.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "DNSTCPBENCH" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "DNSTCPBENCH" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME dnstcpbench \- tool to perform TCP benchmarking of nameservers .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/dnswasher.1 pdns-4.9.14/docs/dnswasher.1 --- pdns-4.9.7/docs/dnswasher.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/dnswasher.1 2026-04-08 09:59:47.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "DNSWASHER" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "DNSWASHER" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME dnswasher \- A PowerDNS nameserver debugging tool .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/dumresp.1 pdns-4.9.14/docs/dumresp.1 --- pdns-4.9.7/docs/dumresp.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/dumresp.1 2026-04-08 09:59:47.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "DUMRESP" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "DUMRESP" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME dumresp \- A dumb DNS responder .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/ixfrdist.1 pdns-4.9.14/docs/ixfrdist.1 --- pdns-4.9.7/docs/ixfrdist.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/ixfrdist.1 2026-04-08 09:59:47.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "IXFRDIST" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "IXFRDIST" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME ixfrdist \- An IXFR/AXFR-only server that re-distributes zones .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/ixfrdist.yml.5 pdns-4.9.14/docs/ixfrdist.yml.5 --- pdns-4.9.7/docs/ixfrdist.yml.5 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/ixfrdist.yml.5 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "IXFRDIST.YML" "5" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "IXFRDIST.YML" "5" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME ixfrdist.yml \- The ixfrdist configuration file .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/ixplore.1 pdns-4.9.14/docs/ixplore.1 --- pdns-4.9.7/docs/ixplore.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/ixplore.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "IXPLORE" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "IXPLORE" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME ixplore \- A tool that provides insights into IXFRs .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/nproxy.1 pdns-4.9.14/docs/nproxy.1 --- pdns-4.9.7/docs/nproxy.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/nproxy.1 2026-04-08 09:59:47.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "NPROXY" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "NPROXY" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME nproxy \- DNS notification proxy .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/nsec3dig.1 pdns-4.9.14/docs/nsec3dig.1 --- pdns-4.9.7/docs/nsec3dig.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/nsec3dig.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "NSEC3DIG" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "NSEC3DIG" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME nsec3dig \- Show and validate NSEC3 proofs .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/pdns_control.1 pdns-4.9.14/docs/pdns_control.1 --- pdns-4.9.7/docs/pdns_control.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/pdns_control.1 2026-04-08 09:59:47.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "PDNS_CONTROL" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "PDNS_CONTROL" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME pdns_control \- Control the PowerDNS nameserver .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/pdns_notify.1 pdns-4.9.14/docs/pdns_notify.1 --- pdns-4.9.7/docs/pdns_notify.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/pdns_notify.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "PDNS_NOTIFY" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "PDNS_NOTIFY" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME pdns_notify \- A simple DNS NOTIFY sender .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/pdns_server.1 pdns-4.9.14/docs/pdns_server.1 --- pdns-4.9.7/docs/pdns_server.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/pdns_server.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "PDNS_SERVER" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "PDNS_SERVER" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME pdns_server \- The PowerDNS Authoritative Nameserver .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/pdnsutil.1 pdns-4.9.14/docs/pdnsutil.1 --- pdns-4.9.7/docs/pdnsutil.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/pdnsutil.1 2026-04-08 09:59:47.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "PDNSUTIL" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "PDNSUTIL" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME pdnsutil \- PowerDNS record and DNSSEC command and control .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/saxfr.1 pdns-4.9.14/docs/saxfr.1 --- pdns-4.9.7/docs/saxfr.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/saxfr.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "SAXFR" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "SAXFR" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME saxfr \- Perform AXFRs and show information about it .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/sdig.1 pdns-4.9.14/docs/sdig.1 --- pdns-4.9.7/docs/sdig.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/sdig.1 2026-04-08 09:59:47.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "SDIG" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "SDIG" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME sdig \- Perform a DNS query and show the results .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/zone2json.1 pdns-4.9.14/docs/zone2json.1 --- pdns-4.9.7/docs/zone2json.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/zone2json.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "ZONE2JSON" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "ZONE2JSON" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME zone2json \- convert BIND zones to JSON .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/zone2ldap.1 pdns-4.9.14/docs/zone2ldap.1 --- pdns-4.9.7/docs/zone2ldap.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/zone2ldap.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. 
-.TH "ZONE2LDAP" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "ZONE2LDAP" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME zone2ldap \- convert zonefiles to ldif .SH SYNOPSIS diff -Nru pdns-4.9.7/docs/zone2sql.1 pdns-4.9.14/docs/zone2sql.1 --- pdns-4.9.7/docs/zone2sql.1 2025-07-07 07:43:48.000000000 +0000 +++ pdns-4.9.14/docs/zone2sql.1 2026-04-08 09:59:48.000000000 +0000 @@ -27,7 +27,7 @@ .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "ZONE2SQL" "1" "Jul 07, 2025" "" "PowerDNS Authoritative Server" +.TH "ZONE2SQL" "1" "Apr 08, 2026" "" "PowerDNS Authoritative Server" .SH NAME zone2sql \- convert BIND zones to SQL .SH SYNOPSIS diff -Nru pdns-4.9.7/ext/yahttp/yahttp/reqresp.cpp pdns-4.9.14/ext/yahttp/yahttp/reqresp.cpp --- pdns-4.9.7/ext/yahttp/yahttp/reqresp.cpp 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/ext/yahttp/yahttp/reqresp.cpp 2026-04-08 09:58:17.000000000 +0000 @@ -40,7 +40,19 @@ } template - bool AsyncLoader::feed(const std::string& somedata) { + bool AsyncLoader::feed(const std::string& somedata) + { + if (state < 2) { + headersize += somedata.length(); // maye include some body data, we don't know yet... + if (headersize > target->max_header_size) { + if (target->kind == YAHTTP_TYPE_REQUEST) { + throw ParseError("Request header too large"); + } + else { + throw ParseError("Response header too large"); + } + } + } buffer.append(somedata); while(state < 2) { int cr=0; @@ -155,8 +167,8 @@ maxbody = minbody; } if (minbody < 1) return true; // guess there isn't anything left. 
- if (target->kind == YAHTTP_TYPE_REQUEST && static_cast(minbody) > target->max_request_size) throw ParseError("Max request body size exceeded"); - else if (target->kind == YAHTTP_TYPE_RESPONSE && static_cast(minbody) > target->max_response_size) throw ParseError("Max response body size exceeded"); + if (target->kind == YAHTTP_TYPE_REQUEST && minbody > target->max_request_size) throw ParseError("Max request body size exceeded"); + else if (target->kind == YAHTTP_TYPE_RESPONSE && minbody > target->max_response_size) throw ParseError("Max response body size exceeded"); } if (maxbody == 0) hasBody = false; @@ -175,20 +187,23 @@ buffer.copy(buf, pos); buf[pos]=0; // just in case... buffer.erase(buffer.begin(), buffer.begin()+pos+1); // remove line from buffer - if (sscanf(buf, "%x", &chunk_size) != 1) { + if (sscanf(buf, "%zx", &chunk_size) != 1) { throw ParseError("Unable to parse chunk size"); } if (chunk_size == 0) { state = 3; break; } // last chunk - if (chunk_size > (std::numeric_limits::max() - 2)) { + if (chunk_size > (std::numeric_limits::max() - 2) || chunk_size > maxbody) { throw ParseError("Chunk is too large"); } } else { int crlf=1; - if (buffer.size() < static_cast(chunk_size+1)) return false; // expect newline + if (buffer.size() < chunk_size+1) return false; // expect newline if (buffer.at(chunk_size) == '\r') { - if (buffer.size() < static_cast(chunk_size+2) || buffer.at(chunk_size+1) != '\n') return false; // expect newline after carriage return + if (buffer.size() < chunk_size+2 || buffer.at(chunk_size+1) != '\n') return false; // expect newline after carriage return crlf=2; } else if (buffer.at(chunk_size) != '\n') return false; + if (bodybuf.str().length() + chunk_size > maxbody) { + throw ParseError("Chunked body is too large"); + } std::string tmp = buffer.substr(0, chunk_size); buffer.erase(buffer.begin(), buffer.begin()+chunk_size+crlf); bodybuf << tmp; diff -Nru pdns-4.9.7/ext/yahttp/yahttp/reqresp.hpp 
pdns-4.9.14/ext/yahttp/yahttp/reqresp.hpp --- pdns-4.9.7/ext/yahttp/yahttp/reqresp.hpp 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/ext/yahttp/yahttp/reqresp.hpp 2026-04-08 09:58:17.000000000 +0000 @@ -20,6 +20,10 @@ #include +#ifndef YAHTTP_MAX_HEADER_SIZE +#define YAHTTP_MAX_HEADER_SIZE (100 * 1024) +#endif + #ifndef YAHTTP_MAX_REQUEST_SIZE #define YAHTTP_MAX_REQUEST_SIZE 2097152 #endif @@ -107,6 +111,7 @@ #endif max_request_size = YAHTTP_MAX_REQUEST_SIZE; max_response_size = YAHTTP_MAX_RESPONSE_SIZE; + max_header_size = YAHTTP_MAX_HEADER_SIZE; url = ""; method = ""; statusText = ""; @@ -129,6 +134,7 @@ this->parameters = rhs.parameters; this->getvars = rhs.getvars; this->body = rhs.body; this->max_request_size = rhs.max_request_size; this->max_response_size = rhs.max_response_size; this->version = rhs.version; + this->max_header_size = rhs.max_header_size; #ifdef HAVE_CPP_FUNC_PTR this->renderer = rhs.renderer; #endif @@ -142,6 +148,7 @@ this->parameters = rhs.parameters; this->getvars = rhs.getvars; this->body = rhs.body; this->max_request_size = rhs.max_request_size; this->max_response_size = rhs.max_response_size; this->version = rhs.version; + this->max_header_size = rhs.max_header_size; #ifdef HAVE_CPP_FUNC_PTR this->renderer = rhs.renderer; #endif @@ -165,8 +172,9 @@ std::string body; // renderer; //target = target_; hasBody = false; buffer = ""; + headersize = 0; this->target->initialize(); }; //method != method) { // method did not match, record it though so we can return correct result matched = false; - seen = true; + // The OPTIONS handler registered in pdns/webserver.cc matches every + // url, and would cause "not found" errors to always be superseded + // with "found, but wrong method" errors, so don't pretend there has + // been a match in this case. 
+ if (method != "OPTIONS") { + seen = true; + } continue; } if (matched) { diff -Nru pdns-4.9.7/m4/pdns_check_libcrypto.m4 pdns-4.9.14/m4/pdns_check_libcrypto.m4 --- pdns-4.9.7/m4/pdns_check_libcrypto.m4 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/m4/pdns_check_libcrypto.m4 2026-04-08 09:58:17.000000000 +0000 @@ -55,6 +55,7 @@ LIBCRYPTO_LIBS=`$PKG_CONFIG libcrypto --libs-only-l 2>/dev/null` LIBCRYPTO_INCLUDES=`$PKG_CONFIG libcrypto --cflags-only-I 2>/dev/null` ssldir=`$PKG_CONFIG libcrypto --variable=prefix 2>/dev/null` + sslincdir=`$PKG_CONFIG libcrypto --variable=includedir 2>/dev/null` found=true fi fi @@ -78,6 +79,7 @@ LIBCRYPTO_INCLUDES="-I$ssldir/include" LIBCRYPTO_LDFLAGS="-L$ssldir/lib" LIBCRYPTO_LIBS="-lcrypto" + sslincdir="$ssldir/include" found=true AC_MSG_RESULT([yes]) break diff -Nru pdns-4.9.7/m4/pdns_check_libcrypto_ecdsa.m4 pdns-4.9.14/m4/pdns_check_libcrypto_ecdsa.m4 --- pdns-4.9.7/m4/pdns_check_libcrypto_ecdsa.m4 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/m4/pdns_check_libcrypto_ecdsa.m4 2026-04-08 09:58:17.000000000 +0000 @@ -12,11 +12,11 @@ # Find the headers we need for ECDSA libcrypto_ecdsa=yes - AC_CHECK_HEADER([$ssldir/include/openssl/ecdsa.h], [ + AC_CHECK_HEADER([$sslincdir/openssl/ecdsa.h], [ AC_CHECK_DECLS([NID_X9_62_prime256v1, NID_secp384r1], [ : ], [ libcrypto_ecdsa=no ], [AC_INCLUDES_DEFAULT -#include <$ssldir/include/openssl/evp.h> +#include <$sslincdir/openssl/evp.h> ]) ], [ libcrypto_ecdsa=no diff -Nru pdns-4.9.7/m4/pdns_check_libcrypto_eddsa.m4 pdns-4.9.14/m4/pdns_check_libcrypto_eddsa.m4 --- pdns-4.9.7/m4/pdns_check_libcrypto_eddsa.m4 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/m4/pdns_check_libcrypto_eddsa.m4 2026-04-08 09:58:17.000000000 +0000 @@ -17,13 +17,13 @@ AC_DEFINE([HAVE_LIBCRYPTO_ED25519], [1], [define to 1 if OpenSSL ed25519 support is available.]) ], [ : ], [AC_INCLUDES_DEFAULT - #include <$ssldir/include/openssl/evp.h>]) + #include <$sslincdir/openssl/evp.h>]) AC_CHECK_DECLS([NID_ED448], [ 
libcrypto_ed448=yes AC_DEFINE([HAVE_LIBCRYPTO_ED448], [1], [define to 1 if OpenSSL ed448 support is available.]) ], [ : ], [AC_INCLUDES_DEFAULT - #include <$ssldir/include/openssl/evp.h>]) + #include <$sslincdir/openssl/evp.h>]) AS_IF([test "$libcrypto_ed25519" = "yes" -o "$libcrypto_ed448" = "yes"], [ AC_DEFINE([HAVE_LIBCRYPTO_EDDSA], [1], [define to 1 if OpenSSL EDDSA support is available.]) diff -Nru pdns-4.9.7/modules/bindbackend/bindbackend2.cc pdns-4.9.14/modules/bindbackend/bindbackend2.cc --- pdns-4.9.7/modules/bindbackend/bindbackend2.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/bindbackend/bindbackend2.cc 2026-04-08 09:58:17.000000000 +0000 @@ -1413,13 +1413,35 @@ return bbd; } -bool Bind2Backend::createSecondaryDomain(const string& ip, const DNSName& domain, const string& /* nameserver */, const string& account) +bool Bind2Backend::createSecondaryDomain(const string& ipAddress, const DNSName& domain, const string& /* nameserver */, const string& account) { - string filename = getArg("autoprimary-destdir") + '/' + domain.toStringNoDot(); + std::string domainname = domain.toStringNoDot(); + + // Reject domain name if it embeds quotes; this may happen if 8bit-dns is + // used, and bind currently does not allow for character escapes in zone + // names. 
+ if (domainname.find_first_of("\"") != std::string::npos) { + SLOG(g_log << Logger::Error << d_logprefix + << " Unable to accept autosecondary zone '" << domain + << "' from autoprimary " << ipAddress + << " due to unauthorized characters in domain name for bind configuration file" + << endl, + d_slog->error(Logr::Error, "unauthorized characters in domain name for bind configuration file", "Unable to accept autosecondary zone", "zone", Logging::Loggable(domain), "autoprimary address", Logging::Loggable(ipAddress))); + throw PDNSException("Unauthorized characters in domain name for bind configuration file"); + } + + string filename = getArg("autoprimary-destdir") + '/'; + if (domainname.empty()) { + filename.append("rootzone."); + } + else { + // Make sure the zone file name does not contain path separators. + filename.append(boost::replace_all_copy(domainname, "/", "\\047")); + } g_log << Logger::Warning << d_logprefix << " Writing bind config zone statement for superslave zone '" << domain - << "' from autoprimary " << ip << endl; + << "' from autoprimary " << ipAddress << endl; { std::lock_guard l2(s_autosecondary_config_lock); @@ -1431,18 +1453,18 @@ } c_of << endl; - c_of << "# AutoSecondary zone '" << domain.toString() << "' (added: " << nowTime() << ") (account: " << account << ')' << endl; - c_of << "zone \"" << domain.toStringNoDot() << "\" {" << endl; + c_of << "# AutoSecondary zone '" << domainname << "' (added: " << nowTime() << ") (account: " << account << ')' << endl; + c_of << "zone \"" << domainname << "\" {" << endl; c_of << "\ttype secondary;" << endl; c_of << "\tfile \"" << filename << "\";" << endl; - c_of << "\tprimaries { " << ip << "; };" << endl; + c_of << "\tprimaries { " << ipAddress << "; };" << endl; c_of << "};" << endl; c_of.close(); } BB2DomainInfo bbd = createDomainEntry(domain, filename); bbd.d_kind = DomainInfo::Secondary; - bbd.d_primaries.push_back(ComboAddress(ip, 53)); + bbd.d_primaries.push_back(ComboAddress(ipAddress, 53)); 
bbd.setCtime(); safePutBBDomainInfo(bbd); diff -Nru pdns-4.9.7/modules/geoipbackend/geoipinterface-mmdb.cc pdns-4.9.14/modules/geoipbackend/geoipinterface-mmdb.cc --- pdns-4.9.7/modules/geoipbackend/geoipinterface-mmdb.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/geoipbackend/geoipinterface-mmdb.cc 2026-04-08 09:58:17.000000000 +0000 @@ -46,8 +46,9 @@ else throw PDNSException(string("Unsupported mode ") + modeStr + ("for geoipbackend-mmdb")); memset(&d_s, 0, sizeof(d_s)); - if ((ec = MMDB_open(fname.c_str(), flags, &d_s)) < 0) + if ((ec = MMDB_open(fname.c_str(), flags, &d_s)) != MMDB_SUCCESS) { throw PDNSException(string("Cannot open ") + fname + string(": ") + string(MMDB_strerror(ec))); + } d_lang = language; g_log << Logger::Debug << "Opened MMDB database " << fname << "(type: " << d_s.metadata.database_type << " version: " << d_s.metadata.binary_format_major_version << "." << d_s.metadata.binary_format_minor_version << ")" << endl; } diff -Nru pdns-4.9.7/modules/gmysqlbackend/gmysqlbackend.cc pdns-4.9.14/modules/gmysqlbackend/gmysqlbackend.cc --- pdns-4.9.7/modules/gmysqlbackend/gmysqlbackend.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/gmysqlbackend/gmysqlbackend.cc 2026-04-08 09:58:17.000000000 +0000 @@ -52,17 +52,17 @@ void gMySQLBackend::reconnect() { - setDB(new SMySQL(getArg("dbname"), - getArg("host"), - getArgAsNum("port"), - getArg("socket"), - getArg("user"), - getArg("password"), - getArg("group"), - mustDo("innodb-read-committed"), - getArgAsNum("timeout"), - mustDo("thread-cleanup"), - mustDo("ssl"))); + setDB(std::unique_ptr(new SMySQL(getArg("dbname"), + getArg("host"), + getArgAsNum("port"), + getArg("socket"), + getArg("user"), + getArg("password"), + getArg("group"), + mustDo("innodb-read-committed"), + getArgAsNum("timeout"), + mustDo("thread-cleanup"), + mustDo("ssl")))); allocateStatements(); } diff -Nru pdns-4.9.7/modules/godbcbackend/godbcbackend.cc pdns-4.9.14/modules/godbcbackend/godbcbackend.cc --- 
pdns-4.9.7/modules/godbcbackend/godbcbackend.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/godbcbackend/godbcbackend.cc 2026-04-08 09:58:17.000000000 +0000 @@ -38,7 +38,7 @@ GSQLBackend(mode, suffix) { try { - setDB(new SODBC(getArg("datasource"), getArg("username"), getArg("password"))); + setDB(std::unique_ptr(new SODBC(getArg("datasource"), getArg("username"), getArg("password")))); } catch (SSqlException& e) { g_log << Logger::Error << mode << " Connection failed: " << e.txtReason() << std::endl; diff -Nru pdns-4.9.7/modules/godbcbackend/sodbc.cc pdns-4.9.14/modules/godbcbackend/sodbc.cc --- pdns-4.9.7/modules/godbcbackend/sodbc.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/godbcbackend/sodbc.cc 2026-04-08 09:58:17.000000000 +0000 @@ -140,8 +140,10 @@ { prepareStatement(); ODBCParam p; + // NOLINTBEGIN(cppcoreguidelines-owning-memory) p.ParameterValuePtr = new UDWORD{value}; p.LenPtr = new SQLLEN{sizeof(UDWORD)}; + // NOLINTEND(cppcoreguidelines-owning-memory) p.ParameterType = SQL_INTEGER; p.ValueType = SQL_INTEGER; return bind(name, p); @@ -151,8 +153,10 @@ { prepareStatement(); ODBCParam p; + // NOLINTBEGIN(cppcoreguidelines-owning-memory) p.ParameterValuePtr = new ULONG{value}; p.LenPtr = new SQLLEN{sizeof(ULONG)}; + // NOLINTEND(cppcoreguidelines-owning-memory) p.ParameterType = SQL_INTEGER; p.ValueType = SQL_INTEGER; return bind(name, p); @@ -162,8 +166,10 @@ { prepareStatement(); ODBCParam p; + // NOLINTBEGIN(cppcoreguidelines-owning-memory) p.ParameterValuePtr = new unsigned long long{value}; p.LenPtr = new SQLLEN{sizeof(unsigned long long)}; + // NOLINTEND(cppcoreguidelines-owning-memory) p.ParameterType = SQL_BIGINT; p.ValueType = SQL_C_UBIGINT; return bind(name, p); @@ -179,11 +185,12 @@ prepareStatement(); ODBCParam p; + // NOLINTBEGIN(cppcoreguidelines-owning-memory) p.ParameterValuePtr = (char*)new char[value.size() + 1]; value.copy((char*)p.ParameterValuePtr, value.size()); 
((char*)p.ParameterValuePtr)[value.size()] = 0; - p.LenPtr = new SQLLEN; - *(p.LenPtr) = value.size(); + p.LenPtr = new SQLLEN{static_cast(value.size())}; + // NOLINTEND(cppcoreguidelines-owning-memory) p.ParameterType = SQL_VARCHAR; p.ValueType = SQL_C_CHAR; @@ -199,8 +206,9 @@ ODBCParam p; p.ParameterValuePtr = NULL; - p.LenPtr = new SQLLEN; - *(p.LenPtr) = SQL_NULL_DATA; + // NOLINTBEGIN(cppcoreguidelines-owning-memory) + p.LenPtr = new SQLLEN{SQL_NULL_DATA}; + // NOLINTEND(cppcoreguidelines-owning-memory) p.ParameterType = SQL_VARCHAR; p.ValueType = SQL_C_CHAR; @@ -262,12 +270,23 @@ SQLCloseCursor(d_statement); // hack, this probably violates some state transitions for (auto& i : d_req_bind) { - if (i.ParameterType == SQL_VARCHAR) - delete[](char*) i.ParameterValuePtr; - else if (i.ParameterType == SQL_INTEGER) - delete (ULONG*)i.ParameterValuePtr; - else if (i.ParameterType == SQL_C_UBIGINT) - delete (unsigned long long*)i.ParameterValuePtr; + // NOLINTBEGIN(cppcoreguidelines-owning-memory) + if (i.ParameterType == SQL_VARCHAR) { + delete[] static_cast(i.ParameterValuePtr); + } + else if (i.ParameterType == SQL_INTEGER) { + if (*i.LenPtr == sizeof(UDWORD)) { + delete static_cast(i.ParameterValuePtr); + } + else { + delete static_cast(i.ParameterValuePtr); + } + } + else if (i.ParameterType == SQL_C_UBIGINT) { + delete static_cast(i.ParameterValuePtr); + } + // NOLINTEND(cppcoreguidelines-owning-memory) + delete i.LenPtr; } d_req_bind.clear(); diff -Nru pdns-4.9.7/modules/gpgsqlbackend/gpgsqlbackend.cc pdns-4.9.14/modules/gpgsqlbackend/gpgsqlbackend.cc --- pdns-4.9.7/modules/gpgsqlbackend/gpgsqlbackend.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/gpgsqlbackend/gpgsqlbackend.cc 2026-04-08 09:58:17.000000000 +0000 @@ -40,13 +40,13 @@ GSQLBackend(mode, suffix) { try { - setDB(new SPgSQL(getArg("dbname"), - getArg("host"), - getArg("port"), - getArg("user"), - getArg("password"), - getArg("extra-connection-parameters"), - 
mustDo("prepared-statements"))); + setDB(std::unique_ptr(new SPgSQL(getArg("dbname"), + getArg("host"), + getArg("port"), + getArg("user"), + getArg("password"), + getArg("extra-connection-parameters"), + mustDo("prepared-statements")))); } catch (SSqlException& e) { diff -Nru pdns-4.9.7/modules/gsqlite3backend/gsqlite3backend.cc pdns-4.9.14/modules/gsqlite3backend/gsqlite3backend.cc --- pdns-4.9.7/modules/gsqlite3backend/gsqlite3backend.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/gsqlite3backend/gsqlite3backend.cc 2026-04-08 09:58:17.000000000 +0000 @@ -43,15 +43,15 @@ GSQLBackend(mode, suffix) { try { - SSQLite3* ptr = new SSQLite3(getArg("database"), getArg("pragma-journal-mode")); - setDB(ptr); - allocateStatements(); + auto ptr = std::unique_ptr(new SSQLite3(getArg("database"), getArg("pragma-journal-mode"))); if (!getArg("pragma-synchronous").empty()) { ptr->execute("PRAGMA synchronous=" + getArg("pragma-synchronous")); } if (mustDo("pragma-foreign-keys")) { ptr->execute("PRAGMA foreign_keys = 1"); } + setDB(std::move(ptr)); + allocateStatements(); } catch (SSqlException& e) { g_log << Logger::Error << mode << ": connection failed: " << e.txtReason() << std::endl; diff -Nru pdns-4.9.7/modules/ldapbackend/native.cc pdns-4.9.14/modules/ldapbackend/native.cc --- pdns-4.9.7/modules/ldapbackend/native.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/ldapbackend/native.cc 2026-04-08 09:58:17.000000000 +0000 @@ -221,7 +221,7 @@ stringtok(parts, toLower(qname.toString()), "."); for (auto i = parts.crbegin(); i != parts.crend(); i++) { - dn = "dc=" + *i + "," + dn; + dn = "dc=" + d_pldap->escape(*i) + "," + dn; } g_log << Logger::Debug << d_myname << " Search = basedn: " << dn + getArg("basedn") << ", filter: " << filter << ", qtype: " << qtype.toString() << endl; diff -Nru pdns-4.9.7/modules/ldapbackend/powerldap.cc pdns-4.9.14/modules/ldapbackend/powerldap.cc --- pdns-4.9.7/modules/ldapbackend/powerldap.cc 2025-07-07 
07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/ldapbackend/powerldap.cc 2026-04-08 09:58:17.000000000 +0000 @@ -392,22 +392,68 @@ return ldapGetError(d_ld, rc); } -const string PowerLDAP::escape(const string& str) +// Escape sensitive characters according to the rules in RFC4514, section 2.4 +// and RFC4515, section 3. +const std::string PowerLDAP::escape(const string& input) { - string a; - string::const_iterator i; - char tmp[4]; + std::string out; + std::array hexbuf{}; + auto length = input.length(); - for (i = str.begin(); i != str.end(); i++) { - // RFC4515 3 - if ((unsigned char)*i == '*' || (unsigned char)*i == '(' || (unsigned char)*i == ')' || (unsigned char)*i == '\\' || (unsigned char)*i == '\0' || (unsigned char)*i > 127) { - snprintf(tmp, sizeof(tmp), "\\%02x", (unsigned char)*i); - - a += tmp; + out.reserve(length); + for (decltype(length) pos = 0; pos < length; ++pos) { + uint8_t chr = static_cast(input[pos]); + // Perform UTF-8 encoding of 8-bit values if needed + if (chr >= 0x80) { + ::snprintf(hexbuf.data(), hexbuf.size(), "\\%02X", + static_cast(0xc0 | ((chr >> 6) & 0x3f))); + out.append(hexbuf.data()); + ::snprintf(hexbuf.data(), hexbuf.size(), "\\%02X", + static_cast(0x80 | (chr & 0x3f))); + out.append(hexbuf.data()); + } + else { + bool escape4514{false}; + bool escape4515{false}; + // Characters which need escaping regardless of their position + switch (chr) { + case '"': + case '+': + case ',': + case ';': + case '<': + case '>': + escape4514 = true; + break; + case '*': + case '(': + case ')': + case '\\': + case '\0': + escape4515 = true; + break; + default: + break; + } + // Characters which need escaping if in first position + if (pos == 0) { + escape4514 |= chr == ' ' || chr == '#'; + } + // Characters which need escaping if in last position + if (pos == length - 1) { + escape4514 |= chr == ' '; + } + if (escape4515) { + ::snprintf(hexbuf.data(), hexbuf.size(), "\\%02X", chr); + out.append(hexbuf.data()); + } + else { + if 
(escape4514) { + out.append(1, '\\'); + } + out.append(1, chr); + } } - else - a += *i; } - - return a; + return out; } diff -Nru pdns-4.9.7/modules/lmdbbackend/lmdbbackend.cc pdns-4.9.14/modules/lmdbbackend/lmdbbackend.cc --- pdns-4.9.7/modules/lmdbbackend/lmdbbackend.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/lmdbbackend/lmdbbackend.cc 2026-04-08 09:58:17.000000000 +0000 @@ -645,6 +645,34 @@ return true; } +// Serial number cache + +// Retrieve the transient domain info for the given domain, if any +bool LMDBBackend::TransientDomainInfoCache::get(uint32_t domainid, TransientDomainInfo& data) const +{ + if (auto iter = d_data.find(domainid); iter != d_data.end()) { + data = iter->second; + return true; + } + return false; +} + +// Remove the transient domain info for the given domain +void LMDBBackend::TransientDomainInfoCache::remove(uint32_t domainid) +{ + if (auto iter = d_data.find(domainid); iter != d_data.end()) { + d_data.erase(iter); + } +} + +// Create or update the transient domain info for the given domain +void LMDBBackend::TransientDomainInfoCache::update(uint32_t domainid, const TransientDomainInfo& data) +{ + d_data.insert_or_assign(domainid, data); +} + +SharedLockGuarded LMDBBackend::s_transient_domain_info; + LMDBBackend::LMDBBackend(const std::string& suffix) { // overlapping domain ids in combination with relative names are a recipe for disaster @@ -673,6 +701,8 @@ throw std::runtime_error(std::string("Unable to parse the 'map-size' LMDB value: ") + e.what()); } + d_write_notification_update = mustDo("write-notification-update"); + if (mustDo("lightning-stream")) { d_random_ids = true; d_handle_dups = true; @@ -779,6 +809,23 @@ d_dolog = ::arg().mustDo("query-logging"); } +LMDBBackend::~LMDBBackend() +{ + // LMDB internals require that, if we have multiple transactions active, + // we destroy them in the reverse order of their creation, thus we can't + // let the default destructor take care of d_rotxn and d_rwtxn. 
+ if (d_txnorder) { + // RO transaction more recent than RW transaction + d_rotxn.reset(); + d_rwtxn.reset(); + } + else { + // RW transaction more recent than RO transaction + d_rwtxn.reset(); + d_rotxn.reset(); + } +} + namespace boost { namespace serialization @@ -1002,6 +1049,58 @@ } } +bool LMDBBackend::findDomain(const DNSName& domain, DomainInfo& info) const +{ + auto rotxn = d_tdomains->getROTransaction(); + auto domain_id = rotxn.get<0>(domain, info); + if (domain_id == 0) { + return false; + } + info.id = static_cast(domain_id); + return true; +} + +bool LMDBBackend::findDomain(uint32_t domainid, DomainInfo& info) const +{ + auto rotxn = d_tdomains->getROTransaction(); + if (!rotxn.get(domainid, info)) { + return false; + } + info.id = domainid; + return true; +} + +void LMDBBackend::consolidateDomainInfo(DomainInfo& info) const +{ + // Update the DomainInfo values if we have cached data in memory. + if (!d_write_notification_update) { + auto container = s_transient_domain_info.read_lock(); + TransientDomainInfo tdi; + if (container->get(info.id, tdi)) { + info.notified_serial = tdi.notified_serial; + info.last_check = tdi.last_check; + } + } +} + +void LMDBBackend::writeDomainInfo(const DomainInfo& info) +{ + if (!d_write_notification_update) { + auto container = s_transient_domain_info.write_lock(); + TransientDomainInfo tdi; + if (container->get(info.id, tdi)) { + // Only remove the in-memory value if it has not been modified since the + // DomainInfo data was set up. + if (tdi.notified_serial == info.notified_serial && tdi.last_check == info.last_check) { + container->remove(info.id); + } + } + } + auto txn = d_tdomains->getRWTransaction(); + txn.put(info, info.id); + txn.commit(); +} + /* Here's the complicated story. Other backends have just one transaction, which is either on or not. 
@@ -1019,17 +1118,17 @@ // cout <<"startTransaction("<getROTransaction(); - DomainInfo di; - real_id = rotxn.get<0>(domain, di); - // cout<<"real_id = "<(info.id); } if (d_rwtxn) { throw DBException("Attempt to start a transaction while one was open already"); } d_rwtxn = getRecordsRWTransaction(real_id); + d_txnorder = false; d_transactiondomain = domain; d_transactiondomainid = real_id; @@ -1167,15 +1266,15 @@ needCommit = true; } - DomainInfo di; - if (!d_tdomains->getROTransaction().get(domain_id, di)) { + DomainInfo info; + if (!findDomain(domain_id, info)) { return false; } compoundOrdername co; auto cursor = txn->txn->getCursor(txn->db->dbi); MDBOutVal key, val; - string match = co(domain_id, qname.makeRelative(di.zone), qt.getCode()); + string match = co(domain_id, qname.makeRelative(info.zone), qt.getCode()); if (!cursor.find(match, key, val)) { cursor.del(); } @@ -1185,7 +1284,7 @@ for (const auto& rr : rrset) { LMDBResourceRecord lrr(rr); lrr.content = serializeContent(lrr.qtype.getCode(), lrr.qname, lrr.content); - lrr.qname.makeUsRelative(di.zone); + lrr.qname.makeUsRelative(info.zone); adjustedRRSet.emplace_back(lrr); } @@ -1324,10 +1423,10 @@ if (!d_handle_dups) { // get domain id - auto txn = d_tdomains->getROTransaction(); - - DomainInfo di; - idvec.push_back(txn.get<0>(domain, di)); + DomainInfo info; + if (findDomain(domain, info)) { + idvec.push_back(info.id); + } } else { // this transaction used to be RO. 
@@ -1372,6 +1471,10 @@ commitTransaction(); // Remove zone + { + auto container = s_transient_domain_info.write_lock(); + container->remove(static_cast(id)); + } auto txn = d_tdomains->getRWTransaction(); txn.del(id); txn.commit(); @@ -1386,23 +1489,19 @@ { d_includedisabled = include_disabled; - DomainInfo di; - { - auto dtxn = d_tdomains->getROTransaction(); - if ((di.id = dtxn.get<0>(target, di))) { - // cerr << "Found domain " << target << " on domain_id " << di.id << ", list requested " << id << endl; - } - else { - // cerr << "Did not find " << target << endl; - return false; - } + DomainInfo info; + if (!findDomain(target, info)) { + // cerr << "Did not find " << target << endl; + return false; } + // cerr << "Found domain " << target << " on domain_id " << info.id << ", list requested " << id << endl; - d_rotxn = getRecordsROTransaction(di.id, d_rwtxn); + d_rotxn = getRecordsROTransaction(info.id, d_rwtxn); + d_txnorder = true; d_getcursor = std::make_shared(d_rotxn->txn->getCursor(d_rotxn->db->dbi)); compoundOrdername co; - d_matchkey = co(di.id); + d_matchkey = co(info.id); MDBOutVal key, val; if (d_getcursor->prefix(d_matchkey, key, val) != 0) { @@ -1428,26 +1527,26 @@ d_includedisabled = false; DNSName hunt(qdomain); - DomainInfo di; + DomainInfo info; if (zoneId < 0) { - auto rotxn = d_tdomains->getROTransaction(); - do { - zoneId = rotxn.get<0>(hunt, di); - } while (!zoneId && type != QType::SOA && hunt.chopOff()); - if (zoneId <= 0) { + if (findDomain(hunt, info)) { + break; + } + } while (type != QType::SOA && hunt.chopOff()); + if (info.id <= 0) { // cout << "Did not find zone for "<< qdomain<getROTransaction().get(zoneId, di)) { + if (!findDomain(zoneId, info)) { // cout<<"Could not find a zone with id "<(d_rotxn->txn->getCursor(d_rotxn->db->dbi)); MDBOutVal key, val; if (type.getCode() == QType::ANY) { - d_matchkey = co(zoneId, relqname); + d_matchkey = co(info.id, relqname); } else { - d_matchkey = co(zoneId, relqname, type.getCode()); + 
d_matchkey = co(info.id, relqname, type.getCode()); } if (d_getcursor->prefix(d_matchkey, key, val) != 0) { @@ -1513,6 +1613,7 @@ } serFromString(d_currentVal.get(), d_currentrrset); + d_currentrrsettime = static_cast(LMDBLS::LSgetTimestamp(d_currentVal.getNoStripHeader()) / (1000UL * 1000UL * 1000UL)); d_currentrrsetpos = 0; } else { @@ -1567,6 +1668,7 @@ rr.domain_id = zr.domain_id; rr.auth = zr.auth; rr.disabled = zr.disabled; + rr.last_modified = d_currentrrsettime; return true; } @@ -1593,27 +1695,11 @@ bool LMDBBackend::getDomainInfo(const DNSName& domain, DomainInfo& di, bool getserial) { - { - auto txn = d_tdomains->getROTransaction(); - // auto range = txn.prefix_range<0>(domain); - - // bool found = false; - - // for (auto& iter = range.first ; iter != range.second; ++iter) { - // found = true; - // di.id = iter.getID(); - // di.backend = this; - // } - - // if (!found) { - // return false; - // } - if (!(di.id = txn.get<0>(domain, di))) { - return false; - } - - di.backend = this; + if (!findDomain(domain, di)) { + return false; } + di.backend = this; + consolidateDomainInfo(di); if (getserial) { getSerial(di); @@ -1624,32 +1710,25 @@ int LMDBBackend::genChangeDomain(const DNSName& domain, const std::function& func) { - auto txn = d_tdomains->getRWTransaction(); - - DomainInfo di; - - auto id = txn.get<0>(domain, di); - func(di); - txn.put(di, id); - - txn.commit(); + DomainInfo info; + if (!findDomain(domain, info)) { + return static_cast(false); + } + consolidateDomainInfo(info); + func(info); + writeDomainInfo(info); return true; } int LMDBBackend::genChangeDomain(uint32_t id, const std::function& func) { - DomainInfo di; - - auto txn = d_tdomains->getRWTransaction(); - - if (!txn.get(id, di)) - return false; - - func(di); - - txn.put(di, id); - - txn.commit(); + DomainInfo info; + if (!findDomain(id, info)) { + return static_cast(false); + } + consolidateDomainInfo(info); + func(info); + writeDomainInfo(info); return true; } @@ -1676,20 +1755,20 @@ 
bool LMDBBackend::createDomain(const DNSName& domain, const DomainInfo::DomainKind kind, const vector& primaries, const string& account) { - DomainInfo di; + DomainInfo info; + if (findDomain(domain, info)) { + throw DBException("Domain '" + domain.toLogString() + "' exists already"); + } { auto txn = d_tdomains->getRWTransaction(); - if (txn.get<0>(domain, di)) { - throw DBException("Domain '" + domain.toLogString() + "' exists already"); - } - di.zone = domain; - di.kind = kind; - di.primaries = primaries; - di.account = account; + info.zone = domain; + info.kind = kind; + info.primaries = primaries; + info.account = account; - txn.put(di, 0, d_random_ids); + txn.put(info, 0, d_random_ids); txn.commit(); } @@ -1714,21 +1793,17 @@ } for (const auto& zone : dups) { - DomainInfo di; - + DomainInfo info; // this get grabs the oldest item if there are duplicates - di.id = txn.get<0>(zone, di); - - if (di.id == 0) { - // .get actually found nothing for us + if (!findDomain(zone, info)) { continue; } - - di.backend = this; - zonemap[di.zone] = di; + info.backend = this; + zonemap[info.zone] = info; } for (auto& [k, v] : zonemap) { + consolidateDomainInfo(v); if (allow(v)) { domains->push_back(std::move(v)); } @@ -1740,6 +1815,7 @@ di.id = iter.getID(); di.backend = this; + consolidateDomainInfo(di); if (allow(di)) { domains->push_back(di); } @@ -1793,16 +1869,34 @@ void LMDBBackend::setStale(uint32_t domain_id) { - genChangeDomain(domain_id, [](DomainInfo& di) { - di.last_check = 0; - }); + setLastCheckTime(domain_id, 0); } void LMDBBackend::setFresh(uint32_t domain_id) { - genChangeDomain(domain_id, [](DomainInfo& di) { - di.last_check = time(nullptr); - }); + setLastCheckTime(domain_id, time(nullptr)); +} + +void LMDBBackend::setLastCheckTime(uint32_t domain_id, time_t last_check) +{ + if (d_write_notification_update) { + genChangeDomain(domain_id, [last_check](DomainInfo& info) { + info.last_check = last_check; + }); + return; + } + + DomainInfo info; + if 
(findDomain(domain_id, info)) { + auto container = s_transient_domain_info.write_lock(); + TransientDomainInfo tdi; + if (!container->get(info.id, tdi)) { + // No data yet, initialize from DomainInfo + tdi.notified_serial = info.notified_serial; + } + tdi.last_check = last_check; + container->update(info.id, tdi); + } } void LMDBBackend::getUpdatedPrimaries(vector& updatedDomains, std::unordered_set& catalogs, CatalogHashMap& catalogHashes) @@ -1836,9 +1930,24 @@ void LMDBBackend::setNotified(uint32_t domain_id, uint32_t serial) { - genChangeDomain(domain_id, [serial](DomainInfo& di) { - di.notified_serial = serial; - }); + if (d_write_notification_update) { + genChangeDomain(domain_id, [serial](DomainInfo& info) { + info.notified_serial = serial; + }); + return; + } + + DomainInfo info; + if (findDomain(domain_id, info)) { + auto container = s_transient_domain_info.write_lock(); + TransientDomainInfo tdi; + if (!container->get(info.id, tdi)) { + // No data yet, initialize from DomainInfo + tdi.last_check = info.last_check; + } + tdi.notified_serial = serial; + container->update(info.id, tdi); + } } class getCatalogMembersReturnFalseException : std::runtime_error @@ -2050,16 +2159,17 @@ return true; } +// NOLINTNEXTLINE(readability-function-cognitive-complexity) bool LMDBBackend::getBeforeAndAfterNamesAbsolute(uint32_t id, const DNSName& qname, DNSName& unhashed, DNSName& before, DNSName& after) { // cout << __PRETTY_FUNCTION__<< ": "<getROTransaction().get(id, di)) { + DomainInfo info; + if (!findDomain(id, info)) { // domain does not exist, tough luck return false; } - // cout <<"Zone: "<()); - unhashed = DNSName(lrr.content.c_str(), lrr.content.size(), 0, false) + di.zone; + unhashed = DNSName(lrr.content.c_str(), lrr.content.size(), 0, false) + info.zone; // now to find after .. 
at the beginning of the zone if (cursor.lower_bound(co(id), key, val)) { @@ -2191,7 +2301,7 @@ } } before = co.getQName(key.getNoStripHeader()); - unhashed = DNSName(lrr.content.c_str(), lrr.content.size(), 0, false) + di.zone; + unhashed = DNSName(lrr.content.c_str(), lrr.content.size(), 0, false) + info.zone; // cout <<"Should still find 'after'!"<()); - unhashed = DNSName(lrr.content.c_str(), lrr.content.size(), 0, false) + di.zone; + unhashed = DNSName(lrr.content.c_str(), lrr.content.size(), 0, false) + info.zone; // cout<<"Went backwards, found "<getROTransaction().get(domain_id, di)) { + DomainInfo info; + if (!findDomain(domain_id, info)) { // cout<<"Could not find domain_id "<getROTransaction(); - if (!rotxn.get(domain_id, di)) { - // cout <<"No such domain with id "<txn->del(txn->db->dbi, co(domain_id, n, 0)); } } @@ -2772,6 +2882,7 @@ declare(suffix, "random-ids", "Numeric IDs inside the database are generated randomly instead of sequentially", "no"); declare(suffix, "map-size", "LMDB map size in megabytes", (sizeof(void*) == 4) ? 
"100" : "16000"); declare(suffix, "flag-deleted", "Flag entries on deletion instead of deleting them", "no"); + declare(suffix, "write-notification-update", "Update domain table upon notification", "yes"); declare(suffix, "lightning-stream", "Run in Lightning Stream compatible mode", "no"); } DNSBackend* make(const string& suffix = "") override diff -Nru pdns-4.9.7/modules/lmdbbackend/lmdbbackend.hh pdns-4.9.14/modules/lmdbbackend/lmdbbackend.hh --- pdns-4.9.7/modules/lmdbbackend/lmdbbackend.hh 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/modules/lmdbbackend/lmdbbackend.hh 2026-04-08 09:58:17.000000000 +0000 @@ -61,6 +61,7 @@ { public: explicit LMDBBackend(const string& suffix = ""); + ~LMDBBackend(); bool list(const DNSName& target, int id, bool include_disabled) override; @@ -304,12 +305,20 @@ shared_ptr d_rotxn; // for lookup and list shared_ptr d_rwtxn; // for feedrecord within begin/aborttransaction + bool d_txnorder{false}; // whether d_rotxn is more recent than d_rwtxn std::shared_ptr getRecordsRWTransaction(uint32_t id); std::shared_ptr getRecordsROTransaction(uint32_t id, const std::shared_ptr& rwtxn = nullptr); int genChangeDomain(const DNSName& domain, const std::function& func); int genChangeDomain(uint32_t id, const std::function& func); void deleteDomainRecords(RecordsRWTransaction& txn, uint32_t domain_id, uint16_t qtype = QType::ANY); + bool findDomain(const DNSName& domain, DomainInfo& info) const; + bool findDomain(uint32_t domainid, DomainInfo& info) const; + void consolidateDomainInfo(DomainInfo& info) const; + void writeDomainInfo(const DomainInfo& info); + + void setLastCheckTime(uint32_t domain_id, time_t last_check); + void getAllDomainsFiltered(vector* domains, const std::function& allow); bool getSerial(DomainInfo& di); @@ -321,8 +330,29 @@ std::string d_matchkey; DNSName d_lookupdomain; + // Transient DomainInfo data, not necessarily synchronized with the + // database. 
+ struct TransientDomainInfo + { + time_t last_check{}; + uint32_t notified_serial{}; + }; + // Cache of DomainInfo notified_serial values + class TransientDomainInfoCache : public boost::noncopyable + { + public: + bool get(uint32_t domainid, TransientDomainInfo& data) const; + void remove(uint32_t domainid); + void update(uint32_t domainid, const TransientDomainInfo& data); + + private: + std::unordered_map d_data; + }; + static SharedLockGuarded s_transient_domain_info; + vector d_currentrrset; size_t d_currentrrsetpos; + time_t d_currentrrsettime; MDBOutVal d_currentKey; MDBOutVal d_currentVal; bool d_includedisabled; @@ -332,6 +362,7 @@ bool d_dolog; bool d_random_ids; bool d_handle_dups; + bool d_write_notification_update; DTime d_dtime; // used only for logging uint64_t d_mapsize; }; diff -Nru pdns-4.9.7/pdns/Makefile.am pdns-4.9.14/pdns/Makefile.am --- pdns-4.9.7/pdns/Makefile.am 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/Makefile.am 2026-04-08 09:58:17.000000000 +0000 @@ -101,7 +101,7 @@ if HAVE_VENV api-swagger.json: api-swagger.yaml requirements.txt $(PYTHON) -m venv .venv - .venv/bin/pip install -U pip setuptools setuptools-git wheel + .venv/bin/pip install -U pip "setuptools<82" setuptools-git wheel .venv/bin/pip install -r ${srcdir}/requirements.txt .venv/bin/python ${srcdir}/convert-yaml-to-json.py $< $@ else # if HAVE_VENV diff -Nru pdns-4.9.7/pdns/Makefile.in pdns-4.9.14/pdns/Makefile.in --- pdns-4.9.7/pdns/Makefile.in 2025-07-07 07:42:41.000000000 +0000 +++ pdns-4.9.14/pdns/Makefile.in 2026-04-08 09:58:47.000000000 +0000 @@ -4611,7 +4611,7 @@ @HAVE_VENV_TRUE@api-swagger.json: api-swagger.yaml requirements.txt @HAVE_VENV_TRUE@ $(PYTHON) -m venv .venv -@HAVE_VENV_TRUE@ .venv/bin/pip install -U pip setuptools setuptools-git wheel +@HAVE_VENV_TRUE@ .venv/bin/pip install -U pip "setuptools<82" setuptools-git wheel @HAVE_VENV_TRUE@ .venv/bin/pip install -r ${srcdir}/requirements.txt @HAVE_VENV_TRUE@ .venv/bin/python 
${srcdir}/convert-yaml-to-json.py $< $@ @HAVE_API_SWAGGER_JSON_FALSE@@HAVE_VENV_FALSE@api-swagger.json: diff -Nru pdns-4.9.7/pdns/api-swagger.json pdns-4.9.14/pdns/api-swagger.json --- pdns-4.9.7/pdns/api-swagger.json 2025-07-07 07:43:11.000000000 +0000 +++ pdns-4.9.14/pdns/api-swagger.json 2026-04-08 09:59:16.000000000 +0000 @@ -2318,18 +2318,18 @@ }, "changetype": { "type": "string", - "description": "MUST be added when updating the RRSet. Must be REPLACE or DELETE. With DELETE, all existing RRs matching name and type will be deleted, including all comments. With REPLACE: when records is present, all existing RRs matching name and type will be deleted, and then new records given in records will be created. If no records are left, any existing comments will be deleted as well. When comments is present, all existing comments for the RRs matching name and type will be deleted, and then new comments given in comments will be created." + "description": "MUST be added when updating the RRSet. Must be one of DELETE, EXTEND, PRUNE or REPLACE. With DELETE, all existing RRs matching name and type will be deleted, including all comments. With EXTEND, only a single record shall be present, and it will be added to the RRSet if not already present. With PRUNE, only a single record shall be present, and it will be deleted from the RRSet if present. With REPLACE, when records is present, all existing RRs matching name and type will be deleted, and then new records given in records will be created. If no records are left, any existing comments will be deleted as well. When comments is present, all existing comments for the RRs matching name and type will be deleted, and then new comments given in comments will be created." }, "records": { "type": "array", - "description": "All records in this RRSet. When updating Records, this is the list of new records (replacing the old ones). Must be empty when changetype is set to DELETE. 
An empty list results in deletion of all records (and comments).", + "description": "All records in this RRSet. When updating Records, this is the list of new records (replacing the old ones). Must be empty when changetype is set to DELETE, and must contain only one element when changetype is set to EXTEND or PRUNE. An empty list results in deletion of all records (and comments).", "items": { "$ref": "#/definitions/Record" } }, "comments": { "type": "array", - "description": "List of Comment. Must be empty when changetype is set to DELETE. An empty list results in deletion of all comments. modified_at is optional and defaults to the current server time.", + "description": "List of Comment. Must be empty when changetype is set to DELETE, EXTEND or PRUNE. An empty list results in deletion of all comments. modified_at is optional and defaults to the current server time.", "items": { "$ref": "#/definitions/Comment" } @@ -2350,6 +2350,10 @@ "disabled": { "type": "boolean", "description": "Whether or not this record is disabled. When unset, the record is not disabled" + }, + "modified_at": { + "type": "integer", + "description": "Timestamp of the last change to the record" } } }, diff -Nru pdns-4.9.7/pdns/api-swagger.yaml pdns-4.9.14/pdns/api-swagger.yaml --- pdns-4.9.7/pdns/api-swagger.yaml 2025-07-07 07:43:02.000000000 +0000 +++ pdns-4.9.14/pdns/api-swagger.yaml 2026-04-08 09:59:08.000000000 +0000 @@ -1086,15 +1086,15 @@ description: 'DNS TTL of the records, in seconds. MUST NOT be included when changetype is set to “DELETE”.' changetype: type: string - description: 'MUST be added when updating the RRSet. Must be REPLACE or DELETE. With DELETE, all existing RRs matching name and type will be deleted, including all comments. With REPLACE: when records is present, all existing RRs matching name and type will be deleted, and then new records given in records will be created. If no records are left, any existing comments will be deleted as well. 
When comments is present, all existing comments for the RRs matching name and type will be deleted, and then new comments given in comments will be created.' + description: 'MUST be added when updating the RRSet. Must be one of DELETE, EXTEND, PRUNE or REPLACE. With DELETE, all existing RRs matching name and type will be deleted, including all comments. With EXTEND, only a single record shall be present, and it will be added to the RRSet if not already present. With PRUNE, only a single record shall be present, and it will be deleted from the RRSet if present. With REPLACE, when records is present, all existing RRs matching name and type will be deleted, and then new records given in records will be created. If no records are left, any existing comments will be deleted as well. When comments is present, all existing comments for the RRs matching name and type will be deleted, and then new comments given in comments will be created.' records: type: array - description: 'All records in this RRSet. When updating Records, this is the list of new records (replacing the old ones). Must be empty when changetype is set to DELETE. An empty list results in deletion of all records (and comments).' + description: 'All records in this RRSet. When updating Records, this is the list of new records (replacing the old ones). Must be empty when changetype is set to DELETE, and must contain only one element when changetype is set to EXTEND or PRUNE. An empty list results in deletion of all records (and comments).' items: $ref: '#/definitions/Record' comments: type: array - description: 'List of Comment. Must be empty when changetype is set to DELETE. An empty list results in deletion of all comments. modified_at is optional and defaults to the current server time.' + description: 'List of Comment. Must be empty when changetype is set to DELETE, EXTEND or PRUNE. An empty list results in deletion of all comments. modified_at is optional and defaults to the current server time.' 
items: $ref: '#/definitions/Comment' @@ -1110,6 +1110,9 @@ disabled: type: boolean description: 'Whether or not this record is disabled. When unset, the record is not disabled' + modified_at: + type: integer + description: 'Timestamp of the last change to the record' Comment: title: Comment diff -Nru pdns-4.9.7/pdns/auth-caches.cc pdns-4.9.14/pdns/auth-caches.cc --- pdns-4.9.7/pdns/auth-caches.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/auth-caches.cc 2026-04-08 09:58:17.000000000 +0000 @@ -31,8 +31,9 @@ uint64_t purgeAuthCaches() { uint64_t ret = 0; - ret += PC.purge(); + /* Clean query cache before packet cache to avoid potential race condition */ ret += QC.purge(); + ret += PC.purge(); return ret; } @@ -40,8 +41,9 @@ uint64_t purgeAuthCaches(const std::string& match) { uint64_t ret = 0; - ret += PC.purge(match); + /* Clean query cache before packet cache to avoid potential race condition */ ret += QC.purge(match); + ret += PC.purge(match); return ret; } @@ -49,8 +51,9 @@ uint64_t purgeAuthCachesExact(const DNSName& qname) { uint64_t ret = 0; - ret += PC.purgeExact(qname); + /* Clean query cache before packet cache to avoid potential race condition */ ret += QC.purgeExact(qname); + ret += PC.purgeExact(qname); return ret; } diff -Nru pdns-4.9.7/pdns/backends/gsql/gsqlbackend.hh pdns-4.9.14/pdns/backends/gsql/gsqlbackend.hh --- pdns-4.9.7/pdns/backends/gsql/gsqlbackend.hh 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/backends/gsql/gsqlbackend.hh 2026-04-08 09:58:17.000000000 +0000 @@ -42,10 +42,10 @@ d_db.reset(); } - void setDB(SSql *db) + void setDB(std::unique_ptr&& database) { freeStatements(); - d_db=std::unique_ptr(db); + d_db = std::move(database); if (d_db) { d_db->setLog(::arg().mustDo("query-logging")); } diff -Nru pdns-4.9.7/pdns/dns.hh pdns-4.9.14/pdns/dns.hh --- pdns-4.9.7/pdns/dns.hh 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/dns.hh 2026-04-08 09:58:17.000000000 +0000 @@ -77,7 +77,7 @@ // Aligned on 8-byte 
boundaries on systems where time_t is 8 bytes and int // is 4 bytes, aka modern linux on x86_64 - time_t last_modified{}; //!< For autocalculating SOA serial numbers - the backend needs to fill this in + time_t last_modified{}; //!< Timestamp of last update, if known by the backend uint32_t ttl{}; //!< Time To Live of this record uint32_t signttl{}; //!< If non-zero, use this TTL as original TTL in the RRSIG diff -Nru pdns-4.9.7/pdns/dnspacket.cc pdns-4.9.14/pdns/dnspacket.cc --- pdns-4.9.7/pdns/dnspacket.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/dnspacket.cc 2026-04-08 09:58:17.000000000 +0000 @@ -615,7 +615,7 @@ else if (s_doEDNSCookieProcessing && option.first == EDNSOptionCode::COOKIE) { d_haveednscookie = true; d_eco.makeFromString(option.second); - d_ednscookievalid = d_eco.isValid(s_EDNSCookieKey, d_remote); + d_ednscookievalid = d_eco.isValid(s_EDNSCookieKey, getInnerRemote()); } else { // cerr<<"Have an option #"<first<<": "<second)< 255) { + throw runtime_error("invalid unquoted text length"); + } d_content.push_back(text.length()); + } d_content.insert(d_content.end(), text.c_str(), text.c_str() + text.length()); } @@ -413,11 +417,14 @@ break; case SvcParam::alpn: { - uint16_t totalSize = param.getALPN().size(); // All 1 octet size headers for each value + size_t totalSize = param.getALPN().size(); // All 1 octet size headers for each value for (auto const &a : param.getALPN()) { totalSize += a.length(); } - xfr16BitInt(totalSize); + if (totalSize > std::numeric_limits::max()) { + throw runtime_error("invalid total length of alpn parameters"); + } + xfr16BitInt(static_cast(totalSize)); for (auto const &a : param.getALPN()) { xfrUnquotedText(a, true); // will add the 1-byte length field } diff -Nru pdns-4.9.7/pdns/lua-record.cc pdns-4.9.14/pdns/lua-record.cc --- pdns-4.9.7/pdns/lua-record.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/lua-record.cc 2026-04-08 09:58:17.000000000 +0000 @@ -884,8 +884,6 @@ if(labels.size()<4) 
return std::string("unknown"); - vector candidates; - // so, query comes in for 4.3.2.1.in-addr.arpa, zone is called 2.1.in-addr.arpa // e["1.2.3.4"]="bert.powerdns.com" then provides an exception if(e) { @@ -910,7 +908,10 @@ return fmt.str(); } catch(std::exception& ex) { - g_log<> excp){ - vector candidates; - try { auto labels= s_lua_record_ctx->qname.getRawLabels(); if (labels.size()<32) { @@ -1069,12 +1070,12 @@ return fmt.str(); } catch(std::exception& ex) { - g_log< fallback) -> vector { diff -Nru pdns-4.9.7/pdns/opensslsigners.cc pdns-4.9.14/pdns/opensslsigners.cc --- pdns-4.9.7/pdns/opensslsigners.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/opensslsigners.cc 2026-04-08 09:58:17.000000000 +0000 @@ -942,7 +942,7 @@ if (contentLen < 3) { throw runtime_error(getName() + " invalid input size for the public key"); } - const size_t exponentSize = raw[1] * 0xff + raw[2]; + const size_t exponentSize = (static_cast(raw[1])) * 0x100 + raw[2]; if (contentLen < (exponentSize + 4)) { throw runtime_error(getName() + " invalid input size for the public key"); } diff -Nru pdns-4.9.7/pdns/rcpgenerator.cc pdns-4.9.14/pdns/rcpgenerator.cc --- pdns-4.9.7/pdns/rcpgenerator.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/rcpgenerator.cc 2026-04-08 09:58:17.000000000 +0000 @@ -121,6 +121,13 @@ throw RecordTextException("unable to parse '"+std::to_string(itmp)+"' into a valid time at position "+std::to_string(d_pos)+" in '"+d_string+"'"); } + // Note tm_mon is still in 1..12 range at this point + if (tm.tm_sec < 0 || tm.tm_sec > 60 || tm.tm_min < 0 || tm.tm_min > 59 || + tm.tm_hour < 0 || tm.tm_hour > 23 || tm.tm_mday < 0 || tm.tm_mday > 31 || + tm.tm_mon < 1 || tm.tm_mon > 12) { + throw RecordTextException("invalid time specification '"+std::to_string(itmp)+"' at position "+std::to_string(d_pos)+" in '"+d_string+"'"); + } + tm.tm_year-=1900; tm.tm_mon-=1; // coverity[store_truncates_time_t] @@ -424,6 +431,12 @@ while (spos < v.length()) { len = 
v.at(spos); spos += 1; + if (len == 0) { + throw RecordTextException("ALPN values cannot be empty strings"); + } + if (len > 255) { + throw RecordTextException("Length of ALPN value goes over 255"); + } if (len > v.length() - spos) { throw RecordTextException("Length of ALPN value goes over total length of alpn SVC Param"); } @@ -432,6 +445,11 @@ } } else { xfrSVCBValueList(value); + for (const auto& item : value) { + if (item.length() > 255) { + throw RecordTextException("Length of SVC ALPN value goes over 255"); + } + } } val.insert(SvcParam(key, std::move(value))); break; diff -Nru pdns-4.9.7/pdns/requirements.txt pdns-4.9.14/pdns/requirements.txt --- pdns-4.9.7/pdns/requirements.txt 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/requirements.txt 2026-04-08 09:58:17.000000000 +0000 @@ -1 +1,2 @@ PyYAML +setuptools<82 diff -Nru pdns-4.9.7/pdns/rfc2136handler.cc pdns-4.9.14/pdns/rfc2136handler.cc --- pdns-4.9.7/pdns/rfc2136handler.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/rfc2136handler.cc 2026-04-08 09:58:17.000000000 +0000 @@ -540,6 +540,21 @@ return changedRecords; } +static void socketCleaner(int* sock) +{ + if (*sock < 0) { + return; // nothing to do + } + + try { + closesocket(*sock); + *sock = -1; + } + catch (const PDNSException& e) { + g_log << Logger::Error << "Error closing primary forwarding socket: " << e.reason << endl; + } +} + int PacketHandler::forwardPacket(const string &msgPrefix, const DNSPacket& p, const DomainInfo& di) { vector forward; B.getDomainMetadata(p.qdomain, "FORWARD-DNSUPDATE", forward); @@ -562,89 +577,57 @@ continue; } - if( connect(sock, (struct sockaddr*)&remote, remote.getSocklen()) < 0 ) { - g_log< guard(&sock, socketCleaner); - DNSPacket l_forwardPacket(p); - l_forwardPacket.setID(dns_random_uint16()); - l_forwardPacket.setRemote(&remote); - uint16_t len=htons(l_forwardPacket.getString().length()); - string buffer((const char*)&len, 2); - buffer.append(l_forwardPacket.getString()); - if(write(sock, 
buffer.c_str(), buffer.length()) < 0) { - g_log<(recvRes) < sizeof(lenBuf)) { - g_log << Logger::Error << msgPrefix << "Could not receive data (length) from primary at " << remote.toStringWithPort() << ", error:" << stringerror() << endl; - try { - closesocket(sock); + std::array lenBuf{}; + recvRes = recv(sock, lenBuf.data(), lenBuf.size(), 0); + if (recvRes < 0 || static_cast(recvRes) < lenBuf.size()) { + g_log << Logger::Error << msgPrefix << "Could not receive data (length) from primary at " << remote.toStringWithPort() << ", error:" << stringerror() << endl; + continue; } - catch(const PDNSException& e) { - g_log << Logger::Error << "Error closing primary forwarding socket after recv() failed: " << e.reason << endl; - } - continue; - } - size_t packetLen = lenBuf[0]*256+lenBuf[1]; + size_t packetLen = lenBuf[0]*256+lenBuf[1]; - buffer.resize(packetLen); - recvRes = recv(sock, &buffer.at(0), packetLen, 0); - if (recvRes < 0) { - g_log << Logger::Error << msgPrefix << "Could not receive data (dnspacket) from primary at " << remote.toStringWithPort() << ", error:" << stringerror() << endl; - try { - closesocket(sock); + if (packetLen == 0) { + g_log << Logger::Warning << msgPrefix << "Empty update sent by primary at " << remote.toStringWithPort() << endl; + continue; } - catch(const PDNSException& e) { - g_log << Logger::Error << "Error closing primary forwarding socket after recv() failed: " << e.reason << endl; + + buffer.resize(packetLen); + recvRes = recv(sock, &buffer.at(0), packetLen, 0); + if (recvRes < 0) { + g_log << Logger::Error << msgPrefix << "Could not receive data (dnspacket) from primary at " << remote.toStringWithPort() << ", error:" << stringerror() << endl; + continue; } - continue; - } - try { - closesocket(sock); - } - catch(const PDNSException& e) { - g_log << Logger::Error << "Error closing primary forwarding socket: " << e.reason << endl; - } + } // socketCleaner scope try { MOADNSParser mdp(false, buffer.data(), static_cast(recvRes)); @@ 
-661,6 +644,7 @@ } +// NOLINTNEXTLINE(readability-function-cognitive-complexity) int PacketHandler::processUpdate(DNSPacket& p) { if (! ::arg().mustDo("dnsupdate")) return RCode::Refused; @@ -806,8 +790,10 @@ const DNSRecord* rr = &i.first; if (rr->d_place == DNSResourceRecord::ANSWER) { // Last line of 3.2.3 - if (rr->d_class != QClass::IN && rr->d_class != QClass::NONE && rr->d_class != QClass::ANY) + if (rr->d_class != QClass::IN && rr->d_class != QClass::NONE && rr->d_class != QClass::ANY) { + di.backend->abortTransaction(); return RCode::FormErr; + } if (rr->d_class == QClass::IN) { rrSetKey_t key = {rr->d_name, QType(rr->d_type)}; diff -Nru pdns-4.9.7/pdns/test-dnsrecords_cc.cc pdns-4.9.14/pdns/test-dnsrecords_cc.cc --- pdns-4.9.7/pdns/test-dnsrecords_cc.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/test-dnsrecords_cc.cc 2026-04-08 09:58:17.000000000 +0000 @@ -223,6 +223,7 @@ (CASE_S(QType::SVCB, "1 foo.powerdns.org. mandatory=alpn", "\0\x01\3foo\x08powerdns\x03org\x00\x00\x00\x00\x02\x00\x01")) (CASE_S(QType::SVCB, "1 foo.powerdns.org. no-default-alpn", "\0\x01\3foo\x08powerdns\x03org\x00\x00\x02\x00\x00")) (CASE_S(QType::SVCB, "1 foo.powerdns.org. alpn=h3,h2", "\0\x01\3foo\x08powerdns\x03org\x00\x00\x01\x00\x06\x02h3\x02h2")) + (BROKEN_CASE_S(QType::SVCB, "1 foo.powerdns.org. alpn=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", "\0\x01\3foo\x08powerdns\x03org\x00\x00\x01\x00\x06\x02h3\x02h2")) (CASE_S(QType::SVCB, "1 foo.powerdns.org. port=53", "\0\x01\3foo\x08powerdns\x03org\x00\x00\x03\x00\x02\x00\x35")) (CASE_S(QType::SVCB, "1 foo.powerdns.org. ipv4hint=192.0.2.53,192.0.2.2", "\0\x01\3foo\x08powerdns\x03org\x00\x00\x04\x00\x08\xc0\x00\x02\x35\xc0\x00\x02\x02")) (CASE_S(QType::SVCB, "1 foo.powerdns.org. 
ech=\"aGVsbG8=\"", "\0\x01\3foo\x08powerdns\x03org\x00\x00\x05\x00\x05hello")) diff -Nru pdns-4.9.7/pdns/unix_utility.cc pdns-4.9.14/pdns/unix_utility.cc --- pdns-4.9.7/pdns/unix_utility.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/unix_utility.cc 2026-04-08 09:58:17.000000000 +0000 @@ -239,9 +239,10 @@ time_t i; time_t years = t->tm_year - 70; + // We allow for some benign out-of-range values if (t->tm_sec>60) { t->tm_min += t->tm_sec/60; t->tm_sec%=60; } if (t->tm_min>60) { t->tm_hour += t->tm_min/60; t->tm_min%=60; } - if (t->tm_hour>60) { t->tm_mday += t->tm_hour/60; t->tm_hour%=60; } + if (t->tm_hour>24) { t->tm_mday += t->tm_hour/24; t->tm_hour%=24; } if (t->tm_mon>11) { t->tm_year += t->tm_mon/12; t->tm_mon%=12; } while (t->tm_mday>spm[1+t->tm_mon]) { diff -Nru pdns-4.9.7/pdns/ws-auth.cc pdns-4.9.14/pdns/ws-auth.cc --- pdns-4.9.7/pdns/ws-auth.cc 2025-07-07 07:42:15.000000000 +0000 +++ pdns-4.9.14/pdns/ws-auth.cc 2026-04-08 09:58:17.000000000 +0000 @@ -89,7 +89,7 @@ return d_max; } -static void patchZone(UeberBackend& backend, const DNSName& zonename, DomainInfo& domainInfo, HttpRequest* req, HttpResponse* resp); +static void patchZone(UeberBackend& backend, const DNSName& zonename, DomainInfo& domainInfo, const vector& rrsets, HttpResponse* resp); // QTypes that MUST NOT have multiple records of the same type in a given RRset. 
static const std::set onlyOneEntryTypes = {QType::CNAME, QType::DNAME, QType::SOA}; @@ -523,9 +523,13 @@ while (rit != records.end() && rit->qname == current_qname && rit->qtype == current_qtype) { ttl = min(ttl, rit->ttl); - rrset_records.push_back(Json::object{ + auto object = Json::object{ {"disabled", rit->disabled}, - {"content", makeApiRecordContent(rit->qtype, rit->content)}}); + {"content", makeApiRecordContent(rit->qtype, rit->content)}}; + if (rit->last_modified != 0) { + object["modified_at"] = (double)rit->last_modified; + } + rrset_records.push_back(object); rit++; } while (cit != comments.end() && cit->qname == current_qname && cit->qtype == current_qtype) { @@ -2021,28 +2025,34 @@ domainInfo.backend->startTransaction(zonename, static_cast(domainInfo.id)); - // will be overridden by updateDomainSettingsFromDocument, if given in document. - domainInfo.backend->setDomainMetadataOne(zonename, "SOA-EDIT-API", "DEFAULT"); + try { + // will be overridden by updateDomainSettingsFromDocument, if given in document. 
+ domainInfo.backend->setDomainMetadataOne(zonename, "SOA-EDIT-API", "DEFAULT"); - for (auto& resourceRecord : new_records) { - resourceRecord.domain_id = static_cast(domainInfo.id); - domainInfo.backend->feedRecord(resourceRecord, DNSName()); - } - for (Comment& comment : new_comments) { - comment.domain_id = static_cast(domainInfo.id); - if (!domainInfo.backend->feedComment(comment)) { - throw ApiException("Hosting backend does not support editing comments."); + for (auto& resourceRecord : new_records) { + resourceRecord.domain_id = static_cast(domainInfo.id); + domainInfo.backend->feedRecord(resourceRecord, DNSName()); + } + for (Comment& comment : new_comments) { + comment.domain_id = static_cast(domainInfo.id); + if (!domainInfo.backend->feedComment(comment)) { + throw ApiException("Hosting backend does not support editing comments."); + } } - } - updateDomainSettingsFromDocument(backend, domainInfo, zonename, document, !new_records.empty()); + updateDomainSettingsFromDocument(backend, domainInfo, zonename, document, !new_records.empty()); - if (!catalog && kind == DomainInfo::Primary) { - const auto& defaultCatalog = ::arg()["default-catalog-zone"]; - if (!defaultCatalog.empty()) { - domainInfo.backend->setCatalog(zonename, DNSName(defaultCatalog)); + if (!catalog && kind == DomainInfo::Primary) { + const auto& defaultCatalog = ::arg()["default-catalog-zone"]; + if (!defaultCatalog.empty()) { + domainInfo.backend->setCatalog(zonename, DNSName(defaultCatalog)); + } } } + catch (...) 
{ + domainInfo.backend->abortTransaction(); + throw; + } domainInfo.backend->commitTransaction(); @@ -2228,7 +2238,14 @@ static void apiServerZoneDetailPATCH(HttpRequest* req, HttpResponse* resp) { ZoneData zoneData{req}; - patchZone(zoneData.backend, zoneData.zoneName, zoneData.domainInfo, req, resp); + Json document = req->json(); + + auto rrsets = document["rrsets"]; + if (!rrsets.is_array()) { + throw ApiException("No rrsets given in update request"); + } + + patchZone(zoneData.backend, zoneData.zoneName, zoneData.domainInfo, rrsets.array_items(), resp); } static void apiServerZoneDetailGET(HttpRequest* req, HttpResponse* resp) @@ -2306,173 +2323,383 @@ resp->setSuccessResult("Rectified"); } -// NOLINTNEXTLINE(readability-function-cognitive-complexity): TODO Refactor this function. -static void patchZone(UeberBackend& backend, const DNSName& zonename, DomainInfo& domainInfo, HttpRequest* req, HttpResponse* resp) +// The allowed values for the "changetype" field of a Json patch record. +enum changeType { - bool zone_disabled = false; - SOAData soaData; + DELETE, // delete complete RRset + REPLACE, // replace complete RRset + PRUNE, // remove single record from RRset if found + EXTEND // add single record to RRset if not found +}; - vector new_records; - vector new_comments; - vector new_ptrs; +// Validate the "changetype" field of a Json patch record. +// Returns the recognized operation. +// Throws an exception if unrecognized. 
+static changeType validateChangeType(const std::string& changetype) +{ + if (changetype == "DELETE") { + return DELETE; + } + if (changetype == "REPLACE") { + return REPLACE; + } + if (changetype == "PRUNE") { + return PRUNE; + } + if (changetype == "EXTEND") { + return EXTEND; + } + throw ApiException("Changetype '" + changetype + "' is not a valid value"); +} - Json document = req->json(); +// Replace the rrset for `qname' in zone `zonename' with the contents of +// `new_records', making sure to remove no longer needed ENT entries, and +// also enforcing the exclusivity rules (at most one CNAME, DNAME and SOA, +// etc). +static void replaceZoneRecords(const DomainInfo& domainInfo, const DNSName& zonename, vector& new_records, const DNSName& qname, const QType qtype) +{ + bool ent_present = false; + bool dname_seen = qtype == QType::DNAME; + bool ns_seen = qtype == QType::NS; - auto rrsets = document["rrsets"]; - if (!rrsets.is_array()) { - throw ApiException("No rrsets given in update request"); + domainInfo.backend->lookup(QType(QType::ANY), qname, static_cast(domainInfo.id)); + DNSResourceRecord resourceRecord; + while (domainInfo.backend->get(resourceRecord)) { + if (resourceRecord.qtype.getCode() == QType::ENT) { + ent_present = true; + // that's fine, we will override it + continue; + } + dname_seen |= resourceRecord.qtype == QType::DNAME; + ns_seen |= resourceRecord.qtype == QType::NS; + if (qtype.getCode() != resourceRecord.qtype.getCode() + && (exclusiveEntryTypes.count(qtype.getCode()) != 0 + || exclusiveEntryTypes.count(resourceRecord.qtype.getCode()) != 0)) { + // leave database handle in a consistent state + while (domainInfo.backend->get(resourceRecord)) { + ; + } + throw ApiException("RRset " + qname.toString() + " IN " + qtype.toString() + ": Conflicts with pre-existing RRset"); + } + } + if (dname_seen && ns_seen && qname != zonename) { + throw ApiException("RRset " + qname.toString() + " IN " + qtype.toString() + ": Cannot have both NS and DNAME 
except in zone apex"); + } + if (!new_records.empty() && ent_present) { + QType qt_ent{QType::ENT}; + if (!domainInfo.backend->replaceRRSet(domainInfo.id, qname, qt_ent, new_records)) { + throw ApiException("Hosting backend does not support editing records."); + } + } + if (!domainInfo.backend->replaceRRSet(domainInfo.id, qname, qtype, new_records)) { + throw ApiException("Hosting backend does not support editing records."); } +} - domainInfo.backend->startTransaction(zonename); +// Parse the record name and type from a Json patch record. +static void parseRecordNameAndType(const Json& rrset, DNSName& qname, QType& qtype) +{ + qname = apiNameToDNSName(stringFromJson(rrset, "name")); + apiCheckQNameAllowedCharacters(qname.toString()); + qtype = stringFromJson(rrset, "type"); + if (qtype.getCode() == QType::ENT) { + throw ApiException("RRset " + qname.toString() + " IN " + stringFromJson(rrset, "type") + ": unknown type given"); + } +} - try { - string soa_edit_api_kind; - string soa_edit_kind; - domainInfo.backend->getDomainMetadataOne(zonename, "SOA-EDIT-API", soa_edit_api_kind); - domainInfo.backend->getDomainMetadataOne(zonename, "SOA-EDIT", soa_edit_kind); - bool soa_edit_done = false; +// The return value of the apply* functions below +enum applyResult +{ + SUCCESS, // successful and changes performed + NOP, // successful but no changes needed +#if 0 // Not possible in 4.9.x + ABORT // failed horribly, don't process anything further +#endif +}; - set> seen; +// Apply a DELETE changetype. +static applyResult applyDelete(const DomainInfo& domainInfo, DNSName& qname, QType& qtype, bool returnRRset, std::vector& rrset) +{ + // Delete all matching qname/qtype RRs (and implicitly, comments). 
+ if (!domainInfo.backend->replaceRRSet(domainInfo.id, qname, qtype, {})) { + throw ApiException("Hosting backend does not support editing records."); + } + // Update RRset cache if needed + if (returnRRset) { + rrset.clear(); + } + return SUCCESS; +} - for (const auto& rrset : rrsets.array_items()) { - string changetype = toUpper(stringFromJson(rrset, "changetype")); - DNSName qname = apiNameToDNSName(stringFromJson(rrset, "name")); - apiCheckQNameAllowedCharacters(qname.toString()); - QType qtype; - qtype = stringFromJson(rrset, "type"); - if (qtype.getCode() == 0) { - throw ApiException("RRset " + qname.toString() + " IN " + stringFromJson(rrset, "type") + ": unknown type given"); - } +// Struct gathering the SOA edition details, so as not to pass too many +// billions of parameters to applyReplace() below. +struct soaEditSettings +{ + bool edit_done{false}; + string edit_api_kind; + string edit_kind; +}; - if (seen.count({qname, qtype, changetype}) != 0) { - throw ApiException("Duplicate RRset " + qname.toString() + " IN " + qtype.toString() + " with changetype: " + changetype); - } - seen.insert({qname, qtype, changetype}); +// Apply a REPLACE changetype. +static applyResult applyReplace(const DomainInfo& domainInfo, const DNSName& zonename, const Json& container, DNSName& qname, QType& qtype, soaEditSettings& soa, bool returnRRset, std::vector& rrset) +{ + bool replace_records = container["records"].is_array(); + bool replace_comments = container["comments"].is_array(); - if (changetype == "DELETE") { - // delete all matching qname/qtype RRs (and, implicitly comments). 
- if (!domainInfo.backend->replaceRRSet(domainInfo.id, qname, qtype, vector())) { - throw ApiException("Hosting backend does not support editing records."); + if (!replace_records && !replace_comments) { + throw ApiException("No change for RRset " + qname.toString() + " IN " + qtype.toString()); + } + + vector new_records; + vector new_comments; + + try { + if (replace_records) { + // ttl shouldn't be required if we don't get new records. + uint32_t ttl = uintFromJson(container, "ttl"); + gatherRecords(container, qname, qtype, ttl, new_records); + + for (DNSResourceRecord& resourceRecord : new_records) { + resourceRecord.domain_id = static_cast(domainInfo.id); + if (resourceRecord.qtype.getCode() == QType::SOA && resourceRecord.qname == zonename) { + soa.edit_done = increaseSOARecord(resourceRecord, soa.edit_api_kind, soa.edit_kind); } } - else if (changetype == "REPLACE") { - // we only validate for REPLACE, as DELETE can be used to "fix" out of zone records. - if (!qname.isPartOf(zonename) && qname != zonename) { - throw ApiException("RRset " + qname.toString() + " IN " + qtype.toString() + ": Name is out of zone"); - } + checkNewRecords(new_records, zonename); + } - bool replace_records = rrset["records"].is_array(); - bool replace_comments = rrset["comments"].is_array(); + if (replace_comments) { + gatherComments(container, qname, qtype, new_comments); - if (!replace_records && !replace_comments) { - throw ApiException("No change for RRset " + qname.toString() + " IN " + qtype.toString()); - } + for (Comment& comment : new_comments) { + comment.domain_id = static_cast(domainInfo.id); + } + } + } + catch (const JsonException& e) { + throw ApiException("New RRsets are invalid: " + string(e.what())); + } - new_records.clear(); - new_comments.clear(); + if (replace_records) { + replaceZoneRecords(domainInfo, zonename, new_records, qname, qtype); + } + if (replace_comments) { + if (!domainInfo.backend->replaceComments(domainInfo.id, qname, qtype, new_comments)) { + 
throw ApiException("Hosting backend does not support editing comments."); + } + } + // Update RRset cache if needed + if (returnRRset) { + rrset = std::move(new_records); + } + return SUCCESS; +} - try { - if (replace_records) { - // ttl shouldn't be part of DELETE, and it shouldn't be required if we don't get new records. - uint32_t ttl = uintFromJson(rrset, "ttl"); - gatherRecords(rrset, qname, qtype, ttl, new_records); - - for (DNSResourceRecord& resourceRecord : new_records) { - resourceRecord.domain_id = static_cast(domainInfo.id); - if (resourceRecord.qtype.getCode() == QType::SOA && resourceRecord.qname == zonename) { - soa_edit_done = increaseSOARecord(resourceRecord, soa_edit_api_kind, soa_edit_kind); - } - } - checkNewRecords(new_records, zonename); - } +static applyResult applyPruneOrExtend(const DomainInfo& domainInfo, const DNSName& zonename, const Json& container, DNSName& qname, QType& qtype, soaEditSettings& soa, changeType operationType, std::vector& rrset) +{ + if (!container["records"].is_array()) { + throw ApiException("No record provided for PRUNE or EXTEND operation"); + } - if (replace_comments) { - gatherComments(rrset, qname, qtype, new_comments); + try { + vector new_records; + uint32_t ttl = uintFromJson(container, "ttl"); + gatherRecords(container, qname, qtype, ttl, new_records); + if (new_records.size() != 1) { + throw ApiException("Exactly one record should be provided for PRUNE or EXTEND operation"); + } - for (Comment& comment : new_comments) { - comment.domain_id = static_cast(domainInfo.id); - } - } - } - catch (const JsonException& e) { - throw ApiException("New RRsets are invalid: " + string(e.what())); + auto& new_record = new_records.front(); + new_record.domain_id = static_cast(domainInfo.id); + if (new_record.qtype.getCode() == QType::SOA && new_record.qname == zonename) { + soa.edit_done = increaseSOARecord(new_record, soa.edit_api_kind, soa.edit_kind); + } + + checkNewRecords(new_records, zonename); + + // Check if this 
record exists in the RRSet + bool seenRecord{false}; + for (auto iter = rrset.begin(); iter != rrset.end(); ++iter) { + if (iter->content == new_record.content) { + // We found the record we've been instructed to add or delete. + seenRecord = true; + // If it is to be added, we don't have anything more to do. + // If it is to be deleted, just remove it from the RRset we're building. + if (operationType == PRUNE) { + rrset.erase(iter); } + break; + } + } + // Add new record to RRset if not found. + if (operationType == EXTEND && !seenRecord) { + rrset.emplace_back(new_record); + } + bool submitChanges = (operationType == EXTEND && !seenRecord) || (operationType == PRUNE && seenRecord); + if (!submitChanges) { + return NOP; + } + if (!domainInfo.backend->replaceRRSet(domainInfo.id, qname, qtype, rrset)) { + throw ApiException("Hosting backend does not support editing records."); + } + } + catch (const JsonException& e) { + throw ApiException("Submitted record is invalid: " + string(e.what())); + } + return SUCCESS; +} - if (replace_records) { - bool ent_present = false; - bool dname_seen = false; - bool ns_seen = false; - - domainInfo.backend->lookup(QType(QType::ANY), qname, static_cast(domainInfo.id)); - DNSResourceRecord resourceRecord; - while (domainInfo.backend->get(resourceRecord)) { - if (resourceRecord.qtype.getCode() == QType::ENT) { - ent_present = true; - /* that's fine, we will override it */ - continue; - } - if (qtype == QType::DNAME || resourceRecord.qtype == QType::DNAME) { - dname_seen = true; - } - if (qtype == QType::NS || resourceRecord.qtype == QType::NS) { - ns_seen = true; - } - if (qtype.getCode() != resourceRecord.qtype.getCode() - && (exclusiveEntryTypes.count(qtype.getCode()) != 0 - || exclusiveEntryTypes.count(resourceRecord.qtype.getCode()) != 0)) { - - // leave database handle in a consistent state - while (domainInfo.backend->get(resourceRecord)) { - ; - } +static void patchZone(UeberBackend& backend, const DNSName& zonename, 
DomainInfo& domainInfo, const vector& rrsets, HttpResponse* resp) +{ + bool madeAnyChanges{false}; + domainInfo.backend->startTransaction(zonename); + try { + soaEditSettings soa; + domainInfo.backend->getDomainMetadataOne(zonename, "SOA-EDIT-API", soa.edit_api_kind); + domainInfo.backend->getDomainMetadataOne(zonename, "SOA-EDIT", soa.edit_kind); + + // For PRUNE and EXTEND operations, we are not being passed the complete + // RRset, and will need to fetch it from the backend. But we may have + // processed a DELETE or REPLACE operation for the same RRset first, in + // which case we can't assume querying the backend will be consistent with + // the results of that last operation, since we are within a transaction + // that has not been committed yet. + // To be sure to work on consistent contents, without having to rely upon + // specific backend behaviour, we will need to cache the RRset values + // in this routine, but we only need to do that for RRsets which are + // subject to both PRUNE/EXTEND and DELETE/REPLACE operations. + // That first pass over the change requests computes this (and also + // performs basic validation). + using key = std::pair; + std::map changes; + for (const auto& rrset : rrsets) { + string changetype = toUpper(stringFromJson(rrset, "changetype")); + auto operationType = validateChangeType(changetype); + DNSName qname; + QType qtype; + parseRecordNameAndType(rrset, qname, qtype); - if (operationType != DELETE) { + if (domainInfo.kind == DomainInfo::Consumer) { + // Allow deleting all RRsets, just not modifying them.
+ throw ApiException("Modifying RRsets in Consumer zones is unsupported"); + } - if (dname_seen && ns_seen && qname != zonename) { - throw ApiException("RRset " + qname.toString() + " IN " + qtype.toString() + ": Cannot have both NS and DNAME except in zone apex"); - } - if (!new_records.empty() && domainInfo.kind == DomainInfo::Consumer) { - // Allow deleting all RRsets, just not modifying them. - throw ApiException("Modifying RRsets in Consumer zones is unsupported"); - } - if (!new_records.empty() && ent_present) { - QType qt_ent{0}; - if (!domainInfo.backend->replaceRRSet(domainInfo.id, qname, qt_ent, new_records)) { - throw ApiException("Hosting backend does not support editing records."); - } - } - if (!domainInfo.backend->replaceRRSet(domainInfo.id, qname, qtype, new_records)) { - throw ApiException("Hosting backend does not support editing records."); - } + // We intentionally do not perform this check for DELETE, as it can be + // used as a poor man's way to "fix" out-of-zone records. + if (!qname.isPartOf(zonename)) { + throw ApiException("RRset " + qname.toString() + " IN " + qtype.toString() + ": Name is out of zone"); } - if (replace_comments) { - if (!domainInfo.backend->replaceComments(domainInfo.id, qname, qtype, new_comments)) { - throw ApiException("Hosting backend does not support editing comments."); + } + + // At this point, we store a bitmask of the operations which will need + // to be performed. + unsigned int newOperation = 1U << operationType; + key currentKey{qname, qtype}; + if (auto iter = changes.find(currentKey); iter != changes.end()) { + auto operations = iter->second; + // Only allow one DELETE or REPLACE operation per RRset. On the other + // hand, it makes sense to allow multiple PRUNE or EXTEND, since the + // individual records they'll concern might differ. 
+ if (operationType == DELETE || operationType == REPLACE) { + if ((operations & newOperation) != 0) { + throw ApiException("Duplicate RRset " + qname.toString() + " IN " + qtype.toString() + " with changetype: " + changetype); } } + changes.insert_or_assign(currentKey, operations | newOperation); } else { - throw ApiException("Changetype not understood"); + changes.insert({currentKey, newOperation}); } } - zone_disabled = (!backend.getSOAUncached(zonename, soaData)); + // In this second pass, we will process the changes and maintain a cache + // of the RRset subject to PRUNE/EXTEND operations. + std::map> cache; + for (const auto& container : rrsets) { + string changetype = toUpper(stringFromJson(container, "changetype")); + auto operationType = validateChangeType(changetype); + DNSName qname; + QType qtype; + parseRecordNameAndType(container, qname, qtype); - // edit SOA (if needed) - if (!zone_disabled && !soa_edit_api_kind.empty() && !soa_edit_done) { - DNSResourceRecord resourceRecord; - if (makeIncreasedSOARecord(soaData, soa_edit_api_kind, soa_edit_kind, resourceRecord)) { - if (!domainInfo.backend->replaceRRSet(domainInfo.id, resourceRecord.qname, resourceRecord.qtype, vector(1, resourceRecord))) { - throw ApiException("Hosting backend does not support editing records."); + key currentKey{qname, qtype}; + bool cacheNeeded{false}; + if (auto iter = changes.find(currentKey); iter != changes.end()) { + auto operations = iter->second; + cacheNeeded = (operations & ((1U << PRUNE) | (1U << EXTEND))) != 0; + } + + applyResult result{NOP}; + std::vector rrset; + switch (operationType) { + case DELETE: + result = applyDelete(domainInfo, qname, qtype, cacheNeeded, rrset); + break; + case REPLACE: + result = applyReplace(domainInfo, zonename, container, qname, qtype, soa, cacheNeeded, rrset); + break; + case PRUNE: + case EXTEND: + // First, obtain the current RRset, either from the backend or from + // our local cache if we already did some operations. 
+ if (const auto iter = cache.find(currentKey); iter != cache.end()) { + rrset = std::move(iter->second); + } + else { + DNSResourceRecord record; + domainInfo.backend->lookup(qtype, qname, static_cast(domainInfo.id)); + while (domainInfo.backend->get(record)) { + rrset.emplace_back(record); + } } + result = applyPruneOrExtend(domainInfo, zonename, container, qname, qtype, soa, operationType, rrset); + break; + } +#if 0 // Not possible in 4.9.x + if (result == ABORT) { + // Proper error response has been set up, no need to do anything further. + domainInfo.backend->abortTransaction(); + return; } +#endif + if (result == SUCCESS) { + madeAnyChanges = true; + } + // Update RRset cache if needed. + if (cacheNeeded) { + cache.insert_or_assign(currentKey, std::move(rrset)); + } + } + cache.clear(); + + if (madeAnyChanges) { + SOAData soaData; + bool zone_disabled = (!backend.getSOAUncached(zonename, soaData)); - // return old and new serials in headers - resp->headers["X-PDNS-Old-Serial"] = std::to_string(soaData.serial); - fillSOAData(resourceRecord.content, soaData); - resp->headers["X-PDNS-New-Serial"] = std::to_string(soaData.serial); + if (!zone_disabled && !soa.edit_api_kind.empty() && !soa.edit_done) { + DNSResourceRecord resourceRecord; + if (makeIncreasedSOARecord(soaData, soa.edit_api_kind, soa.edit_kind, resourceRecord)) { + if (!domainInfo.backend->replaceRRSet(domainInfo.id, resourceRecord.qname, resourceRecord.qtype, vector(1, resourceRecord))) { + throw ApiException("Hosting backend does not support editing records."); + } + } + + // return old and new serials in headers + resp->headers["X-PDNS-Old-Serial"] = std::to_string(soaData.serial); + fillSOAData(resourceRecord.content, soaData); + resp->headers["X-PDNS-New-Serial"] = std::to_string(soaData.serial); + } + + // Rectify + DNSSECKeeper dnssecKeeper(&backend); + if (!zone_disabled && !dnssecKeeper.isPresigned(zonename) && isZoneApiRectifyEnabled(domainInfo)) { + string info; + string error_msg; + if 
(!dnssecKeeper.rectifyZone(zonename, error_msg, info, false)) { + throw ApiException("Failed to rectify '" + zonename.toString() + "' " + error_msg); + } + } } } catch (...) { @@ -2480,20 +2707,15 @@ throw; } - // Rectify - DNSSECKeeper dnssecKeeper(&backend); - if (!zone_disabled && !dnssecKeeper.isPresigned(zonename) && isZoneApiRectifyEnabled(domainInfo)) { - string info; - string error_msg; - if (!dnssecKeeper.rectifyZone(zonename, error_msg, info, false)) { - throw ApiException("Failed to rectify '" + zonename.toString() + "' " + error_msg); - } - } + if (madeAnyChanges) { + domainInfo.backend->commitTransaction(); - domainInfo.backend->commitTransaction(); - - DNSSECKeeper::clearCaches(zonename); - purgeAuthCaches(zonename.toString() + "$"); + DNSSECKeeper::clearCaches(zonename); + purgeAuthCaches(zonename.toString() + "$"); + } + else { + domainInfo.backend->abortTransaction(); + } resp->body = ""; resp->status = 204; // No Content, but indicate success @@ -2578,6 +2800,9 @@ {"ttl", (double)resourceRecord.ttl}, {"disabled", resourceRecord.disabled}, {"content", makeApiRecordContent(resourceRecord.qtype, resourceRecord.content)}}; + if (resourceRecord.last_modified != 0) { + object["modified_at"] = (double)resourceRecord.last_modified; + } val = zoneIdZone.find(resourceRecord.domain_id); if (val != zoneIdZone.end()) {